pax_global_header00006660000000000000000000000064122730000530014502gustar00rootroot0000000000000052 comment=162f65d96e1a5fb8a17652701558718aa773f086 nipype-0.9.2/000077500000000000000000000000001227300005300130165ustar00rootroot00000000000000nipype-0.9.2/.coveragerc000066400000000000000000000002251227300005300151360ustar00rootroot00000000000000[run] branch = True source = nipype include = */nipype/* omit = */nipype/external/* */nipype/workflows/* */nipype/fixes/* */setup.py nipype-0.9.2/.gitattributes000066400000000000000000000000441227300005300157070ustar00rootroot00000000000000nipype/COMMIT_INFO.txt export-subst nipype-0.9.2/.gitignore000066400000000000000000000002641227300005300150100ustar00rootroot00000000000000/build /dist /nipype.egg-info /MANIFEST /nipype/build /nipype/nipype.egg-info /doc/_build /doc/users/examples /doc/api/generated *.pyc *.so .project .settings .pydevproject .idea/ nipype-0.9.2/.mailmap000066400000000000000000000015761227300005300144500ustar00rootroot00000000000000Ariel Rokem arokem Cindee Madison cindeem <> Cindee Madison cindeem Chris Filo Gorgolewski filo Chris Filo Gorgolewski Krzysztof Gorgolewski Erik Ziegler erikz Michael Waskom mwaskom Michael Waskom mwaskom Gael Varoquaux GaelVaroquaux Gael Varoquaux GaelVaroquaux Daniel Ginsburg danginsburg Colin Buchanan colinbuchanan nipype-0.9.2/.travis.yml000066400000000000000000000026431227300005300151340ustar00rootroot00000000000000language: python python: - 2.6 - 2.7 # Setup anaconda before_install: - if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then wget http://repo.continuum.io/miniconda/Miniconda-2.2.2-Linux-x86_64.sh -O miniconda.sh; else wget http://repo.continuum.io/miniconda/Miniconda3-2.2.2-Linux-x86_64.sh -O miniconda.sh; fi - chmod +x miniconda.sh - ./miniconda.sh -b - export PATH=/home/travis/anaconda/bin:$PATH # The next couple lines fix a crash with multiprocessing on Travis - sudo rm -rf /dev/shm - sudo ln -s /run/shm /dev/shm - bash <(wget -q -O- http://neuro.debian.net/_files/neurodebian-travis.sh) - travis_retry sudo apt-get install -qq --no-install-recommends fsl afni - travis_retry sudo apt-get install -qq fsl-atlases - source /etc/fsl/fsl.sh # Install packages install: - conda create -n testenv --yes pip python=$TRAVIS_PYTHON_VERSION - source activate testenv - conda install --yes numpy scipy nose traits networkx dateutil - pip install nibabel --use-mirrors - pip install python-coveralls --use-mirrors - pip install nose-cov --use-mirrors - pip install https://github.com/RDFLib/rdflib/archive/master.zip - pip install https://github.com/satra/prov/archive/enh/rdf.zip - python setup.py install # Run test script: - nosetests --with-doctest --with-cov --cov nipype --cov-config .coveragerc --logging-level=INFO # Calculate coverage after_success: - coveralls --config_file .coveragerc nipype-0.9.2/CHANGES000066400000000000000000000372771227300005300140310ustar00rootroot00000000000000Future Release ============== * FIX: DataFinder was broken due to a typo * FIX: Order of DataFinder outputs was not guaranteed, it's human sorted now * ENH: New interfaces: Vnifti2Image, VtoMat Release 0.9.1 (December 25, 2013) ============ * FIX: installation issues Release 0.9.0 (December 20, 2013) ============ * ENH: SelectFiles: a streamlined version of DataGrabber * ENH: new tools for defining workflows: JoinNode, synchronize and itersource * ENH: W3C PROV support with optional RDF export built into Nipype * ENH: Added support for Simple Linux Utility Resource Management (SLURM) * ENH: AFNI interfaces refactor, prefix, 
suffix are replaced by "flexible_%s_templates" * ENH: New SPM interfaces: - spm.ResliceToReference, - spm.DicomImport * ENH: New AFNI interfaces: - afni.AFNItoNIFTI - afni.TCorr1D * ENH: Several new interfaces related to Camino were added: - camino.SFPICOCalibData - camino.Conmat - camino.QBallMX - camino.LinRecon - camino.SFPeaks One outdated interface no longer part of Camino was removed: - camino.Conmap * ENH: Three new mrtrix interfaces were added: - mrtrix.GenerateDirections - mrtrix.FindShPeaks - mrtrix.Directions2Amplitude * ENH: New FSL interfaces: - fsl.PrepareFieldmap - fsl.TOPUP - fsl.ApplyTOPUP - fsl.Eddy * ENH: New misc interfaces: - FuzzyOverlap, - P2PDistance * ENH: New workflows: nipype.workflows.dmri.fsl.epi.[fieldmap_correction&topup_correction] * ENH: Added simplified outputname generation for command line interfaces. * ENH: Allow ants use a single mask image * ENH: Create configuration option for parameterizing directories with hashes * ENH: arrange nodes by topological sort with disconnected subgraphs * ENH: uses the nidm iri namespace for uuids * ENH: remove old reporting webpage * ENH: Added support for Vagrant * API: 'name' is now a positional argument for Workflow, Node, and MapNode constructors * API: SPM now defaults to SPM8 or SPM12b job format * API: DataGrabber and SelectFiles use human (or natural) sort now * FIX: Several fixes related to Camino interfaces: - ProcStreamlines would ignore many arguments silently (target, waypoint, exclusion ROIS, etc.) - DTLUTGen would silently round the "step", "snr" and "trace" parameters to integers - PicoPDFs would not accept more than one lookup table - PicoPDFs default pdf did not correspond to Camino default - Track input model names were outdated (and would generate an error) - Track numpds parameter could not be set for deterministic tractography - FA created output files with erroneous extension * FIX: Deals properly with 3d files in SPM Realign * FIX: SPM with MCR fixed * FIX: Cleaned up input and output spec metadata * FIX: example openfmri script now makes the contrast spec a hashed input * FIX: FILMGLS compatibility with FSL 5.0.5 * FIX: Freesurfer recon-all resume now avoids setting inputs * FIX: File removal from node respects file associations img/hdr/mat, BRIK/HEAD Release 0.8.0 (May 8, 2013) =========================== * ENH: New interfaces: nipy.Trim, fsl.GLM, fsl.SigLoss, spm.VBMSegment, fsl.InvWarp, dipy.TensorMode * ENH: Allow control over terminal output for commandline interfaces * ENH: Added preliminary support for generating Python code from Workflows. * ENH: New workflows for dMRI and fMRI pre-processing: added motion artifact correction with rotation of the B-matrix, and susceptibility correction for EPI imaging using fieldmaps. Updated eddy_correct pipeline to support both dMRI and fMRI, and new parameters. * ENH: Minor improvements to FSL's FUGUE and FLIRT interfaces * ENH: Added optional dilation of parcels in cmtk.Parcellate * ENH: Interpolation mode added to afni.Resample * ENH: Function interface can accept a list of strings containing import statements that allow external functions to run without their imports defined in the function body * ENH: Allow node configurations to override master configuration * FIX: SpecifyModel works with 3D files correctly now. Release 0.7.0 (Dec 18, 2012) ============================ * ENH: Add basic support for LSF plugin. 
* ENH: New interfaces: ICC, Meshfix, ants.Register, C3dAffineTool, ants.JacobianDeterminant, afni.AutoTcorrelate, DcmStack * ENH: New workflows: ants template building (both using 'ANTS' and the new 'antsRegistration') * ENH: New examples: how to use ANTS template building workflows (smri_ants_build_tmeplate), how to set SGE specific options (smri_ants_build_template_new) * ENH: added no_flatten option to Merge * ENH: added versioning option and checking to traits * ENH: added deprecation metadata to traits * ENH: Slicer interfaces were updated to version 4.1 Release 0.6.0 (Jun 30, 2012) ============================ * API: display variable no longer encoded as inputs in commandline interfaces * ENH: input hash not modified when environment DISPLAY is changed * ENH: support for 3d files for TSNR calculation * ENH: Preliminary support for graph submission with SGE, PBS and Soma Workflow * ENH: New interfaces: MySQLSink, nipy.Similarity, WatershedBEM, MRIsSmooth, NetworkBasedStatistic, Atropos, N4BiasFieldCorrection, ApplyTransforms, fs.MakeAverageSubject, epidewarp.fsl, WarpTimeSeriesImageMultiTransform, AVScale, mri_ms_LDA * ENH: simple interfaces for spm * FIX: CompCor component calculation was erroneous * FIX: filename generation for AFNI and PRELUDE * FIX: improved slicer module autogeneration * FIX: added missing options for BBRegsiter * FIX: functionality of remove_unnecessary_ouputs cleaned up * FIX: local hash check works with appropriate inputs * FIX: Captures all stdout from commandline programs * FIX: Afni outputs should inherit from TraitedSpec Release 0.5.3 (Mar 23, 2012) ============================ * FIX: SPM model generation when output units is in scans Release 0.5.2 (Mar 14, 2012) ============================ * API: Node now allows specifying node level configuration for SGE/PBS clusters * API: Logging to file is disabled by default * API: New location of log file -> .nipype/nipype.cfg * ENH: Changing logging options via config works for distributed processing * FIX: Unittests on debian (logging and ipython) Release 0.5 (Mar 10, 2012) ========================== * API: FSL defaults to Nifti when OUTPUTTYPE environment variable not found * API: By default inputs are removed from Node working directory * API: InterfaceResult class is now versioned and stores class type not instance * API: Added FIRST interface * API: Added max_jobs paramter to plugin_args. limits the number of jobs executing at any given point in time * API: crashdump_dir is now a config execution option * API: new config execution options for controlling hash checking, execution and logging behavior when running in distributed mode. * API: Node/MapNode has new attribute that allows it to run on master thread. * API: IPython plugin now invokes IPython 0.11 or greater * API: Canned workflows are now all under a different package structure * API: SpecifyModel event_info renamed to event_files * API: DataGrabber is always being rerun (unless overwrite is set to False on Node level) * API: "stop_on_first_rerun" does not stop for DataGrabber (unless overwrite is set to True on Node level) * API: Output prefix can be set for spm nodes (SliceTiming, Realign, Coregister, Normalize, Smooth) * ENH: Added fsl resting state workflow based on behzadi 2007 CompCorr method. 
* ENH: TSNR node produces mean and std-dev maps; allows polynomial detrending * ENH: IdentityNodes are removed prior to execution * ENH: Added Michael Notter's beginner's guide * ENH: Added engine support for status callback functions * ENH: SPM create warped node * ENH: All underlying interfaces (including python ones) are now optional * ENH: Added imperative programming option with Nodes and caching * ENH: Added debug mode to configuration * ENH: Results can be stored and loaded without traits exceptions * ENH: Added concurrent log handler for distributed writing to log file * ENH: Reporting can be turned off using config * ENH: Added stats files to FreeSurferOutput * ENH: Support for Condor through qsub emulation * ENH: IdentityNode with iterable expansion takes place after remaining Identity Node removal * ENH: Crashfile display script added * ENH: Added FmriRealign4d node wrapped from nipy * ENH: Added TBSS workflows and examples * ENH: Support for openfmri data processing * ENH: Package version check * FIX: Fixed spm preproc workflow to cater to multiple functional runs * FIX: Workflow outputs displays nodes with empty outputs * FIX: SUSAN workflow works without usans * FIX: SGE fixed for reading custom templates * FIX: warping in SPM realign, Dartel and interpolation parameters * FIX: Fixed voxel size parameter in freesurfer mri_convert * FIX: 4D images in spm coregister * FIX: Works around matlab tty bug * FIX: Overwriting connection raises exception * FIX: Outputs are loaded from results and not stored in memory for during distributed operation * FIX: SPM threshold uses SPM.mat name and improved error detection * FIX: Removing directory contents works even when a node has no outputs * FIX: DARTEL workflows will run only when SPM 8 is available * FIX: SPM Normalize estimate field fixed * FIX: hashmethod argument now used for calculating hash of old file * FIX: Modelgen now allows FSL style event files Release 0.4.1 (Jun 16, 2011) ============================ * Minor bugfixes Release 0.4 (Jun 11, 2011) ========================== * API: Timestamp hashing does not use ctime anymore. Please update your hashes by running workflows with updatehash=True option NOTE: THIS IS THE DEFAULT CONFIG NOW, so unless you updatehash, workflows will rerun * API: Workflow run function no longer supports (inseries, createdirsonly). Functions used in connect string must be pickleable * API: SPM EstimateContrast: ignore_derivs replaced by use_derivs * API: All interfaces: added new config option ignore_exception * API: SpecifModel no longer supports (concatenate_runs, output_specs). high_pass_filter cutoff is mandatory (even if its set to np.inf). Additional interfaces SpecifySPMModel and SpecifySparseModel support other types of data. * API: fsl.DTIFit input "save" is now called "save_tensor" * API: All inputs of IdentityInterfaces are mandatory by default. You can turn this off by specifying mandatory_inputs=False to the constructor. 
* API: fsl FILMGLS input "autocorr_estimate" is now called "autocorr_estimate_only" * API: fsl ContrastMgr now requires access to specific files (no longer accepts the result directory) * API: freesurfer.GLMFit input "surf" is now a boolean with three corresponding inputs -- subject_id, hemi, and surf_geo * ENH: All commandline interfaces display stdout and stderr * ENH: All interfaces raise exceptions on error with an option to suppress * ENH: Supports a plugin interface for execution (current support for multiprocessing, IPython, SGE, PBS) * ENH: MapNode runs in parallel under IPython, SGE, MultiProc, PBS * ENH: Optionally allows keeping only required outputs * ENH: New interface: utility.Rename to change the name of files, optionally using python string-formatting with inputs or regular expressions matching * ENH: New interface: freesurfer.ApplyMask (mri_mask) * ENH: New FSL interface -- SwapDimensions (fslswapdim) * ENH: Sparse models allow regressor scaling and temporal derivatives * ENH: Added support for the component parts of FSL's TBSS workflow (TBSSSkeleton and DistanceMap) * ENH: dcm2nii interface exposes bvals, bvecs, reoriented and cropped images * ENH: Added several higher-level interfaces to the fslmaths command: - ChangeDataType, Threshold, MeanImage, IsotropicSmooth, ApplyMask, TemporalFilter DilateImage, ErodeImage, SpatialFilter, UnaryMaths, BinaryMaths, MultiImageMaths * ENH: added support for networx 1.4 and improved iterable expansion * ENH: Replaced BEDPOSTX and EddyCurrent with nipype pipelines * ENH: Ability to create a hierarchical dot file * ENH: Improved debugging information for rerunning nodes * ENH: Added 'stop_on_first_rerun' option * ENH: Added support for Camino * ENH: Added support for Camino2Trackvis * ENH: Added support for Connectome Viewer * BF: dcm2nii interface handles gzipped files correctly * BF: FNIRT generates proper outputs * BF: fsl.DTIFit now properly collects tensor volume * BF: updatehash now removes old result hash file Release 0.3.4 (Jan 12, 2011) ============================ * API: hash values for float use a string conversion up to the 10th decimal place. * API: Iterables in output path will always be generated as _var1_val1_var2_val2 pairs * ENH: Added support to nipy: GLM fit, contrast estimation and calculating mask from EPI * ENH: Added support for manipulating surface files in Freesurfer: - projecting volume images onto the surface - smoothing along the surface - transforming a surface image from one subject to another - using tksurfer to save pictures of the surface * ENH: Added support for flash processing using FreeSurfer * ENH: Added support for flirt matrix in BBRegister * ENH: Added support for FSL convert_xfm * ENH: hashes can be updated again without rerunning all nodes. 
* ENH: Added multiple regression design for FSL * ENH: Added SPM based Analyze to Nifti converter * ENH: Added increased support for PyXNAT * ENH: Added support for MCR-based binary version of SPM * ENH: Added SPM node for calculating various threshold statistics * ENH: Added distance and dissimilarity measurements * BF: Diffusion toolkit gets installed * BF: Changed FNIRT interface to accept flexible lists (rather than 4-tuples) on all options specific to different subsampling levels Release 0.3.3 (Sep 16, 2010) ============================ * API: subject_id in ModelSpec is now deprecated * API: spm.Threshold - does not need mask, beta, RPV anymore - takes only one image (stat_image - mind the name change) - works with SPM2 SPM.mat - returns additional map - pre topological FDR * ENH: Added support for Diffusion toolkit * ENH: Added support for FSL slicer and overlay * ENH: Added support for dcm2nii * BF: DataSink properly handles lists of lists now * BF: DataGrabber has option for raising Exception on getting empty lists * BF: Traits logic for 'requires' metadata * BF: allows workflows to be relocatable * BF: nested workflows with connections don't raise connection not found error * BF: multiple workflows with identical nodenames and iterables do not create nestsed workflows Release 0.3.2 (Aug 03, 2010) ============================ Enhancements ------------ - all outputs from nodes are now pickled as part of workflow processing - added git developer docs Bugs fixed ---------- * FreeSurfer - Fixed bugs in SegStats doctest Release 0.3.1 (Jul 29, 2010) ============================ Bugs fixed ---------- * FreeSurfer - Fixed bugs in glmfit and concatenate - Added group t-test to freesurfer tutorial Release 0.3 (Jul 27, 2010) ========================== Incompatible changes -------------------- * Complete redesign of the Interface class - heavy use of Traits. * Changes in the engine API - added Workflow and MapNode. Compulsory name argument. Features added -------------- * General: - Type checking of inputs and outputs using Traits from ETS_. - Support for nested workflows. - Preliminary Slicer and AFNI support. - New flexible DataGrabber node. - AtlasPick and Threshold nodes. - Preliminary support for XNAT. - Doubled number of the tutorials. * FSL: - Added DTI processing nodes (note that TBSS nodes are still experimental). - Recreated FEAT workflow. * SPM: - Added New Segment and many other nodes. - Redesigned second level analysis. nipype-0.9.2/INSTALL000066400000000000000000000002441227300005300140470ustar00rootroot00000000000000.. -*- rst -*- rest mode for emacs .. _development-quickstart: For installation instructions see documentation: http://nipy.org/nipype/ or doc/users/install.rst nipype-0.9.2/LICENSE000066400000000000000000000030041227300005300140200ustar00rootroot00000000000000Copyright (c) 2009-2014, NIPY Developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
* Neither the name of the NIPY Developers nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. nipype-0.9.2/MANIFEST.in000066400000000000000000000004401227300005300145520ustar00rootroot00000000000000include INSTALL include LICENSE include MANIFEST.in include README include THANKS include Makefile include build_docs.py include setup_egg.py include doc/documentation.zip include nipype/COMMIT_INFO.txt recursive-include doc * recursive-include matlabscripts * recursive-include tools * nipype-0.9.2/Makefile000066400000000000000000000034761227300005300144700ustar00rootroot00000000000000# Makefile for building distributions of nipype. # Files are then pushed to sourceforge using rsync with a command like this: # rsync -e ssh nipype-0.1-py2.5.egg cburns,nipy@frs.sourceforge.net:/home/frs/project/n/ni/nipy/nipype/nipype-0.1/ PYTHON ?= python NOSETESTS ?= nosetests zipdoc: html zip documentation.zip doc/_build/html sdist: zipdoc @echo "Building source distribution..." python setup.py sdist @echo "Done building source distribution." # XXX copy documentation.zip to dist directory. # XXX Somewhere the doc/_build directory is removed and causes # this script to fail. egg: zipdoc @echo "Building egg..." python setup.py bdist_egg @echo "Done building egg." upload_to_pypi: zipdoc @echo "Uploading to PyPi..." python setup.py sdist --formats=zip,gztar upload trailing-spaces: find . -name "*.py" | xargs perl -pi -e 's/[ \t]*$$//' @echo "Reverting test_docparse" git checkout nipype/utils/tests/test_docparse.py clean-pyc: find . -name "*.pyc" | xargs rm -f clean-so: find . -name "*.so" | xargs rm -f find . -name "*.pyd" | xargs rm -f clean-build: rm -rf build clean-ctags: rm -f tags clean: clean-build clean-pyc clean-so clean-ctags in: inplace # just a shortcut inplace: $(PYTHON) setup.py build_ext -i test-code: in $(NOSETESTS) -s nipype --with-doctest test-doc: $(NOSETESTS) -s --with-doctest --doctest-tests --doctest-extension=rst \ --doctest-fixtures=_fixture doc/ test-coverage: $(NOSETESTS) -s --with-doctest --with-coverage --cover-package=nipype \ --config=.coveragerc test: clean test-code html: @echo "building docs" make -C doc clean html specs: @echo "Checking specs and autogenerating spec tests" python tools/checkspecs.py check-before-commit: trailing-spaces html test specs @echo "removed spaces" @echo "built docs" @echo "ran test" @echo "generated spec tests" nipype-0.9.2/README.rst000066400000000000000000000061401227300005300145060ustar00rootroot00000000000000======================================================== NIPYPE: Neuroimaging in Python: Pipelines and Interfaces ======================================================== .. 
image:: https://travis-ci.org/nipy/nipype.png?branch=master :target: https://travis-ci.org/nipy/nipype .. image:: https://coveralls.io/repos/nipy/nipype/badge.png :target: https://coveralls.io/r/nipy/nipype Current neuroimaging software offer users an incredible opportunity to analyze data using a variety of different algorithms. However, this has resulted in a heterogeneous collection of specialized applications without transparent interoperability or a uniform operating interface. *Nipype*, an open-source, community-developed initiative under the umbrella of NiPy, is a Python project that provides a uniform interface to existing neuroimaging software and facilitates interaction between these packages within a single workflow. Nipype provides an environment that encourages interactive exploration of algorithms from different packages (e.g., SPM, FSL, FreeSurfer, AFNI, Slicer), eases the design of workflows within and between packages, and reduces the learning curve necessary to use different packages. Nipype is creating a collaborative platform for neuroimaging software development in a high-level language and addressing limitations of existing pipeline systems. *Nipype* allows you to: * easily interact with tools from different software packages * combine processing steps from different software packages * develop new workflows faster by reusing common steps from old ones * process data faster by running it in parallel on many cores/machines * make your research easily reproducible * share your processing workflows with the community Documentation ------------- Please see the ``doc/README.txt`` document for information on our documentation. Website ------- Information specific to NIPYPE is located here:: http://nipy.org/nipype Mailing Lists ------------- For core NIPYPE related issues, please see the developer's list here:: http://projects.scipy.org/mailman/listinfo/nipy-devel For user NIPYPE related issues, please see the user's list here:: http://groups.google.com/group/nipy-user For NIPYPE related issues, please add *NIPYPE* to the subject line NIPYPE structure ---------------- Currently NIPYPE consists of the following files and directories: INSTALL NIPYPE prerequisites, installation, development, testing, and troubleshooting. README This document. THANKS NIPYPE developers and contributors. Please keep it up to date!! LICENSE NIPYPE license terms. doc/ Sphinx/reST documentation examples/ nipype/ Contains the source code. setup.py Script for building and installing NIPYPE. License information ------------------- We use the 3-clause BSD license; the full license is in the file ``LICENSE`` in the nipype distribution. There are interfaces to some GNU code but these are entirely optional. All trademarks referenced herein are property of their respective holders. Copyright (c) 2009-2014, NIPY Developers All rights reserved. nipype-0.9.2/THANKS.rst000066400000000000000000000055351227300005300145500ustar00rootroot00000000000000.. -*- mode: rst -*- Code contributors ----------------- Contributors to Nipype include but are not limited to: .. hlist:: * Aimi Watanabe * Alexander Schaefer * Alexandre Gramfort * Anisha Keshavan * Ariel Rokem * Ben Acland * Basile Pinsard * Brendan Moloney * Brian Cheung * Charl Linssen * Chris Filo Gorgolewski * Chris Steele * Christian Haselgrove * Christopher Burns * Cindee Madison * Claire Tarbert * Colin Buchanan * Daniel Ginsburg * Daniel Haehn * Daniel Margulies * Dav Clark * David Welch * Drew Erickson * Erik Kastman * Félix C. 
Morency * Gael Varoquaux * Hans Johnson * Janosch Linkersdörfer * Januzz * Jarrod Millman * Jeff Lai * Jessica Forbes * John Salvatore * Lijie Huang * Michael Hallquist * Michael Hanke * Michael Notter * Michael Waskom * Nolan Nichols * Oliver Hinds * Oscar Esteban * Rosalia Tungaraza * Satrajit Ghosh * Sharad Sikka * Stephan Gerhard * Erik Ziegler * Valentin Haenel * Xiangzhen Kong * Xu Wang * Yannick Schwartz * Yaroslav O. Halchenko For full most up to date list see `Ohloh `__. Other contributors ------------------ .. hlist:: * Matthew Brett * Michael Castelle * Philippe Ciuciu * Yann Cointepas * Mark D'Esposito * Susan Gabrieli * Brian Hawthorne * Tim Leslie * Fernando Perez * Tyler Perrachione * Jean-Baptiste Poline * Alexis Roche * Denis Riviere * Gretchen Reynolds * Jonathan Taylor * Bertrand Thirion * Bernjamin Thyreau * Mike Trumpis * Karl Young * Tom Waite We would also like to thank `JetBrains `__ for providing `Pycharm `__ licenses. Funding ------- Satrajit Ghosh work on this project was partially funded by NIBIB R03 EB008673 (PI: Ghosh and Whitfield-Gabrieli) and by the `INCF `__ through a contract with TankThink Labs, LLC. Chris Burns was supported by NIMH grant 5R01MH081909-02 (PI: Desposito). Hans Jonson was supported by `2 U54 EB005149 - 06 Core 2b Huntington's Disease - Driving Biological Project `__, `S10 RR023392 Enterprise Storage In A Collaborative Neuroimaging Environment `__, `R01 NS040068 Neurobiological Predictors of Huntington's Disease `__, and `UL1 TR000442 University of Iowa Clinical and Translational Science Program `__. nipype-0.9.2/Vagrantfile000066400000000000000000000046541227300005300152140ustar00rootroot00000000000000VAGRANTFILE_API_VERSION = "2" $script = < {% endblock %} nipype-0.9.2/doc/_templates/indexsidebar.html000066400000000000000000000016371227300005300212560ustar00rootroot00000000000000{% block nipypelinks %}

{{ _('Links') }}

{% endblock %} nipype-0.9.2/doc/_templates/layout.html000066400000000000000000000033551227300005300201310ustar00rootroot00000000000000{% extends "!layout.html" %} {% set title = 'Neuroimaging in Python - Pipelines and Interfaces' %} {% set short_title = 'Nipype' %} {% block extrahead %} {{ super() }} {% endblock %} {% block header %} {% endblock %} {% block relbar1 %}{% endblock %} {% block relbar2 %}{% endblock %} {% block sidebar1 %}{{ sidebar() }}{% endblock %} {% block sidebar2 %}{% endblock %} {% block footer %} {{ super() }} {% endblock %} nipype-0.9.2/doc/_templates/navbar.html000066400000000000000000000010501227300005300200530ustar00rootroot00000000000000 Home · Quickstart · Documentation · Citation · NiPy nipype-0.9.2/doc/_templates/sidebar_versions.html000066400000000000000000000031231227300005300221460ustar00rootroot00000000000000{% block versions %}

{{ _('Versions') }}

Release Devel
0.9.2 1.0-dev
Download Github
{% endblock %} nipype-0.9.2/doc/about.rst000066400000000000000000000017651227300005300154400ustar00rootroot00000000000000.. _about: ===== About ===== Citation -------- .. admonition:: Reference Gorgolewski K, Burns CD, Madison C, Clark D, Halchenko YO, Waskom ML, Ghosh SS. (2011). Nipype: a flexible, lightweight and extensible neuroimaging data processing framework in Python. Front. Neuroimform. 5:13. `Download`__ __ paper_ :: @article { Gorgolewski2011, title = "Nipype: a flexible, lightweight and extensible neuroimaging data processing framework in python.", year = "2011", author = "Krzysztof Gorgolewski and Christopher D Burns and Cindee Madison and Dav Clark and Yaroslav O Halchenko and Michael L Waskom and Satrajit S Ghosh", journal = "Front Neuroinform", volume = "5", month = "08", doi = "10.3389/fninf.2011.00013", pubmed = "21897815", url = "http://dx.doi.org/10.3389/fninf.2011.00013", issn = "1662-5196"} .. include:: links_names.txt .. include:: ../THANKS.rst nipype-0.9.2/doc/api/000077500000000000000000000000001227300005300143345ustar00rootroot00000000000000nipype-0.9.2/doc/api/index.rst000066400000000000000000000001401227300005300161700ustar00rootroot00000000000000.. _api-index: ### API ### :Release: |version| :Date: |today| .. include:: generated/gen.rst nipype-0.9.2/doc/changes.rst000066400000000000000000000002111227300005300157170ustar00rootroot00000000000000:tocdepth: 2 .. _changes: ================= Changes in Nipype ================= .. include:: ../CHANGES .. include:: links_names.txt nipype-0.9.2/doc/conf.py000066400000000000000000000200541227300005300150630ustar00rootroot00000000000000# emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set fileencoding=utf-8 ft=python sts=4 ts=4 sw=4 et: # # nipype documentation build configuration file, created by # sphinx-quickstart on Mon Jul 20 12:30:18 2009. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os nipypepath = os.path.abspath('..') sys.path.insert(1,nipypepath) import nipype # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('sphinxext')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.inheritance_diagram', 'sphinx.ext.graphviz', 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.pngmath', 'sphinx.ext.autosummary', 'numpy_ext.numpydoc', 'matplotlib.sphinxext.plot_directive', 'matplotlib.sphinxext.only_directives' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. 
project = u'nipype' copyright = u'2009-13, Neuroimaging in Python team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = nipype.__version__ # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y, %H:%M PDT' # List of documents that shouldn't be included in the build. unused_docs = ['api/generated/gen'] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Sphinxext configuration --------------------------------------------------- # Set attributes for layout of inheritance diagrams inheritance_graph_attrs = dict(rankdir="LR", size='"6.0, 8.0"', fontsize=14, ratio='compress') inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75, color='dodgerblue1', style='filled') # Flag to show todo items in rendered output todo_include_todos = True # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'sphinxdoc' # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'nipype.css' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = 'nipy pipeline and interfaces package' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # Content template for the index page. html_index = 'index.html' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = {'**': ['gse.html','localtoc.html', 'sidebar_versions.html', 'indexsidebar.html'], 'searchresults' : ['sidebar_versions.html', 'indexsidebar.html'], 'version' : []} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {'index': 'index.html'} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'nipypedoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('documentation', 'nipype.tex', u'nipype Documentation', u'Neuroimaging in Python team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None} exclude_patterns = ['interfaces/generated/gen.rst', 'api/generated/gen.rst'] nipype-0.9.2/doc/devel/000077500000000000000000000000001227300005300146625ustar00rootroot00000000000000nipype-0.9.2/doc/devel/architecture.rst000066400000000000000000000073451227300005300201070ustar00rootroot00000000000000====================================== Architecture (discussions from 2009) ====================================== This section reflects notes and discussion between developers during the start of the nipype project in 2009. Design Guidelines ----------------- These are guidelines that the core nipype developers have agreed on: Interfaces should keep all parameters affecting construction of the appropriate command in the "input" bunch. The .run() method of an Interface should include all required inputs as explicitly named parameters, and they should take a default value of None. Any Interface should at a minimum support cwd as a command-line argument to .run(). 
This may be accomplished by allowing cwd as an element of the input Bunch, or handled as a separate case. Relatedly, any Interface should output all files to cwd if it is set, and otherwise to os.getcwd() (or equivalent). We need to decide on a consistent policy towards the maintinence of paths to files. It seems like the best strategy might be to do absolute (os.realpath?) filenames by default, allowing for relative paths by explicitly including something that doesn't start with a '/'. This could include '.' in some sort of path-spec. Class attributes should never be modified by an instance of that class. And probably not ever. Providing for Provenance ------------------------ The following is a specific discussion that should be thought out an more generally applied to the way we handle auto-generation / or "sourcing" of settings in an interface. There are two possible sources (at a minimum) from which the interface instance could obtain "outputtype" - itself, or FSLInfo. Currently, the outputtype gets read from FSLInfo if self.outputtype (er, _outputtype?) is None. In the case of other opt_map specifications, there are defaults that get specified if the value is None. For example output filenames are often auto-generated. If you look at the code for fsl.Bet for example, there is no way for the outfile to get picked up at the pipeline level, because it is a transient variable. This is OK, as the generation of the outfile name is contingent ONLY on inputs which ARE available to the pipeline machinery (i.e., via inspection of the Bet instance's attributes). However, with outputtype, we are in a situation in which "autogeneration" incorporates potentially transient information external to the instance itself. Thus, some care needs to be taken in always ensuring this information is hashable. Design Principles ----------------- These are (currently) Dav Clark's best guess at what the group might agree on: It should be very easy to figure out what was done by the pypeline. Code should support relocatability - this could be via URIs, relative paths or potentially other mechanisms. Unless otherwise called for, code should be thread safe, just in case. The pipeline should make it easy to change aspects of an analysis with minimal recomputation, downloading, etc. (This is not the case currently - any change will overwrite the old node). Also, the fact that multiple files get rolled into a single node is problematic for similar reasons. E.g. - node([file1 ... file100]) will get recomputed if we add only one file!. However, it should also be easy to identify and delete things you don't need anymore. Pipelines and bits of pipelines should be easy to share. Things that are the same should be called the same thing in most places. For interfaces that have an obvious meaning for the terms, "infiles" and "outfile(s)". If a file is in both the inputs and outputs, it should be called the same thing in both places. If it is produced by one interface and consumed by another, same thing should be used. Discussions ----------- .. toctree:: :maxdepth: 1 filename_generation nipype-0.9.2/doc/devel/cmd_interface_devel.rst000066400000000000000000000176511227300005300213700ustar00rootroot00000000000000.. _interface_devel: =============================== How to wrap a command line tool =============================== The aim of this section is to describe how external programs and scripts can be wrapped for use in Nipype either as interactive interfaces or within the workflow/pipeline environment. 
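To give a sense of the end product before going into the details: once a tool has been wrapped, using it amounts to instantiating the interface, setting inputs and calling ``run()``. The snippet below is only an illustrative sketch using the FSL BET wrapper that already ships with Nipype (running it requires FSL to be installed, and input names can differ between Nipype versions)::

    from nipype.interfaces import fsl

    btr = fsl.BET()                         # the wrapped 'bet' command line tool
    btr.inputs.in_file = 'structural.nii'   # must be an existing image on disk
    print btr.cmdline                       # the command line Nipype will execute
    res = btr.run()                         # runs bet and collects its outputs

The rest of this section describes how such a wrapper is written.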
Currently, there is support for command line executables/scripts and matlab scripts. One can also create pure Python interfaces. The key to defining interfaces is to provide a formal specification of inputs and outputs and determining what outputs are generated given a set of inputs. Defining inputs and outputs =========================== In Nipype we use Enthought Traits to define inputs and outputs of the interfaces. This allows to introduce easy type checking. Inputs and outputs are grouped into separate classes (usually suffixed with InputSpec and OutputSpec). For example: .. testcode:: class ExampleInputSpec(TraitedSpec): input_volume = File(desc = "Input volume", exists = True, mandatory = True) parameter = traits.Int(desc = "some parameter") class ExampleOutputSpec(TraitedSpec): output_volume = File(desc = "Output volume", exists = True) For the Traits (and Nipype) to work correctly output and input spec has to be inherited from TraitedSpec (however, this does not have to be direct inheritance). Traits (File, Int etc.) have different parameters (called metadata). In the above example we have used the ``desc`` metadata which holds human readable description of the input. The ``mandatory`` flag forces Nipype to throw an exception if the input was not set. ``exists`` is a special flag that works only for ``File traits`` and checks if the provided file exists. More details can be found at `interface_specs`_. The input and output specifications have to be connected to the our example interface class: .. testcode:: class Example(Interface): input_spec = ExampleInputSpec output_spec = ExampleOutputSpec Where the names of the classes grouping inputs and outputs were arbitrary the names of the fields within the interface they are assigned are not (it always has to be input_spec and output_spec). Of course this interface does not do much because we have not specified how to process the inputs and create the outputs. This can be done in many ways. Command line executable ======================= As with all interfaces command line wrappers need to have inputs defined. Command line input spec has to inherit from CommandLineInputSpec which adds two extra inputs: environ (a dictionary of environmental variables), and args (a string defining extra flags). In addition input spec can define the relation between the inputs and the generated command line. To achieve this we have added two metadata: ``argstr`` (string defining how the argument should be formated) and ``position`` (number defining the order of the arguments). For example .. testcode:: class ExampleInputSpec(CommandLineSpec): input_volume = File(desc = "Input volume", exists = True, mandatory = True, position = 0, argstr="%s") parameter = traits.Int(desc = "some parameter", argstr = "--param %d") As you probably noticed the ``argstr`` is a printf type string with formatting symbols. For an input defined in InputSpec to be included into the executed commandline ``argstr`` has to be included. Additionally inside the main interface class you need to specify the name of the executable by assigning it to the ``_cmd`` field. Also the main interface class needs to inherit from `CommandLine`_: .. testcode:: class Example(CommandLine): _cmd = 'my_command' input_spec = ExampleInputSpec output_spec = ExampleOutputSpec There is one more thing we need to take care of. When the executable finishes processing it will presumably create some output files. 
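As an aside, the input definitions above are already enough for Nipype to assemble the command line. A rough sketch of what that looks like, assuming the ``Example`` class defined above and an existing input file (``my_command`` remains a made-up executable)::

    # 'structural.nii' has to exist because input_volume sets exists=True
    example = Example(input_volume='structural.nii', parameter=5)
    print example.cmdline
    # expected output (roughly): my_command structural.nii --param 5
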
We need to know which files to look for, check if they exist and expose them to whatever node would like to use them. This is done by implementing `_list_outputs`_ method in the main interface class. Basically what it does is assigning the expected output files to the fields of our output spec: .. testcode:: def _list_outputs(self): outputs = self.output_spec().get() outputs['output_volume'] = os.path.abspath('name_of_the_file_this_cmd_made.nii') return outputs Sometimes the inputs need extra parsing before turning into command line parameters. For example imagine a parameter selecting between three methods: "old", "standard" and "new". Imagine also that the command line accept this as a parameter "--method=" accepting 0, 1 or 2. Since we are aiming to make nipype scripts as informative as possible it's better to define the inputs as following: .. testcode:: class ExampleInputSpec(CommandLineSpec): method = traits.Enum("old", "standard", "new", desc = "method", argstr="--method=%d") Here we've used the Enum trait which restricts input a few fixed options. If we would leave it as it is it would not work since the argstr is expecting numbers. We need to do additional parsing by overloading the following method in the main interface class: .. testcode:: def _format_arg(self, name, spec, value): if name == 'method': return spec.argstr%{"old":0, "standard":1, "new":2}[value] return super(Example, self)._format_arg(name, spec, value) Here is a minimalistic interface for the gzip command: .. testcode:: from nipype.interfaces.base import ( TraitedSpec, CommandLineInputSpec, CommandLine, File ) import os class GZipInputSpec(CommandLineInputSpec): input_file = File(desc="File", exists=True, mandatory=True, argstr="%s") class GZipOutputSpec(TraitedSpec): output_file = File(desc = "Zip file", exists = True) class GZipTask(CommandLine): input_spec = GZipInputSpec output_spec = GZipOutputSpec cmd = 'gzip' def _list_outputs(self): outputs = self.output_spec().get() outputs['output_file'] = os.path.abspath(self.inputs.input_file + ".gz") return outputs if __name__ == '__main__': zipper = GZipTask(input_file='an_existing_file') print zipper.cmdline zipper.run() Creating outputs on the fly =========================== In many cases, command line executables will require specifying output file names as arguments on the command line. We have simplified this procedure with three additional metadata terms: ``name_source``, ``name_template``, ``keep_extension``. For example in the :ref:`InvWarp ` class, the ``inverse_warp`` parameter is the name of the output file that is created by the routine. .. testcode:: class InvWarpInputSpec(FSLCommandInputSpec): ... inverse_warp = File(argstr='--out=%s', name_source=['warp'], hash_files=False, name_template='%s_inverse', ... we add several metadata to inputspec. name_source indicates which field to draw from, this field must be the name of a File. hash_files indicates that the input for this field if provided should not be used in computing the input hash for this interface. name_template (optional) overrides the default ``_generated`` suffix output_name (optional) name of the output (if this is not set same name as the input will be assumed) keep_extension (optional - not used) if you want the extension from the input to be kept In addition one can add functionality to your class or base class, to allow changing extensions specific to package or interface .. 
testcode:: def self._overload_extension(self, value): return value #do whatever you want here with the name Finally, in the outputspec make sure the name matches that of the inputspec. .. testcode:: class InvWarpOutputSpec(TraitedSpec): inverse_warp = File(exists=True, desc=('Name of output file, containing warps that ' 'are the "reverse" of those in --warp.')) nipype-0.9.2/doc/devel/filename_generation.rst000066400000000000000000000132041227300005300214070ustar00rootroot00000000000000========================== Auto-generated filenames ========================== In refactoring the inputs in the traitlets branch I'm working through the different ways that filenames are generated and want to make sure the interface is consistent. The notes below are all using fsl.Bet as that's the first class we're Traiting. Other interface classes may handle this differently, but should agree on a convention and apply it across all Interfaces (if possible). Current Rules ------------- These rules are for fsl.Bet, but it appears they are the same for all fsl and spm Interfaces. Bet has two mandatory parameters, ``infile`` and ``outfile``. These are the rules for how they are handled in different use cases. 1. If ``infile`` or ``outfile`` are absolute paths, they are used as-is and never changed. This allows users to override any filename/path generation. 2. If ``outfile`` is not specified, a filename is generated. 3. Generated filenames (at least for ``outfile``) are based on: * ``infile``, the filename minus the extensions. * A suffix specified by the Interface. For example Bet uses *_brain* suffix. * The current working directory, os.getcwd(). Example: If ``infile`` == 'foo.nii' and the cwd is ``/home/cburns`` then generated ``outfile`` for Bet will be ``/home/cburns/foo_brain.nii.gz`` 4. If ``outfile`` is not an absolute path, for instance just a filename, the absolute path is generated using ``os.path.realpath``. This absolute path is needed to make sure the packages (Bet in this case) write the output file to a location of our choosing. The generated absolute path is only used in the ``cmdline`` at runtime and does __not__ overwrite the class attr ``self.inputs.outfile``. It is generated only when the ``cmdline`` is invoked. Walking through some examples ----------------------------- In this example we assign ``infile`` directly but ``outfile`` is generated in ``Bet._parse_inputs`` based on ``infile``. The generated ``outfile`` is only used in the cmdline at runtime and not stored in ``self.inputs.outfile``. This seems correct. .. sourcecode:: ipython In [15]: from nipype.interfaces import fsl In [16]: mybet = fsl.Bet() In [17]: mybet.inputs.infile = 'foo.nii' In [18]: res = mybet.run() In [19]: res.runtime.cmdline Out[19]: 'bet foo.nii /Users/cburns/src/nipy-sf/nipype/trunk/nipype/interfaces/tests/foo_brain.nii.gz' In [21]: mybet.inputs Out[21]: Bunch(center=None, flags=None, frac=None, functional=None, infile='foo.nii', mask=None, mesh=None, nooutput=None, outfile=None, outline=None, radius=None, reduce_bias=None, skull=None, threshold=None, verbose=None, vertical_gradient=None) In [24]: mybet.cmdline Out[24]: 'bet foo.nii /Users/cburns/src/nipy-sf/nipype/trunk/nipype/interfaces/tests/foo_brain.nii.gz' In [25]: mybet.inputs.outfile In [26]: mybet.inputs.infile Out[26]: 'foo.nii' We get the same behavior here when we assign ``infile`` at initialization: .. 
sourcecode:: ipython In [28]: mybet = fsl.Bet(infile='foo.nii') In [29]: mybet.cmdline Out[29]: 'bet foo.nii /Users/cburns/src/nipy-sf/nipype/trunk/nipype/interfaces/tests/foo_brain.nii.gz' In [30]: mybet.inputs Out[30]: Bunch(center=None, flags=None, frac=None, functional=None, infile='foo.nii', mask=None, mesh=None, nooutput=None, outfile=None, outline=None, radius=None, reduce_bias=None, skull=None, threshold=None, verbose=None, vertical_gradient=None) In [31]: res = mybet.run() In [32]: res.runtime.cmdline Out[32]: 'bet foo.nii /Users/cburns/src/nipy-sf/nipype/trunk/nipype/interfaces/tests/foo_brain.nii.gz' Here we specify absolute paths for both ``infile`` and ``outfile``. The command lines look as expected: .. sourcecode:: ipython In [53]: import os In [54]: mybet = fsl.Bet() In [55]: mybet.inputs.infile = os.path.join('/Users/cburns/tmp/junk', 'foo.nii') In [56]: mybet.inputs.outfile = os.path.join('/Users/cburns/tmp/junk', 'bar.nii') In [57]: mybet.cmdline Out[57]: 'bet /Users/cburns/tmp/junk/foo.nii /Users/cburns/tmp/junk/bar.nii' In [58]: res = mybet.run() In [59]: res.runtime.cmdline Out[59]: 'bet /Users/cburns/tmp/junk/foo.nii /Users/cburns/tmp/junk/bar.nii' Here passing in a new ``outfile`` in the ``run`` method will update ``mybet.inputs.outfile`` to the passed in value. Should this be the case? .. sourcecode:: ipython In [110]: mybet = fsl.Bet(infile='foo.nii', outfile='bar.nii') In [111]: mybet.inputs.outfile Out[111]: 'bar.nii' In [112]: mybet.cmdline Out[112]: 'bet foo.nii /Users/cburns/src/nipy-sf/nipype/trunk/nipype/interfaces/tests/bar.nii' In [113]: res = mybet.run(outfile = os.path.join('/Users/cburns/tmp/junk', 'not_bar.nii')) In [114]: mybet.inputs.outfile Out[114]: '/Users/cburns/tmp/junk/not_bar.nii' In [115]: mybet.cmdline Out[115]: 'bet foo.nii /Users/cburns/tmp/junk/not_bar.nii' In this case we provide ``outfile`` but not as an absolute path, so the absolute path is generated and used for the ``cmdline`` when run, but ``mybet.inputs.outfile`` is not updated with the absolute path. .. sourcecode:: ipython In [74]: mybet = fsl.Bet(infile='foo.nii', outfile='bar.nii') In [75]: mybet.inputs.outfile Out[75]: 'bar.nii' In [76]: mybet.cmdline Out[76]: 'bet foo.nii /Users/cburns/src/nipy-sf/nipype/trunk/nipype/interfaces/tests/bar.nii' In [77]: res = mybet.run() In [78]: res.runtime.cmdline Out[78]: 'bet foo.nii /Users/cburns/src/nipy-sf/nipype/trunk/nipype/interfaces/tests/bar.nii' In [80]: res.interface.inputs.outfile Out[80]: 'bar.nii' nipype-0.9.2/doc/devel/gitwash/ nipype-0.9.2/doc/devel/gitwash/branch_list.png (binary PNG image data omitted)
-Y5Ur Zvơ\@.)U5v@x8h1ZO@kGd Gu[h {tC_j1B(yds]0zL >[y"4c/vaִGS<#S]U͸!)'7IpR:F,u!Y{k8'/HƐ~*pl] ع' %K*7 [BO& ؁RK|4c$IcZ7ǦQ8r1 A]eX`tP9Y{ڴ=@įǕh֪5d)5Zm޴`DoDշQi ǔ~1LĠٳ>diYdĚY7~=rq'\C}"-3ǐGKM}D8ΘxB ez>E _̌'_Q%60jg">j'f=y8C~M7KG47۠|Q8ID8iwG8 674^r#; X6ک)X4@Rʺض68y.n艄8Lx:^ŕT-h >O//aq%}F@O<'fk&%r߅-| 2uI|WR:M!bRl]-U>]HsԸz23*bpOOypew-1g?k'y"`Ş!=ÉVA;ԫ]}|:,l \d vE~j 2ӄ,C {psײ`Wʗo^WU\A2Lal߱ȅ‚;X`-Α7W0{=9P?Hn#<?!1{V0)F"R3!!*q /k aԸ-Ҵp:t2[dIƝ<2l eI^ mt\p#\w:Syptv(9"xZyY򎭇ൠXMV )o GWI|,Z~qG`{ˠ^B^l%P{?{;{?*+"n'xE3~dZ i GcL_'!}GY6b8P]@gANFiP~ixw` i/܅R0PsCr>_1{k>@,w Ǡe;}꓏7->}C{W>Zwr0"Ѕ@?>ȗR0384Pa0[TۣRD*9 /W(+'03̃G#V"<2W#7Z|Pwn߾#/`G/`P5,= ~3p_ITڹ{9KpE$\W˛8z&\: (̘ V|5(E&%[iS dhhch2W +[ϖxi*r"Dn @tAm컣 s i E3C`r'e"٥Zk !$C?l)hΫh>\Ov7#Ƞ-bsnLę<;]1.^maM8Q⎺n"2(d;2F$`Xeۇ*5֗Af+b5>9 Z+a/k%\Dޘ \8m; ""q6Y2z9^pшhЍ}pƷW2Dۤ-~XI$8PnDC;a5!Mb4ۖ|ZJ]0lg,eNi+7'Y/$42 1OFJܴ/t*%s)V򽩡ͱA$Rڣge Rd8aR{"0E2Ȫ37M+#ȨqPʧO=# jK<>펦*d0^y)ŒMEZ 1nŽezdn "J0kVSGh0SIVDvydّRQADVZCA$$<б=Y¤2 ,;( G&qo MD(O=k"N)#⥬$')qfY lzcuZ̶,^yy |Tј> χ`pg23=* LJݍ4 Q_b?"tB50lt0Eseđew3t:loڱo z-р(gDB~NFwhꌅQsTeN\*ph=IBVpJcDD$W]`D?]+p矈Sۑ &< 0h?ipgG{!hݒ-5x=7B̕%Ֆ>HWfRS*C½+,Rmx#mtahGSjKGMl;ƯRIsJe?Ej.bN_I#ȻhSߌwz= ʯIiG4~G".hbQF26s^OF_6H"=%o qÿ}'a ,:;o6TÓR+w c)X V d-le'bMWx W|_ b # k="3ƈE,):w}Pԇ sG1m3 /&c@ND3, zLǒՂ` NM pp֬ZͿڹ }0=J.]D:t *˳`KUe'_wZfȧ'eRtM6郃<Ҥ"rfλ1|8#C;- oąLglH-'Kq*7ՎYfmxcL@,nױG6 h4bz忇 QGxU3U\VM:C2_#yKexAtŐHb8a^IiƮ21o >{'NGF\[ĉ:njҋʰ!+I%1jί'ƿGoCP`ӄX "PyQe ϮQ t6dl${eaŜ؃5˗㷋9<1%a,zJ Þt|uXgc- vX,Zr؏znCю{1JӸӑ͊Pp镢,Xӂk/=xKقLMFf3${z`(ؒS!_nvj𨿫k5%jy@[{zR ?]-ox:OTާTҹJbN9G;;ɏJ9g)dѦZ+Yhr49)BM9r S+0lnKH]f,CE2#mfoX]R:˜CL_)st'JI\!7fTL9*6prs{5)a }٠P6ZЦ>8І&QW@* О6db(3"mlѶ3#.m-ah,9/bZ' al]U!7n%W7h>bvwܹ/ΌWvO3rטڒО*_ ɓYpZ9%9\npc*ڼUT*野 ssH>Id2+)@Hk1/6x6Kug:JIToIiwv^"2բJe0dV99B:MgOa)˽JK~DµFɗYI dJxHdذ,YXZr<e]pjUYeo.#p oaެ9C4v(; ԱXHDң"bʩp^-§<*HL ? PfqO1_l^q|cԿ&mE @=#iIo粖&{]_xw~ ҇EbIxڻTSFrpѧ?F/Wzs~ '$WqBtu<2e1m܂IMJ9߅J[jɫrR!r4sutDZ\_1O$XWos5,,O?O`*1Ub8>f7C*E"=)?LMD@/C.  Z5ơhM㮢9Vo,>G+2ikq| \9Bl #bp:4]%f;U4"e#R҅[ż1Pygbv$˙'o<_-qaB@a(F lMԦH]އG7<_Afo#:.l>XD@yup;$]vCOB\= ~$?mO`U kìi=w67`ոگ!2=Nl_$?G-Mѷ-L8湫iW`#$ h\} ƍh蝏tyϽ1Qrx ѷۚƉ( LAO5G^0cԡgD+WaYKU>y%uۡ1r0G=|*,_?'q;W'C߿6C19)~Qk 3S1oX8}㣙{UFRR^+b@t:Q |x~)b޼x"6#m9QI=pjd"߀6/c1N 6ChɎw牘DԹ}ײW\Ƃ=M̝>'w #30yH>Gi0:7)&D⽻‡i^ 6G'YǙ8tp;wNV9~v58 '.gzǘԧ.f"H|zpܳWs%9JO[dų6ѱ.\+ʫ'iJOt= %dOT N!ڼys1m]w톎@k*Я?Yl麞*a[>/očy6Gw£ӫ3~ĥ5Í$IC' ehe-ד'C Y8v\.L UL; lY]bOR[fKҍ됵8`oT)SxYɩYٰA@ oJG2ߌA̍3Z9]1czg`'bR 9b"Y{ j[=.3.5ɅuKa1u6+>|6gt?9R;l6$U:~g6CY<޴A $H=*U^mxKY%Idd+ 'r)fYy< zBoz"݀n>3ՅA g:jgJ,+to@~1y>"!\qh/Y~/`v^SK"ƆgJLy[صXLw,A/6fgMڷAP6hݦOCΤD7Xt_K.ooęJ4iYf9xFl0i@ C'4FY̑TiJe%]G>Z->:wX8ohXo!^HQ^bk S14j"2Uޚ4Q6FǞ0 tIh'˽^y?DbkHiҲ R±1DS?N3ˢP xC&7Ȗؽb^z[5oų`ǮY('qR #@"3e39o:a04cR|h~/9fDOi14Tbh,-)N; SC>kkȐ5GW1P9L-4׋cȔ0Q2 (筌4ͪ ef&2pa^wZ:9xK@tnV  d-P5f̮,φbHǕ†/hy.IdFH)8]t<@Ehڥ&9 7=~٠!TWn",.4*o+#֚K}׫[̂wqq1LYo/wAzlV?q okP5B}FhOtg4!_:Io⡶ %S.- _~Ӎ`⽱d\P}4xԚiNJMxmH[;X!]H梑,]sZؽ--YJ{\ yBI ,0MMIKFJr X4B'@fY'`.~)aaĴA+(xwucWڌ#6o!MmHAџ 7 ~ڄY*l7r٢r'i#MYL0s߉ϻ]&Z4_ ř,C-GyP??ޠ`)^T/+mÂӦM }޾t0YR;#k|0w$)ܵÙa0^ !"̟yZ=x:Hq BhRkþlL NNlT!D Мo:g!^Q%bFbc4N_[5qmr/8fp#ѻ{@,KUƼu8i)Z<>kWkll,$]~. 7_CYVCL0TP( fc,ɃT*χ YC*/ZE. :rub,@dp(5~={B-'$ 4>)Ao# */g+ 6Ԁ0sh<*bp=z~uφ5;]K*.f-NcxIENDB`nipype-0.9.2/doc/devel/gitwash/branch_list_compare.png000066400000000000000000000246671227300005300230530ustar00rootroot00000000000000PNG  IHDRL4sRGBbKGD pHYs  tIME0 IDATxy|u4I4mz=-Zh r! 
JUP\WPpZQ ޠTVXE[RP,-6M#Ee|B!2JB!OB B!$B!VB ֒cX]*S&D!^k]3WBH^xe 5 s{ 2;SO︗%fC%gLP <9;[F\/=:Jˤdqɲ>#n{J Bi9qRx|3"L#RBڒ2ג?#!?:d0o(SGDV"=w t08ztfÎT8BĮmeT8lmձ7e&d+ q0өG i+Y0e_sU7̵s]K9NWMd: G #enz6{-J2v7!5yԅNrol>Gj6c/$iC-A)buZw Ŷ۷RL,]څ7(͈߮#..6m0d q:F-!{>2ӗ0{VT˿x۷`j;@B\snѵ: $3H\;ݓr">٫v/U" JRQ#ڸ2h*B\l]ٴIϠ?,0b)sA9SȬQl 'gp+!5UU; qۍ)f%h?~{[ӱ[ PG.r } !1\F]B!% B!$B!B]!B!$B!B]!mces_r݇~U%JB\!e7/I.;g-y|kGbːdKn чֲB]!oZMat/jIPZ)߿n¥فC:ŲEiҮJl۲Ehkܿ&O `;_D.*7Dl! HׅvSisbH,M(.Ub4)hlXqoSI~ʯBhiM Ӓ=`[:b}wvx#_XP;`D 2cb& W fBqstx>r0Ex~ n ؀&fKş¢ZRU& Դ~`; )R!PQ]v39QҚ| UžP C (GH`laJ69нfq&BˁS.Z[5I.-;Cb-1F6<Ѐz]Ť<Il|g7YU[5*Rm*.⭫zUl nl6 hGz-!$.*6_ԊOVQG_Azi`^F86*؋ H`+Jcf\ӡNSgڰ(؈Ѓ^zܞ/'lJOa{]K$%` ^X9oC'_E!,uwV[XPj4r[;nЗl*qCo?nTa()jqnVuѦ} VR 7et zon\ui}74äT}TCpx[)|h܊:qN`WIk6Ǚv^r"0~6 Ź6ErO"e穀hsFwg9R Ԟ͵޸fU }XSܜ]yok% 37'jX!tY95&MKHuCiwS>`Ex<F V>FB {o檞'ȴhx] /~d=јPw1(\{T~-;dkYWx<8qr  !,, ?soUTUU8k3뉫)XN WqĮO捾ju`&[O,L46ޙO5_Vg6 哩 ٷAyi걱|1k*x)aXo7C80Mmg?Cu|5lj¼SD8|.<oԌכylku賯|_ËRVb0d;߫,vt&: 8nt'JP5nc ԂӉON.&4 !įwq<&IqTU ???6mz˝}6)Rt&ޖq귟yjT[k靦RM'=niUY%dU_:⿿]ˇ)LpD[8≯6YvXuW|UpЦRWYp 8p6޸306GܷJ[QyT&=oh˒x7JuxDڲ{R/槊:  -D9& 搐3$$8c g0aL3jk -Kk2K+, FQqEh4bXkֿ[njM,~7rޖMm+QA]IOs MlY (A`7|`y[jN8M@ۺ7Є!x2VPɫp࿷vN-8UK#meaUaފqJ7{Y9xya 1G>-f(pnB\5k^bygd(Y15]8^w#_OB(G9 iF\:#wy$3\ըed}Ŵp }6r%uᄧnz-:_.Jmu]ѺWІ1|ҁBN)ѱT}v1}`8>k5ԻWO]]PΩ~?gQ(8uP}s+h+k xfh.^2.ךb}]_Q]5Z/wUT.ժZ"!MHXɱ&Bb<{UUU5k3+9ZNg|lʔDw N _!!(χwB\-~k̘1(J /ԩSQVK>}NQ5ϥZFu;Zy ,fV-SNhO9_BQ5~gm@P&'jB{kzuẚ.H `K iZM_C{3fk5?Db`432.&ՅSc[PXsx(wԌvEC\E~׆sB!~?SO??W_}bt:o4|V^ݸy#+0~$}>0%q5Naݻ7a^F.b;q+1woy+yav7VBUU̓&3ݛ[|-6TUxv:N 7@U+INySne\T>?vMuƸy}_^E͓߼VJݠnݛ\'y 4+;h0p2@dlf`qCdTi׿O?`dbʔ)`09r$ .l0 5g!CRY0J*T@`2-[͍'SN"яf<,cғ<'~GS"$3۹Խ(F p>]3i}'7Mw/N#4iszt{%O$5yͩvZNgaaAm۵;_Į n2omQE)?{hMZFsitD6OF!>`^W;Y`vUUt' BCuU{\MU?m 1s8;ᶩd(fl7o#ۄ^}L&O_Oe'׵"7?Cܴ3+wQS}/6<q"ϬB!9/vr{2eoѭ0gǼ;י:(#w_]?&w¤QyLo& (z t#'y lPb<)h_wX6!iZ}yޖh%@5qo=OߟeZ]wzhB#7 !75{n"##Oŋcj9r$/`ȑ 毙qvva jFHӆ}̗NqQU.f!cힲ<Չ% @⩢܃S7_ACW܎oՕ>K rVr*ǨBGBꡦVrOU.Z.:y~:km z7.g<tҠʈQ*\3U=칳\2BqI Qߤ4Z\<~L].fտ?`yV71czg}iЇsIW@ɜ{Qy_ݭjxsxj LJOȶoEF?ى6RKv٩dI -|eX9gP!)<@;kϾo4Gxor <7<8n,%$qe7#o fi:U2` Ӡg3ɺYG8[R}&3n]mc{1Ww?;pZn"3 }B|t$1j>/O샓cG>^Z@ؑ{o0o%A[mE!1>z7ulR-JeQ7c-%UyY|4c~*>>w>1l]Xz\KѱpQ+ea>Lۺa߄ZFņ14Cók^Q$FCAeέMb4GrΥ܊泏l-Q/!s.]V>\N_5XYUn(sIP`/bkn[HC  ^c c&MeRJm|<~bԩoƚo UV0}JÄ$sm&?o/~k3ld&ȮT4G~{VR?Uֲ߮~O FLOiEӺH^ mOY`EǤkIRb8)3eTw4LC9OG._) 1uX/|IҪqՏ9Xshңvj>v擣.*t?k[槂J m;F5pURxFh 땈Fc_*̲V@+ |j@Bh4x<$ќqW%Zll3 %¤S,H`l[$0tP\duk.7=eCΠt!TEV@㧥d<ݺBCѨ`wGQbڸs@NAui#C! +IDAT<(8 uB ӌL`jU!`=OqW ;cV}OG_);o,yo,WnEMmL̘ "#Nnl}NYDMFmejQ~FIvaA!R[ OLe)Ir >1+b]bLиP ԢH7Ѥd3KW"QT~4rwJ[.eox8.Ն`Fud׵5q򯢴Nʍ,_N Wb.]7c-#d2I0.@UU'5Wo/bop}=QfPJ69нfqɦBƝp ),n]"44e9]@]Beh}qDMlYWet<32zJ茮8jиb`Uix ˹[y[ &ob/ȺI(ɣiֲ"=G_ \NhJT0(wFP@URR&"-/&5B!c z=3xCJi9wS[3Y[E_W 4Sо8:ϡLe+!)=3x{g9w9&\I/>H@2OαT@lu7ثM%a]ߩ\|Y(D h5 ^So'ʑÒm2D] qFp5Lk1vh]J0wm?*c+.*hNNq:\6 h- !ğ͛7y9%fTV?ȽڳvlƭtKaӆTOnȢgoO& 3bIK[a Z20&d%zZ:QRHf⽽Иaf|2PУ!덄| sly{S2xi_ȂSj/yl z3HayL4×EBYMB>n0gl[[vBZ̨赚}[5113ãi@H`Gՠ3,{9Zz& d;s=RW"1a&w }#WGID~/aׂ,$#`oN̰'Ƒ3u}=6|-UCHՒڦyXhrt nFVE'z$X6ѥ}8JV M/A+md_.찔bui3j`2,%EX &s:`:z)ưuSQR7O=Φk&a9QInk 'p,!aZqL5q8pi:5;,Tu[)=aEu[+8aDx/a`EC(7騢܎`=a'ǵ^Y㸴!`Ȧa+ =PuSܑ[8M wn>t= p=#hYn%Q7Bdk ;Xl#6{eh:eђzwOLɜվ/7X'QZ O{FOݣ F#e !݋q#?l<9;¯YMI<9(Asѳ];evA!Os[ƈPcۉZdB+<\V 6M A%B!n#7[3)D8lŹYdnR' =naPg-$B!B]!B]!B!NEF/B! !B!B! !B:/Q֭[!I!2B!,I!B!B! !B!B! !B!B!DW!BItB! hE$IENDB`nipype-0.9.2/doc/devel/gitwash/configure_git.rst000066400000000000000000000057711227300005300217200ustar00rootroot00000000000000.. _configure-git: =============== Configure git =============== .. 
_git-config-basic: Overview ======== Your personal git_ configurations are saved in the ``.gitconfig`` file in your home directory. Here is an example ``.gitconfig`` file:: [user] name = Your Name email = you@yourdomain.example.com [alias] ci = commit -a co = checkout st = status -a stat = status -a br = branch wdiff = diff --color-words [core] editor = vim [merge] summary = true You can edit this file directly or you can use the ``git config --global`` command:: git config --global user.name "Your Name" git config --global user.email you@yourdomain.example.com git config --global alias.ci "commit -a" git config --global alias.co checkout git config --global alias.st "status -a" git config --global alias.stat "status -a" git config --global alias.br branch git config --global alias.wdiff "diff --color-words" git config --global core.editor vim git config --global merge.summary true To set up on another computer, you can copy your ``~/.gitconfig`` file, or run the commands above. In detail ========= user.name and user.email ------------------------ It is good practice to tell git_ who you are, for labeling any changes you make to the code. The simplest way to do this is from the command line:: git config --global user.name "Your Name" git config --global user.email you@yourdomain.example.com This will write the settings into your git configuration file, which should now contain a user section with your name and email:: [user] name = Your Name email = you@yourdomain.example.com Of course you'll need to replace ``Your Name`` and ``you@yourdomain.example.com`` with your actual name and email address. Aliases ------- You might well benefit from some aliases to common commands. For example, you might well want to be able to shorten ``git checkout`` to ``git co``. Or you may want to alias ``git diff --color-words`` (which gives a nicely formatted output of the diff) to ``git wdiff`` The following ``git config --global`` commands:: git config --global alias.ci "commit -a" git config --global alias.co checkout git config --global alias.st "status -a" git config --global alias.stat "status -a" git config --global alias.br branch git config --global alias.wdiff "diff --color-words" will create an ``alias`` section in your ``.gitconfig`` file with contents like this:: [alias] ci = commit -a co = checkout st = status -a stat = status -a br = branch wdiff = diff --color-words Editor ------ You may also want to make sure that your editor of choice is used :: git config --global core.editor vim Merging ------- To enforce summaries when doing merges (``~/.gitconfig`` file again):: [merge] log = true Or from the command line:: git config --global merge.log true .. include:: links.inc nipype-0.9.2/doc/devel/gitwash/development_workflow.rst000066400000000000000000000174051227300005300233450ustar00rootroot00000000000000.. _development-workflow: ==================== Development workflow ==================== You already have your own forked copy of the nipype_ repository, by following :ref:`forking`, :ref:`set-up-fork`, and you have configured git_ by following :ref:`configure-git`. Workflow summary ================ * Keep your ``master`` branch clean of edits that have not been merged to the main nipype_ development repo. Your ``master`` then will follow the main nipype_ repository. * Start a new *feature branch* for each set of edits that you do. * If you can avoid it, try not to merge other branches into your feature branch while you are working. * Ask for review! 
This way of working really helps to keep work well organized and keeps history as clear as possible. See |emdash| for example |emdash| `linux git workflow`_.
Making a new feature branch =========================== :: git branch my-new-feature git checkout my-new-feature
Generally, you will want to keep this also on your public github_ fork of nipype_. To do this, you `git push`_ this new branch up to your github_ repo. Generally (if you followed the instructions in these pages, and by default), git will have a link to your github_ repo, called ``origin``. You push up to your own repo on github_ with:: git push origin my-new-feature
In git >1.7 you can ensure that the link is correctly set by using the ``--set-upstream`` option:: git push --set-upstream origin my-new-feature
From now on git_ will know that ``my-new-feature`` is related to the ``my-new-feature`` branch in the github_ repo.
The editing workflow ====================
Overview -------- :: # hack hack git add my_new_file git commit -am 'NF - some message' git push
In more detail --------------
#. Make some changes
#. See which files have changed with ``git status`` (see `git status`_). You'll see a listing like this one:: # On branch my-new-feature # Changed but not updated: # (use "git add ..." to update what will be committed) # (use "git checkout -- ..." to discard changes in working directory) # # modified: README # # Untracked files: # (use "git add ..." to include in what will be committed) # # INSTALL no changes added to commit (use "git add" and/or "git commit -a")
#. Check what the actual changes are with ``git diff`` (`git diff`_).
#. Add any new files to version control with ``git add new_file_name`` (see `git add`_).
#. To commit all modified files into the local copy of your repo, do ``git commit -am 'A commit message'``. Note the ``-am`` options to ``commit``. The ``m`` flag just signals that you're going to type a message on the command line. The ``a`` flag |emdash| you can just take on faith |emdash| or see `why the -a flag?`_ |emdash| and the helpful use-case description in the `tangled working copy problem`_. The `git commit`_ manual page might also be useful.
#. To push the changes up to your forked repo on github_, do a ``git push`` (see `git push`_).
Asking for code review ======================
#. Go to your repo URL |emdash| e.g. ``http://github.com/your-user-name/nipype``.
#. Click on the *Branch list* button: .. image:: branch_list.png
#. Click on the *Compare* button for your feature branch |emdash| here ``my-new-feature``: .. image:: branch_list_compare.png
#. If asked, select the *base* and *comparison* branch names you want to compare. Usually these will be ``master`` and ``my-new-feature`` (where that is your feature branch name).
#. At this point you should get a nice summary of the changes. Copy the URL for this, and post it to the `nipype mailing list`_, asking for review. The URL will look something like: ``http://github.com/your-user-name/nipype/compare/master...my-new-feature``. There's an example at http://github.com/matthew-brett/nipy/compare/master...find-install-data. See: http://github.com/blog/612-introducing-github-compare-view for more detail.
The generated comparison is between your feature branch ``my-new-feature``, and the place in ``master`` from which you branched ``my-new-feature``. In other words, you can keep updating ``master`` without interfering with the output from the comparison. More detail? Note the three dots in the URL above (``master...my-new-feature``).
..
admonition:: Two vs three dots
Imagine a series of commits A, B, C, D... Imagine that there are two branches, *topic* and *master*. You branched *topic* off *master* when *master* was at commit 'E'. The graph of the commits looks like this:: A---B---C topic / D---E---F---G master
Then:: git diff master..topic will output the difference from G to C (i.e. with effects of F and G), while:: git diff master...topic would output just differences in the topic branch (i.e. only A, B, and C). [#thank_yarik]_
Asking for your changes to be merged with the main repo =======================================================
When you are ready to ask for the merge of your code:
#. Go to the URL of your forked repo, say ``http://github.com/your-user-name/nipype.git``.
#. Click on the 'Pull request' button: .. image:: pull_button.png Enter a message; we suggest you select only ``nipype`` as the recipient. The message will go to the `nipype mailing list`_. Please feel free to add others from the list as you like.
Merging from trunk ==================
This updates your code from the upstream `nipype github`_ repo.
Overview -------- :: # go to your master branch git checkout master # pull changes from github git fetch upstream # merge from upstream git merge upstream/master
In detail ---------
We suggest that you do this only for your ``master`` branch, and leave your 'feature' branches unmerged, to keep their history as clean as possible. This makes code review easier:: git checkout master
Make sure you have done :ref:`linking-to-upstream`. Merge the upstream code into your current development by first pulling the upstream repo to a copy on your local machine:: git fetch upstream then merging into your current branch:: git merge upstream/master
Deleting a branch on github_ ============================ :: git checkout master # delete branch locally git branch -D my-unwanted-branch # delete branch on github git push origin :my-unwanted-branch
(Note the colon ``:`` before ``my-unwanted-branch``.) See also: http://github.com/guides/remove-a-remote-branch
Several people sharing a single repository ==========================================
If you want to work on some stuff with other people, where you are all committing into the same repository, or even the same branch, then just share it via github_. First fork nipype into your account, as from :ref:`forking`. Then, go to your forked repository github page, say ``http://github.com/your-user-name/nipype``. Click on the 'Admin' button, and add anyone else to the repo as a collaborator: .. image:: pull_button.png
Now all those people can do:: git clone git@github.com:your-user-name/nipype.git
Remember that links starting with ``git@`` use the ssh protocol and are read-write; links starting with ``git://`` are read-only. Your collaborators can then commit directly into that repo with the usual:: git commit -am 'ENH - much better code' git push origin master # pushes directly into your repo
Exploring your repository =========================
To see a graphical representation of the repository branches and commits:: gitk --all
To see a linear list of commits for this branch:: git log
You can also look at the `network graph visualizer`_ for your github_ repo.
.. include:: links.inc
.. rubric:: Footnotes
.. [#thank_yarik] Thanks to Yarik Halchenko for this explanation.
nipype-0.9.2/doc/devel/gitwash/following_latest.rst
..
_following-latest: ============================= Following the latest source =============================
These are the instructions if you just want to follow the latest *nipype* source, but you don't need to do any development for now.
The steps are:
* :ref:`install-git`
* get local copy of the git repository from github_
* update local copy from time to time
Get the local copy of the code ==============================
From the command line:: git clone git://github.com/nipy/nipype.git
You now have a copy of the code tree in the new ``nipype`` directory.
Updating the code =================
From time to time you may want to pull down the latest code. Do this with:: cd nipype git pull
The tree in ``nipype`` will now have the latest changes from the initial repository.
.. include:: links.inc
nipype-0.9.2/doc/devel/gitwash/forking_button.png [binary PNG image data omitted]
nipype-0.9.2/doc/devel/gitwash/forking_hell.rst
..
_forking: ========================================== Making your own copy (fork) of nipype ========================================== You need to do this only once. The instructions here are very similar to the instructions at http://help.github.com/forking/ |emdash| please see that page for more detail. We're repeating some of it here just to give the specifics for the nipype_ project, and to suggest some default names. Set up and configure a github_ account ====================================== If you don't have a github_ account, go to the github_ page, and make one. You then need to configure your account to allow write access |emdash| see the ``Generating SSH keys`` help on `github help`_. Create your own forked copy of nipype_ =========================================== #. Log into your github_ account. #. Go to the nipype_ github home at `nipype github`_. #. Click on the *fork* button: .. image:: forking_button.png Now, after a short pause and some 'Hardcore forking action', you should find yourself at the home page for your own forked copy of nipype_. .. include:: links.inc nipype-0.9.2/doc/devel/gitwash/git_development.rst000066400000000000000000000003151227300005300222460ustar00rootroot00000000000000.. _git-development: ===================== Git for development ===================== Contents: .. toctree:: :maxdepth: 2 forking_hell set_up_fork configure_git development_workflow nipype-0.9.2/doc/devel/gitwash/git_install.rst000066400000000000000000000011111227300005300213650ustar00rootroot00000000000000.. _install-git: ============= Install git ============= Overview ======== ================ ============= Debian / Ubuntu ``sudo apt-get install git-core`` Fedora ``sudo yum install git-core`` Windows Download and install msysGit_ OS X Use the git-osx-installer_ ================ ============= In detail ========= See the git_ page for the most recent information. Have a look at the github_ install help pages available from `github help`_ There are good instructions here: http://book.git-scm.com/2_installing_git.html .. include:: links.inc nipype-0.9.2/doc/devel/gitwash/git_intro.rst000066400000000000000000000010361227300005300210600ustar00rootroot00000000000000============== Introduction ============== These pages describe a git_ and github_ workflow for the nipype_ project. There are several different workflows here, for different ways of working with *nipype*. This is not a comprehensive git_ reference, it's just a workflow for our own project. It's tailored to the github_ hosting service. You may well find better or quicker ways of getting stuff done with git_, but these should get you started. For general resources for learning git_ see :ref:`git-resources`. .. include:: links.inc nipype-0.9.2/doc/devel/gitwash/git_links.inc000066400000000000000000000060601227300005300210100ustar00rootroot00000000000000.. This (-*- rst -*-) format file contains commonly used link targets and name substitutions. It may be included in many files, therefore it should only contain link targets and name substitutions. Try grepping for "^\.\. _" to find plausible candidates for this list. .. NOTE: reST targets are __not_case_sensitive__, so only one target definition is needed for nipy, NIPY, Nipy, etc... .. git stuff .. _git: http://git-scm.com/ .. _github: http://github.com .. _github help: http://help.github.com .. _msysgit: http://code.google.com/p/msysgit/downloads/list .. _git-osx-installer: http://code.google.com/p/git-osx-installer/downloads/list .. _subversion: http://subversion.tigris.org/ .. 
_git cheat sheet: http://github.com/guides/git-cheat-sheet .. _pro git book: http://progit.org/ .. _git svn crash course: http://git-scm.com/course/svn.html .. _learn.github: http://learn.github.com/ .. _network graph visualizer: http://github.com/blog/39-say-hello-to-the-network-graph-visualizer .. _git user manual: http://www.kernel.org/pub/software/scm/git/docs/user-manual.html .. _git tutorial: http://www.kernel.org/pub/software/scm/git/docs/gittutorial.html .. _git community book: http://book.git-scm.com/ .. _git ready: http://www.gitready.com/ .. _git casts: http://www.gitcasts.com/ .. _Fernando's git page: http://www.fperez.org/py4science/git.html .. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html .. _git concepts: http://www.eecs.harvard.edu/~cduan/technical/git/ .. _git clone: http://www.kernel.org/pub/software/scm/git/docs/git-clone.html .. _git checkout: http://www.kernel.org/pub/software/scm/git/docs/git-checkout.html .. _git commit: http://www.kernel.org/pub/software/scm/git/docs/git-commit.html .. _git push: http://www.kernel.org/pub/software/scm/git/docs/git-push.html .. _git pull: http://www.kernel.org/pub/software/scm/git/docs/git-pull.html .. _git add: http://www.kernel.org/pub/software/scm/git/docs/git-add.html .. _git status: http://www.kernel.org/pub/software/scm/git/docs/git-status.html .. _git diff: http://www.kernel.org/pub/software/scm/git/docs/git-diff.html .. _git log: http://www.kernel.org/pub/software/scm/git/docs/git-log.html .. _git branch: http://www.kernel.org/pub/software/scm/git/docs/git-branch.html .. _git remote: http://www.kernel.org/pub/software/scm/git/docs/git-remote.html .. _git config: http://www.kernel.org/pub/software/scm/git/docs/git-config.html .. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html .. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html .. _tangled working copy problem: http://tomayko.com/writings/the-thing-about-git .. _git management: http://kerneltrap.org/Linux/Git_Management .. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html .. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html .. _git foundation: http://matthew-brett.github.com/pydagogue/foundation.html .. other stuff .. _python: http://www.python.org .. |emdash| unicode:: U+02014 nipype-0.9.2/doc/devel/gitwash/git_resources.rst000066400000000000000000000034441227300005300217440ustar00rootroot00000000000000.. _git-resources: ================ git_ resources ================ Tutorials and summaries ======================= * `github help`_ has an excellent series of how-to guides. * `learn.github`_ has an excellent series of tutorials * The `pro git book`_ is a good in-depth book on git. * A `git cheat sheet`_ is a page giving summaries of common commands. * The `git user manual`_ * The `git tutorial`_ * The `git community book`_ * `git ready`_ |emdash| a nice series of tutorials * `git casts`_ |emdash| video snippets giving git how-tos. * `git magic`_ |emdash| extended introduction with intermediate detail * The `git parable`_ is an easy read explaining the concepts behind git. * Our own `git foundation`_ expands on the `git parable`_. 
* Fernando Perez' git page |emdash| `Fernando's git page`_ |emdash| many links and tips
* A good but technical page on `git concepts`_
* `git svn crash course`_: git_ for those of us used to subversion_
Advanced git workflow =====================
There are many ways of working with git_; here are some posts on the rules of thumb that other projects have come up with:
* Linus Torvalds on `git management`_
* Linus Torvalds on `linux git workflow`_. Summary: use the git tools to make the history of your edits as clean as possible; merge from upstream edits as little as possible in branches where you are doing active development.
Manual pages online ===================
You can get these on your own machine with (e.g.) ``git help push`` or (same thing) ``git push --help``, but, for convenience, here are the online manual pages for some common commands:
* `git add`_ * `git branch`_ * `git checkout`_ * `git clone`_ * `git commit`_ * `git config`_ * `git diff`_ * `git log`_ * `git pull`_ * `git push`_ * `git remote`_ * `git status`_
.. include:: links.inc
nipype-0.9.2/doc/devel/gitwash/index.rst
.. _using-git: Working with *nipype* source code ====================================== Contents: .. toctree:: :maxdepth: 2 git_intro git_install following_latest patching git_development git_resources
nipype-0.9.2/doc/devel/gitwash/known_projects.inc
.. Known projects .. PROJECTNAME placeholders .. _PROJECTNAME: http://neuroimaging.scipy.org .. _`PROJECTNAME github`: http://github.com/nipy .. _`PROJECTNAME mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel .. numpy .. _numpy: http://numpy.scipy.org .. _`numpy github`: http://github.com/numpy/numpy .. _`numpy mailing list`: http://mail.scipy.org/mailman/listinfo/numpy-discussion .. scipy .. _scipy: http://www.scipy.org .. _`scipy github`: http://github.com/scipy/scipy .. _`scipy mailing list`: http://mail.scipy.org/mailman/listinfo/scipy-dev .. nipy .. _nipy: http://nipy.org/nipy .. _`nipy github`: http://github.com/nipy/nipy .. _`nipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel .. ipython .. _ipython: http://ipython.scipy.org .. _`ipython github`: http://github.com/ipython/ipython .. _`ipython mailing list`: http://mail.scipy.org/mailman/listinfo/IPython-dev .. dipy .. _dipy: http://nipy.org/dipy .. _`dipy github`: http://github.com/Garyfallidis/dipy .. _`dipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel .. nibabel .. _nibabel: http://nipy.org/nibabel .. _`nibabel github`: http://github.com/nipy/nibabel .. _`nibabel mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel .. marsbar .. _marsbar: http://marsbar.sourceforge.net .. _`marsbar github`: http://github.com/matthew-brett/marsbar .. _`MarsBaR mailing list`: https://lists.sourceforge.net/lists/listinfo/marsbar-users
nipype-0.9.2/doc/devel/gitwash/links.inc
.. compiling links file .. include:: known_projects.inc .. include:: this_project.inc .. include:: git_links.inc
nipype-0.9.2/doc/devel/gitwash/patching.rst
================ Making a patch ================
You've discovered a bug or something else you want to change in nipype_ |emdash| excellent! You've worked out a way to fix it |emdash| even better!
You want to tell us about it |emdash| best of all! The easiest way is to make a *patch* or set of patches. Here we explain how. Making a patch is the simplest and quickest, but if you're going to be doing anything more than simple quick things, please consider following the :ref:`git-development` model instead. .. _making-patches: Making patches ============== Overview -------- :: # tell git who you are git config --global user.email you@yourdomain.example.com git config --global user.name "Your Name Comes Here" # get the repository if you don't have it git clone git://github.com/nipy/nipype.git # make a branch for your patching cd nipype git branch the-fix-im-thinking-of git checkout the-fix-im-thinking-of # hack, hack, hack # Tell git about any new files you've made git add somewhere/tests/test_my_bug.py # commit work in progress as you go git commit -am 'BF - added tests for Funny bug' # hack hack, hack git commit -am 'BF - added fix for Funny bug' # make the patch files git format-patch -M -C master Then, send the generated patch files to the `nipype mailing list`_ |emdash| where we will thank you warmly. In detail --------- #. Tell git_ who you are so it can label the commits you've made:: git config --global user.email you@yourdomain.example.com git config --global user.name "Your Name Comes Here" #. If you don't already have one, clone a copy of the nipype_ repository:: git clone git://github.com/nipy/nipype.git cd nipype #. Make a 'feature branch'. This will be where you work on your bug fix. It's nice and safe and leaves you with access to an unmodified copy of the code in the main branch:: git branch the-fix-im-thinking-of git checkout the-fix-im-thinking-of #. Do some edits, and commit them as you go:: # hack, hack, hack # Tell git about any new files you've made git add somewhere/tests/test_my_bug.py # commit work in progress as you go git commit -am 'BF - added tests for Funny bug' # hack hack, hack git commit -am 'BF - added fix for Funny bug' Note the ``-am`` options to ``commit``. The ``m`` flag just signals that you're going to type a message on the command line. The ``a`` flag |emdash| you can just take on faith |emdash| or see `why the -a flag?`_. #. When you have finished, check you have committed all your changes:: git status #. Finally, make your commits into patches. You want all the commits since you branched from the ``master`` branch:: git format-patch -M -C master You will now have several files named for the commits:: 0001-BF-added-tests-for-Funny-bug.patch 0002-BF-added-fix-for-Funny-bug.patch Send these files to the `nipype mailing list`_. When you are done, to switch back to the main copy of the code, just return to the ``master`` branch:: git checkout master Moving from patching to development =================================== If you find you have done some patches, and you have one or more feature branches, you will probably want to switch to development mode. You can do this with the repository you have. Fork the nipype_ repository on github_ |emdash| :ref:`forking`. Then:: # checkout and refresh master branch from main repo git checkout master git pull origin master # rename pointer to main repository to 'upstream' git remote rename origin upstream # point your repo to default read / write to your fork on github git remote add origin git@github.com:your-user-name/nipype.git # push up any branches you've made and want to keep git push origin the-fix-im-thinking-of Then you can, if you want, follow the :ref:`development-workflow`. .. 
include:: links.inc
nipype-0.9.2/doc/devel/gitwash/pull_button.png [binary PNG image data omitted]
nipype-0.9.2/doc/devel/gitwash/set_up_fork.rst
.. _set-up-fork: ================== Set up your fork ==================
First you follow the instructions for :ref:`forking`.
Overview ======== :: git clone git@github.com:your-user-name/nipype.git cd nipype git remote add upstream git://github.com/nipy/nipype.git
In detail =========
Clone your fork ---------------
#. Clone your fork to the local computer with ``git clone git@github.com:your-user-name/nipype.git``
#. Investigate. Change directory to your new repo: ``cd nipype``. Then ``git branch -a`` to show you all branches. You'll get something like:: * master remotes/origin/master This tells you that you are currently on the ``master`` branch, and that you also have a ``remote`` connection to ``origin/master``. What remote repository is ``remote/origin``? Try ``git remote -v`` to see the URLs for the remote. They will point to your github_ fork.
Now you want to connect to the upstream `nipype github`_ repository, so you can merge in changes from trunk.
.. _linking-to-upstream: Linking your repository to the upstream repo -------------------------------------------- :: cd nipype git remote add upstream git://github.com/nipy/nipype.git
``upstream`` here is just the arbitrary name we're using to refer to the main nipype_ repository at `nipype github`_. Note that we've used ``git://`` for the URL rather than ``git@``. The ``git://`` URL is read only. This means that we can't accidentally (or deliberately) write to the upstream repo, and we are only going to use it to merge into our own code.
Just for your own satisfaction, show yourself that you now have a new 'remote', with ``git remote -v show``, giving you something like:: upstream git://github.com/nipy/nipype.git (fetch) upstream git://github.com/nipy/nipype.git (push) origin git@github.com:your-user-name/nipype.git (fetch) origin git@github.com:your-user-name/nipype.git (push)
.. include:: links.inc
nipype-0.9.2/doc/devel/gitwash/this_project.inc
.. nipype .. _nipype: http://nipy.org/nipype .. _`nipype github`: http://github.com/nipy/nipype .. _`nipype mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel
nipype-0.9.2/doc/devel/index.rst
.. _developers-guide-index: ================= Developer Guide =================
:Release: |version| :Date: |today|
Since nipype is part of the NIPY_ project, we follow the same conventions documented in the `NIPY Developers Guide `_. For bleeding-edge version help see `Nightly documentation `_
.. toctree:: :maxdepth: 2 writing_custom_interfaces gitwash/index architecture provenance
.. include:: ../links_names.txt
nipype-0.9.2/doc/devel/interface_specs.rst
..
_interface_specs: ======================== Interface Specifications ========================
Before you start ----------------
Nipype is a young project maintained by an enthusiastic group of developers. Even though the documentation might be sparse or cryptic at times, we strongly encourage you to contact us on the official nipype developers mailing list in case of any trouble: nipy-devel@neuroimaging.scipy.org (we are sharing a mailing list with the nipy community, therefore please add ``[nipype]`` to the message title).
Overview --------
We're using the `Enthought Traits `_ package for all of our inputs and outputs. Traits allows us to validate user inputs and provides a mechanism to handle all the *special cases* in a simple and concise way through metadata. With the metadata, each input/output can have an optional set of metadata attributes (described in more detail below). The machinery for handling the metadata is located in the base classes, so all subclasses use the same code to handle these cases. This is in contrast to our previous code, where every class defined its own _parse_inputs, run and aggregate_outputs methods to handle these cases, which of course led to a dozen different ways to solve the same problem.
Traits is a big package and there is a lot to learn in order to take full advantage of it. But don't be intimidated! To write a Nipype Trait Specification, you only need to learn a few of the basics of Traits. Here are a few starting points in the documentation:
* What are Traits? The `Introduction in the User Manual `_ gives a brief description of the functionality traits provides.
* Traits and metadata. The `second section of the User Manual `_ gives more details on traits and how to use them. Plus there is a section describing metadata, including the metadata all traits have.
* If you're interested in more of a *big picture* overview, `Gael wrote a good tutorial `_ that shows how to write a scientific application using traits for the benefit of the generated UI components. (For now, Nipype is not taking advantage of the generated UI feature of traits.)
Traits version ^^^^^^^^^^^^^^
We're using Traits version 3.x, which can be installed as part of `EPD `_ or from `pypi `_.
More documentation ^^^^^^^^^^^^^^^^^^
Not everything is documented in the User Manual; in those cases the `enthought-dev mailing list `_ or the `API docs `_ is your next place to look.
Nipype Interface Specifications -------------------------------
Each interface class defines two specifications: 1) an InputSpec and 2) an OutputSpec. Each of these is prefixed with the class name of the interface. For example, Bet has these specs:
- BETInputSpec
- BETOutputSpec
Each of these Specs is a class, derived from a base TraitedSpec class (more on these below). The InputSpec consists of attributes which correspond to the different parameters for the tool they wrap/interface. In the case of a command-line tool like Bet, the InputSpec attributes correspond to the different command-line parameters that can be passed to Bet. If you are familiar with the Nipype 0.2 code-base, these attributes are the same as the keys in the opt_map dictionaries.
When an interface class is instantiated, the InputSpec is bound to the ``inputs`` attribute of that object. Below is an example of how the ``inputs`` appear to a user for Bet:: >>> from nipype.interfaces import fsl >>> bet = fsl.BET() >>> type(bet.inputs) >>> bet.inputs.
bet.inputs.__class__ bet.inputs.center bet.inputs.__delattr__ bet.inputs.environ bet.inputs.__doc__ bet.inputs.frac bet.inputs.__getattribute__ bet.inputs.functional bet.inputs.__hash__ bet.inputs.hashval bet.inputs.__init__ bet.inputs.infile bet.inputs.__new__ bet.inputs.items bet.inputs.__reduce__ bet.inputs.mask bet.inputs.__reduce_ex__ bet.inputs.mesh bet.inputs.__repr__ bet.inputs.nooutput bet.inputs.__setattr__ bet.inputs.outfile bet.inputs.__str__ bet.inputs.outline bet.inputs._generate_handlers bet.inputs.outputtype bet.inputs._get_hashval bet.inputs.radius bet.inputs._hash_infile bet.inputs.reduce_bias bet.inputs._xor_inputs bet.inputs.skull bet.inputs._xor_warn bet.inputs.threshold bet.inputs.args bet.inputs.vertical_gradient
Each Spec inherits from a parent Spec. The parent Specs provide attribute(s) that are common to all child classes. For example, FSL InputSpecs inherit from interfaces.fsl.base.FSLTraitedSpec. FSLTraitedSpec defines an ``outputtype`` attribute, which stores the file type (NIFTI, NIFTI_PAIR, etc...) for all generated output files.
InputSpec class hierarchy ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Below is the current class hierarchy for InputSpec classes (from base class down to subclasses):
``TraitedSpec``: Nipype's primary base class for all Specs. Provides initialization, some nipype-specific methods and any trait handlers we define. Inherits from traits.HasTraits.
``BaseInterfaceInputSpec``: Defines inputs common to all Interfaces (``ignore_exception``). If in doubt, inherit from this.
``CommandLineInputSpec``: Defines inputs common to all command-line classes (``args`` and ``environ``)
``FSLTraitedSpec``: Defines inputs common to all FSL classes (``outputtype``)
``SPMCommandInputSpec``: Defines inputs common to all SPM classes (``matlab_cmd``, ``path``, and ``mfile``)
``FSTraitedSpec``: Defines inputs common to all FreeSurfer classes (``subjects_dir``)
``MatlabInputSpec``: Defines inputs common to all Matlab classes (``script``, ``nodesktop``, ``nosplash``, ``logfile``, ``single_comp_thread``, ``mfile``, ``script_file``, and ``paths``)
``SlicerCommandLineInputSpec``: Defines inputs common to all Slicer classes (``module``)
Most developers will only need to code at the interface level (i.e. implementing a custom class inheriting from one of the above classes).
Output Specs ^^^^^^^^^^^^
The OutputSpec defines the outputs that are generated, or possibly generated depending on inputs, by the tool. OutputSpecs inherit from ``interfaces.base.TraitedSpec`` directly.
Traited Attributes ------------------
Each specification attribute is an instance of a Trait class. These classes encapsulate many standard Python types like Float and Int, but with additional behavior like type checking. (*See the documentation on traits for more information on these trait types.*) To handle unique behaviors of our attributes we use traits metadata. These are keyword arguments supplied in the initialization of the attributes. The base classes ``BaseInterface`` and ``CommandLine`` (defined in ``nipype.interfaces.base``) check for the existence and/or value of these metadata and handle the inputs/outputs accordingly. For example, all mandatory parameters will have the ``mandatory = True`` metadata:: class BetInputSpec(FSLTraitedSpec): infile = File(exists=True, desc = 'input file to skull strip', argstr='%s', position=0, mandatory=True)
Common ^^^^^^
``exists`` For files, use ``nipype.interfaces.base.File`` as the trait type.
If the file must exist for the tool to execute, specify ``exists = True`` in the initialization of File (as shown in BetInputSpec above). This will trigger the underlying traits code to confirm the file assigned to that *input* actually exists. If it does not exist, the user will be presented with an error message:: >>> bet.inputs.infile = 'does_not_exist.nii' ------------------------------------------------------------ Traceback (most recent call last): File "", line 1, in File "/Users/cburns/local/lib/python2.5/site-packages/nipype/interfaces/base.py", line 76, in validate self.error( object, name, value ) File "/Users/cburns/local/lib/python2.5/site-packages/enthought/traits/trait_handlers.py", line 175, in error value ) TraitError: The 'infile' trait of a BetInputSpec instance must be a file name, but a value of 'does_not_exist.nii' was specified.
``hash_files`` To be used with inputs that are defining output filenames. When this flag is set to false, Nipype will not try to hash any files described by this input. This is useful to avoid unnecessary reruns when the specified output file already exists and has changed.
``desc`` All trait objects have a set of default metadata attributes. ``desc`` is one of those and is used as a simple, one-line docstring. The ``desc`` is printed when users use the ``help()`` methods. **Required:** This metadata is required by all nipype interface classes.
``usedefault`` Set this metadata to True when the *default value* for the trait type of this attribute is an acceptable value. All trait objects have a default value, ``traits.Int`` has a default of ``0``, ``traits.Float`` has a default of ``0.0``, etc... You can also define a default value when you define the class. For example, in the code below all objects of ``Foo`` will have a default value of 12 for ``x``:: >>> import enthought.traits.api as traits >>> class Foo(traits.HasTraits): ... x = traits.Int(12) ... y = traits.Int ... >>> foo = Foo() >>> foo.x 12 >>> foo.y 0
Nipype only passes ``inputs`` on to the underlying package if they have been defined (more on this later). So if you specify ``usedefault = True``, you are telling the parser to pass the default value on to the underlying package. Let's look at the InputSpec for SPM Realign:: class RealignInputSpec(BaseInterfaceInputSpec): jobtype = traits.Enum('estwrite', 'estimate', 'write', desc='one of: estimate, write, estwrite', usedefault=True)
Here we've defined ``jobtype`` to be an enumerated trait type, ``Enum``, which can be set to one of the following: ``estwrite``, ``estimate``, or ``write``. In a container, the default is always the first element. So in this case, the default will be ``estwrite``:: >>> from nipype.interfaces import spm >>> rlgn = spm.Realign() >>> rlgn.inputs.infile >>> rlgn.inputs.jobtype 'estwrite'
``xor`` and ``requires`` Both of these accept a list of trait names. The ``xor`` metadata reflects mutually exclusive traits, while the requires metadata reflects traits that have to be set together. When a xor-ed trait is set, all other traits belonging to the list are set to Undefined. The function check_mandatory_inputs ensures that all requirements (both mandatory and via the requires metadata) are satisfied. These are also reflected in the help function.
``copyfile`` This is metadata for a File or Directory trait that is relevant only in the context of wrapping an interface in a `Node` or `MapNode`. `copyfile` can be set to either `True` or `False`.
`False` indicates that the contents should be symlinked, while `True` indicates that the contents should be copied over.
``min_ver`` and ``max_ver`` These metadata determine if a particular trait will be available when a given version of the underlying interface runs. Note that this check is performed at runtime:: class RealignInputSpec(BaseInterfaceInputSpec): jobtype = traits.Enum('estwrite', 'estimate', 'write', min_ver='5', usedefault=True)
``deprecated`` and ``new_name`` This is metadata for removing or renaming an input field from a spec:: class RealignInputSpec(BaseInterfaceInputSpec): jobtype = traits.Enum('estwrite', 'estimate', 'write', deprecated='0.8', desc='one of: estimate, write, estwrite', usedefault=True)
In the above example this means that the `jobtype` input is deprecated and will be removed in version 0.8. Deprecation should be set to two versions from the current release. A `TraitError` is raised after the package version crosses the deprecation version. For inputs that are being renamed, one can specify the new name of the field:: class RealignInputSpec(BaseInterfaceInputSpec): jobtype = traits.Enum('estwrite', 'estimate', 'write', deprecated='0.8', new_name='job_type', desc='one of: estimate, write, estwrite', usedefault=True) job_type = traits.Enum('estwrite', 'estimate', 'write', desc='one of: estimate, write, estwrite', usedefault=True)
In the above example, the `jobtype` field is being renamed to `job_type`. When `new_name` is provided it must exist as a trait, otherwise an exception will be raised.
.. note:: The version information for `min_ver`, `max_ver` and `deprecated` has to be provided as a string. For example, `min_ver='0.1'`.
CommandLine ^^^^^^^^^^^
``argstr`` The metadata keyword for specifying the format strings for the parameters. This was the *value* string in the opt_map dictionaries of the Nipype 0.2 code. If we look at the ``FlirtInputSpec``, the ``argstr`` for the reference file corresponds to the argument string one would need to provide with the command-line version of ``flirt``:: class FlirtInputSpec(FSLTraitedSpec): reference = File(exists = True, argstr = '-ref %s', mandatory = True, position = 1, desc = 'reference file')
**Required:** This metadata is required by all command-line interface classes.
``position`` This metadata is used to specify the position of arguments. Both positive and negative values are accepted. ``position = 0`` will position this argument as the first parameter after the command name. ``position = -1`` will position this argument as the last parameter, after all other parameters.
``genfile`` If True, the ``genfile`` metadata specifies that a filename should be generated for this parameter *if-and-only-if* the user did not provide one. The nipype convention is to automatically generate output filenames when not specified by the user, both as a convenience for the user and so the pipeline can easily gather the outputs. Requires the ``_gen_filename()`` method to be implemented. This approach should be used if the desired file name depends on some runtime variables (such as the file name of one of the inputs, or the current working directory). If the name should be fixed, it is recommended to just use ``usedefault``.
``sep`` For List traits, the string with which elements of the list will be joined.
``name_source`` Indicates the list of input fields from which the value of the current File output variable will be drawn. This input field must be the name of a File.
SPM ^^^ ``field`` name of the structure referred to by the SPM job manager **Required:** This metadata is required by all SPM-mediated interface classes. Defining an interface class --------------------------- Common ^^^^^^ When you define an interface class, you will define these attributes and methods: * ``input_spec``: the InputSpec * ``output_spec``: the OutputSpec * ``_list_outputs()``: Returns a dictionary containing names of generated files that are expected after the package completes execution. This is used by ``BaseInterface.aggregate_outputs`` to gather all output files for the pipeline. CommandLine ^^^^^^^^^^^ For command-line interfaces: * ``_cmd``: the command-line command If you used genfile: * ``_gen_filename(name)``: Generate filename, used for filenames that nipype generates as a convenience for users. This is for parameters that are required by the wrapped package, but we're generating from some other parameter. For example, ``BET.inputs.outfile`` is required by BET but we can generate the name from ``BET.inputs.infile``. Override this method in a subclass to handle it. And optionally: * ``_format_arg(name, spec, value)``: For extra formatting of the input values before passing them to the generic ``_parse_inputs()`` method. For example this is the class definition for Flirt, minus the docstring:: class FLIRTInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='-in %s', mandatory=True, position=0, desc='input file') reference = File(exists=True, argstr='-ref %s', mandatory=True, position=1, desc='reference file') out_file = File(argstr='-out %s', desc='registered output file', name_source=['in_file'], name_template='%s_flirt', position=2, hash_files=False) out_matrix_file = File(argstr='-omat %s', name_source=['in_file'], keep_extension=True, name_template='%s_flirt.mat', desc='output affine matrix in 4x4 ascii format', position=3, hash_files=False) out_log = File(name_source=['in_file'], keep_extension=True, requires=['save_log'], name_template='%s_flirt.log', desc='output log') ... class FLIRTOutputSpec(TraitedSpec): out_file = File(exists=True, desc='path/name of registered file (if generated)') out_matrix_file = File(exists=True, desc='path/name of calculated affine transform ' '(if generated)') out_log = File(desc='path/name of output log (if generated)') class Flirt(FSLCommand): _cmd = 'flirt' input_spec = FLIRTInputSpec output_spec = FLIRTOutputSpec There are two possible output files, ``out_file`` and ``out_matrix_file``, both of which can be generated if not specified by the user (here via the ``name_source``/``name_template`` metadata). ``FSLCommand`` also provides the ``self._gen_fname()`` helper method for generating filenames (with extensions conforming to FSLOUTPUTTYPE). See also :doc:`cmd_interface_devel`.
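To make the ``_gen_filename()`` and ``_format_arg()`` hooks more concrete, here is a minimal hypothetical wrapper sketched for this guide (the ``mytool`` command and its options are invented, and ``_list_outputs()`` is omitted; it would be written as described above)::

    import os
    from nipype.interfaces.base import (CommandLine, CommandLineInputSpec,
                                        TraitedSpec, File, traits)
    from nipype.utils.filemanip import split_filename

    class MyToolInputSpec(CommandLineInputSpec):
        in_file = File(exists=True, argstr='-i %s', mandatory=True,
                       position=0, desc='input file')
        out_file = File(argstr='-o %s', genfile=True, position=1,
                        desc='output file (generated from in_file if not set)')
        smooth = traits.Float(argstr='-s %.2f', desc='smoothing width in mm')

    class MyToolOutputSpec(TraitedSpec):
        out_file = File(exists=True, desc='output file')

    class MyTool(CommandLine):
        _cmd = 'mytool'
        input_spec = MyToolInputSpec
        output_spec = MyToolOutputSpec

        def _gen_filename(self, name):
            # called only for genfile=True inputs left undefined by the user
            if name == 'out_file':
                _, base, ext = split_filename(self.inputs.in_file)
                return os.path.abspath(base + '_mytool' + ext)
            return None

        def _format_arg(self, name, spec, value):
            # extra formatting before the generic _parse_inputs() machinery
            if name == 'smooth':
                # e.g. clamp negative values to zero before rendering '-s'
                value = max(value, 0.0)
            return super(MyTool, self)._format_arg(name, spec, value)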
SPM ^^^ For SPM-mediated interfaces: * ``_jobtype`` and ``_jobname``: special names used by the SPM job manager. You can find them by saving your batch job as an .m file and looking up the code. And optionally: * ``_format_arg(name, spec, value)``: For extra formatting of the input values before passing them to the generic ``_parse_inputs()`` method. Matlab ^^^^^^ See :doc:`matlab_interface_devel`. Python ^^^^^^ See :doc:`python_interface_devel`. Undefined inputs ---------------- All the inputs and outputs that were not explicitly set (and do not have a usedefault flag - see above) will have the Undefined value. To check if something is defined you have to explicitly call the ``isdefined`` function (comparing to None will not work). Example of inputs ----------------- Below we have an example of using Bet. We can see from the help which inputs are mandatory and which are optional, along with the one-line description provided by the ``desc`` metadata:: >>> from nipype.interfaces import fsl >>> fsl.BET.help() Inputs ------ Mandatory: infile: input file to skull strip Optional: args: Additional parameters to the command center: center of gravity in voxels environ: Environment variables (default={}) frac: fractional intensity threshold functional: apply to 4D fMRI data mask: create binary mask image mesh: generate a vtk mesh brain surface nooutput: Don't generate segmented output outfile: name of output skull stripped image outline: create surface outline image outputtype: None radius: head radius reduce_bias: bias field and neck cleanup skull: create skull image threshold: apply thresholding to segmented brain image and mask vertical_gradient: vertical gradient in fractional intensity threshold (-1, 1) Outputs ------- maskfile: path/name of binary brain mask (if generated) meshfile: path/name of vtk mesh file (if generated) outfile: path/name of skullstripped file outlinefile: path/name of outline file (if generated) Here we create a bet object and specify the required input. We then check our inputs to see which are defined and which are not:: >>> bet = fsl.BET(infile = 'f3.nii') >>> bet.inputs args = <undefined> center = <undefined> environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'} frac = <undefined> functional = <undefined> infile = f3.nii mask = <undefined> mesh = <undefined> nooutput = <undefined> outfile = <undefined> outline = <undefined> outputtype = NIFTI_GZ radius = <undefined> reduce_bias = <undefined> skull = <undefined> threshold = <undefined> vertical_gradient = <undefined> >>> bet.cmdline 'bet f3.nii /Users/cburns/data/nipype/s1/f3_brain.nii.gz' We also checked the command-line that will be generated when we run the command and can see the generated output filename ``f3_brain.nii.gz``.
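Tying this back to the ``isdefined`` function described under Undefined inputs above, a short illustrative session might look like the following; this is a sketch written for this guide (not a doctest from the Nipype sources) and it assumes ``frac`` has no ``usedefault``::

    >>> from nipype.interfaces.base import isdefined
    >>> isdefined(bet.inputs.frac)
    False
    >>> bet.inputs.frac = 0.5
    >>> isdefined(bet.inputs.frac)
    True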
nipype-0.9.2/doc/devel/matlab_interface_devel.rst000066400000000000000000000073501227300005300220600ustar00rootroot00000000000000.. matlab_interface_devel: =========================== How to wrap a MATLAB script =========================== This is a minimal script for wrapping MATLAB code. You should replace the MATLAB code template and define appropriate inputs and outputs. Example 1 +++++++++ .. testcode:: from nipype.interfaces.matlab import MatlabCommand from nipype.interfaces.base import TraitedSpec, BaseInterface, BaseInterfaceInputSpec, File import os from string import Template class ConmapTxt2MatInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True) out_file = File('cmatrix.mat', usedefault=True) class ConmapTxt2MatOutputSpec(TraitedSpec): out_file = File(exists=True) class ConmapTxt2Mat(BaseInterface): input_spec = ConmapTxt2MatInputSpec output_spec = ConmapTxt2MatOutputSpec def _run_interface(self, runtime): d = dict(in_file=self.inputs.in_file, out_file=self.inputs.out_file) # this is your MATLAB code template script = Template("""in_file = '$in_file'; out_file = '$out_file'; ConmapTxt2Mat(in_file, out_file); exit; """).substitute(d) # mfile = True will create an .m file with your script and execute it. # Alternatively # mfile can be set to False which will cause the matlab code to be # passed # as a commandline argument to the matlab executable # (without creating any files). # This, however, is less reliable and harder to debug # (code will be reduced to # a single line and stripped of any comments). mlab = MatlabCommand(script=script, mfile=True) result = mlab.run() return result.runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = os.path.abspath(self.inputs.out_file) return outputs Example 2 +++++++++ By subclassing **MatlabCommand** for your main class, and **MatlabInputSpec** for your input and output spec, you gain access to some useful MATLAB hooks. .. testcode:: import os from nipype.interfaces.base import File, traits from nipype.interfaces.matlab import MatlabCommand, MatlabInputSpec class HelloWorldInputSpec( MatlabInputSpec): name = traits.Str( mandatory = True, desc = 'Name of person to say hello to') class HelloWorldOutputSpec( MatlabInputSpec): matlab_output = traits.Str( ) class HelloWorld( MatlabCommand): """ Basic Hello World that displays Hello in MATLAB Returns ------- matlab_output : capture of matlab output which may be parsed by user to get computation results Examples -------- >>> hello = HelloWorld() >>> hello.inputs.name = 'hello_world' >>> out = hello.run() >>> print out.outputs.matlab_output """ input_spec = HelloWorldInputSpec output_spec = HelloWorldOutputSpec def _my_script(self): """This is where you implement your script""" script = """ disp('Hello %s Python') two = 1 + 1 """%(self.inputs.name) return script def run(self, **inputs): # inject your script self.inputs.script = self._my_script() results = super(HelloWorld, self).run( **inputs) stdout = results.runtime.stdout # attach stdout to outputs to access matlab results results.outputs.matlab_output = stdout return results def _list_outputs(self): outputs = self._outputs().get() return outputs nipype-0.9.2/doc/devel/provenance.rst000066400000000000000000000016211227300005300175540ustar00rootroot00000000000000================ W3C PROV support ================ Overview -------- We're using the `W3C PROV data model `_ to capture and represent provenance in Nipype. For an overview see: `PROV-DM overview `_ Each interface writes out a provenance.json (currently prov-json) or provenance.rdf (if rdflib is available) file. The workflow engine can also write out a provenance record of the workflow if instructed. This is very much an experimental feature as we continue to refine how exactly the provenance should be stored and how such information can be used for reporting or reconstituting workflows. By default, provenance writing is disabled for the 0.9 release; to enable it, insert the following code at the top of your script:: >>> from nipype import config >>> config.enable_provenance() nipype-0.9.2/doc/devel/python_interface_devel.rst000066400000000000000000000037221227300005300221400ustar00rootroot00000000000000.. python_interface_devel: =========================== How to wrap a Python script =========================== This is a minimal pure python interface. As you can see, all you need to do is define inputs, outputs, _run_interface() (not run()), and _list_outputs. ..
testcode:: from nipype.interfaces.base import BaseInterface, \ BaseInterfaceInputSpec, traits, File, TraitedSpec from nipype.utils.filemanip import split_filename import nibabel as nb import numpy as np import os class SimpleThresholdInputSpec(BaseInterfaceInputSpec): volume = File(exists=True, desc='volume to be thresholded', mandatory=True) threshold = traits.Float(desc='everything below this value will be set to zero', mandatory=True) class SimpleThresholdOutputSpec(TraitedSpec): thresholded_volume = File(exists=True, desc="thresholded volume") class SimpleThreshold(BaseInterface): input_spec = SimpleThresholdInputSpec output_spec = SimpleThresholdOutputSpec def _run_interface(self, runtime): fname = self.inputs.volume img = nb.load(fname) data = np.array(img.get_data()) active_map = data > self.inputs.threshold thresholded_map = np.zeros(data.shape) thresholded_map[active_map] = data[active_map] new_img = nb.Nifti1Image(thresholded_map, img.get_affine(), img.get_header()) _, base, _ = split_filename(fname) nb.save(new_img, base + '_thresholded.nii') return runtime def _list_outputs(self): outputs = self._outputs().get() fname = self.inputs.volume _, base, _ = split_filename(fname) outputs["thresholded_volume"] = os.path.abspath(base + '_thresholded.nii') return outputs nipype-0.9.2/doc/devel/writing_custom_interfaces.rst000066400000000000000000000002351227300005300226740ustar00rootroot00000000000000.. _writing_custom_interfaces: .. toctree:: :maxdepth: 2 interface_specs cmd_interface_devel matlab_interface_devel python_interface_develnipype-0.9.2/doc/documentation.rst000066400000000000000000000021301227300005300171620ustar00rootroot00000000000000.. _documentation: ============= Documentation ============= .. htmlonly:: :Release: |version| :Date: |today| Previous versions: `0.8 `_ `0.7 `_ .. container:: doc2 .. admonition:: Guides .. hlist:: :columns: 2 * User .. toctree:: :maxdepth: 2 users/index .. toctree:: :maxdepth: 1 changes * Developer .. toctree:: :maxdepth: 2 api/index devel/index .. admonition:: Interfaces, Workflows and Examples .. hlist:: :columns: 2 * Workflows .. toctree:: :maxdepth: 1 :glob: interfaces/generated/*workflows* * Examples .. toctree:: :maxdepth: 1 :glob: users/examples/* * Interfaces .. toctree:: :maxdepth: 1 :glob: interfaces/generated/*algorithms* interfaces/generated/*interfaces* .. 
include:: links_names.txt nipype-0.9.2/doc/images/nipype_architecture_overview2.png [binary PNG image data omitted]

3ps``;1fO>y {mp|N^v~C,d(]?j%\UKw-Kղ^?5`e \Ase6HIONhf:Mn|#%eJ!eڽI۽T7̒y:m3<Lsf+ H$m~%;w;Cɶ$8͕0dƶqN+b{8cp%Q{Jʯf3&y`(VfM -cA+a͖^elt}wLn&΅U3-nIGg ?Ӱ˶hu7S~ jNt3g`Ŭx f 3OҚD8E ; I?j7齯*t$w-V#bY|F)"k,\Gٹy=q tXB9k \+/fz5 $mlr?Ϟx zX71%n+, y|;V&m|Ʃ85k+gܧK꼁ߧZ3E33p V8 !L//GyE,I",O|-xjI˚Y-4z5lT-ʻ>a p=]HR# UXaA %aˀ-yѨ_u>R>5'o(9K;%w] *!=-'sM8F̠&gˏ_@n+q\iP7?ʶ}A5̴1 Ϡ֖cs'}R3AݧsC{h޵>ԧ&=M L jj2|><}RwYzvqK 0]Aqe1m-9vfGoܠrmL_Ng~ɺYA 6݆ަ~56?89WUfii`+77_]Iݖ3ouz)΃u\ؒ??/e6zCN3C-d(];ۄr`e9{u0ղe͇ ϚV._7, CW}mӤV IDATM(|ck\}?7OVg*+i^&+YIPGĽ-!Ho&\=Q OWܧ0 }@+ C~/e?;4T8{/ݤ^CTV 9v7)c^A-՛T߱M٣4`%5 M0Kk3n7_-Atg(̦^A-W<ރ6xҁ->+>zΛ5lH3 (fvnߤHt#fS`[z$ijr$}6Lo}\TA |9fv^f>ר٩fm>̚bf5FQoBی_-M~ QR4={ɶ&k6|leifSsܞ}zk]@o2(73$Xo;<9xx0&f?2ZqTc Pfbuk&5Γtq&_ͭp#iI}I6˴8Mʹ)˯xJ9`ynxYɶʭ+Uo\YeWpVܖ[/ O%}<a+E"!O5UyfCmdYy Y fG5_&,iFj(} [Z--fvyS57m4שh]io*xPm'ex8^_IK5M B3{yѬB7K؃g mf_ÃV[wJ:`FE6#cUc,pBI3K1>kQ%Sf@(]pȷ9M1{fvfs@YKsPB:5(Lr3x$>T?(\ 3N0}6="qoz[t>xǁ$m)il(uJYM}Qk9P&AW} Sڇ)؁bsqӾ/J!mnZWhu#~"?碜㠒 b +:ܛOQw="}&Wtk)/:~#y1@:3;޹{i5;X'RopyfMJ鏂ZYaGg;4)zϝZ&eJz:EqEuoʿUAҊ|v;Po~29˳_m?>n-j3@RC/]uoP9YJp+Mc$(w4^6w}Upo(Ӌ峰Ri(VʂaP"KZȤ-BeB2O()+_:؎(YIѐ*rqIy :3; {Ng3yL{JEߢiw$ٻg5`)/3&5̾ Tҧ͞ `%Ԡc0l3`{w-3"AP3{̾ҧ/OJ@Ҹ,#ԏ?l%.}5r?#V;d3-,_L7HZ$|8)< ,iR3ȁQߧn~l(fRS\^>H`67;,oG<@KF!>uug,3dYe(_nȟwKV-g,K+?GW-g,=j2!h|F&iQ }1sAIY?ǝ+lܑm/*Ĺy? Dr-yr4@Me:m_.'N,Q4\QMv)h+: {n?5#|=OeKKȺَ O7is\%A- IKԟ/w ̎G=O`~}*xZeMMIcr59CПVjnk'SpvN:?}m2a2 j00w\+<Gҙ^AU% =Meg1[yOQЭpc%z һzeyW)+ykfv/'%pZT] if7IџHEL`ox& yg<8˫ɶ+K;" >y㗸%O?W5Rg&;B i7#p3Ԃ 7|p-!>8`1Ok4䞁W[Tqe.aAY̓F.6x˚(3g$݆4y oB`ŭُG 9fv pJ62Dd*>;tUr6RPJONҁ5fIAl&9'~PチWg+q RܮXKZX.;hcNfvs _>M\V33I_?,'H1|d==~\f;I>_EY\ϞOy/53UtdjP97Al%~YYwJ8&#6oٳ9>#͌> 8-]_AMѣI:*ڙҶmygƟ{yIv dn r#y4}.~?qf'gfH$d=>ޟN݈[^L{c| j5٥6}6GAў*/?blxL>S-šg2,A0H E̮lK|T  BA LliT5YYӜ,>S9D0:EHZ̚e0SURA`A̞6l\A={% iwIst$MI7?ߧr3`3iA}+[Xp5&8AAYf\8x+WmMSΗ%I AIKHMExcqɔ7pME{fvex8Y9z6S QY o'~84A&"Zx9~V<.xXuh[e Ń\ \ifO I IkK#ixSs0NQu7b<$x`w$=ox2) `X j0b1$ϸ3t"xxHj3 @K[]`,Q_x-Si  5*"S̖Kmf=?Dos۱eEufM__ktAAAЋPPFbl,BhAAt,A)/V-GAAeAAAA)BA   :0 /Z3;bq54j$,nfٝ/$- V77 &Vb5 *@Á-+*wQ%IJcȤ] ~϶In_^DY fBA !FEVO9Q *8yJ{""MP]EQP(*(""BoR.E) - !1p;aSO?)yVI_&:~pNV{7&'.?= >z}NHɩk˨N~'RM+۳l9m*p0SiQ2J 'M;9*5K IDAT~_3؂:ʾR86Tmڴi֑xrN]<] *"R$؎ڤ˩^$-kAσ.EJ̞g/B2.+e6(xO+e&$3:O>'G:>_ΏJsozNHW,\+2/gMuB0"*389*s/0EnTTffQG9}>*7%u|16]S?n>U 0R7'8x-|\ 680y p VnI~WnQלTvP$z(:~߽嘨H¬qus^2$ ,C^z^\:D߶v76Tmڴi֑ MRnai .A*XI.fo1m8rFh\Ʌh3͂m{(ϣ2$[甝@qATv9j/W(j2BB2Qf?'KuHsPT -*b|['NHDg(7x&*wTF˫od߰*ypiTDO/٩c0[!״Yy>LFoBz"'@R6:89PNn Ut 8j  9wVӾUE||3~wvRkI~ʩ$sקu %s`ww ʝv<^p cT+fs7+Q0Y c;+~ԩrr$F~WloF]S2[ z! 
sf6p^qyAr݊N\/z2x‹g9;zdTW8I%yMK5Y9/tR'f[pcYOLU]z,DJt]}v 7:0["z,;.g.߄q>$E뷩~.tĊKDmmwz]wrS͡K)AOF*( Fny6zlPm)*/J_O4Y:eǩ9^t~d=s|o` jeRǖ7wݧX3['UB[j$Hܒhcm3uluSiĻtNb+Iwv9k\$;ܡѮ?Ez0!Ք+n.@hm|B>%놖7%ce7~wr3{l+5M^)q™;7S*In4K+^'*""-+E-L_/ffO7piMVy3[ZPe:j@VH{aq:/b?f,!Y^0.ud-)e\B ׮ksl0ߜ7wz kf(5R_hfϕ~],w3d0!z<9@,El.Nx*f싿W;q-E;k|OK{0>B۳z$ZWzs^g7~镲 xZsEsK딠tT/&'f]Eiy]El9#VMlvCH*>zONrBm,OZM~-L#mcq  Jw鮔Мto#pf|IHu8J݈ߧ{c̶~IE;œs-A5yN\E7O/BBW03oH?7]̽O[5ԎU}f #?q!MR*""qwIPBwf\dMR6V{z$dzN,n5EOLƿ^ |(C0ag}J5q5zG:㇚+NH ;9*R"'NH:QUbf?Js`RUR1lE|!|!t/|2zp\V'z7{'iQ IRuf!/5f6noׁQw8ɫ-vwC"%"""ĽTPnN])m`vc.i޽78/x.뵊-_sCیi % B›hBrWINovMfhΫ;nK'$n8aِ.~.qcP; -*w?S%V&lf'?M&7[it+Τzmy݀פ.A70D`_3;8 gf׻9H_?Kc&킐7W' a.8Am4@$tGQPn2v45l#3[+E 3隷GZ :l2[:p ˳TJ12OنfBunJyibK72^wRWv\ra7q3[D šcN3~!h:mcf0ffnf6mkfkbf",RI4:q>yVZ:ϵ3maߓgK^ro^gF .J1oJn=.;AJP@ Hw4&ILܺQ8#M0-C]l e;\z *q+5AT'VMxlEBU%7798g]#ebߣڒ~Ijmxr2&iv# I-(s ܔ?ωW+JfX2KUk k?R hW{v:zx&gcc6T;)n}NF]U9U׿:՜:mLo^;FZUDD;a4E *u833K#A1APXn$0h3uwǃ?Lu\HIo9%`vLvYtق4tc)Tz0NX@$tG]bf'%3 I#\@}j;\ENTr9IvO8{̦Q]YʘĬZfv^g5b}`Js9 eLuQ_cf"\qǟ"9^hfZg&mASv26=@׿ۓmM[8k_$tw|pCrQ>,T[o&\ gf&e4Yr&j}$-lGm蜺a|XBs0x `kߺ{zf^,,r* ~rؙnKz61I889bOu_fOε$p_yx̚CӄJxe-:3YУc BKye~}9>S_L?)3-a\\^{R;2u:q2s؊ZPw:.GUB1~]kp-bBOJ'vmX{]>G:̒7'3;ڙVu8EDD.qUw?p0#=V)ؕ$$JEokE^8[i$]݋}IxDJ=L2+JQbBsZ$mdf}F:笄/4y/~fvs ;9^HƺKXC839FN]"8jgfn|U{8 7\+z.jAwbf+Kوf.2=Afv5Xe8-yT;|Kcܩzyޢ՝>ۄAVo"rRS]>.1T =h0$T#t\{Q6pFVa4vNp5wDbfR-L,/]W(23"""ש,SŞIf!a3VOhfܓ߷ZH[O[]XY 7Ԃ*""=ȭCu,g~M#Ϙ}qV˄,K5901J;6uڬGl.K"Lg<ت̹tI23ɏK&>*KW555'!Qݛg$9%4(A"w&z-u|v&" pˎAj*nK3I^'SS?EDDz'ԗ %~t~#OXf A/4x _DDDDDD/hU-""ҪwS?UJ"""'b^s}T^J<)Q3{aP$o~#킥D!""G5!Asɍ> o+EDdx2f6HDDDǚu\c3fv4Au)sѮezy~Ry.`2#[~~1,FT .Hn~@DDDvMGk/ u͗[RDD$GE_05ʈGDD| X H4 *% ""Cqg5Q%#""R 3[88iGS͠_Ǡ&K^ 1vш3p{o2ѮGvjt 1q `DDDzf:tG<6'>Haf:`C?1Sltc3[ 26;p}3O.+AMgџ-% JLmfnȠb1H`TWʞǟ7UGf6x; OHdbf"2as~d &[I~E/bf;V际([2 ѮMR!6|XDDDz_ۭ3$ڵ?LeusJBDDw;pj!)`W`>HNv&PbR`Vf6xDDdhqP`q b""""D˸ݽہ0nTRZ_3;84upDDd3 xƕH> ̙" pO)5d]3Ŧ*9A]ЊZ9` wDDDDDDafpA2wߴ? ZEL~V+*Vv=,'EDDDDD$}]WR[P ^^f)`ǒ0O۩T3[ )UK!3[.w_:KoApm^8pDDDDDDT3[./*I3coF.wMڭ/ZP1ѮQo5uDDDDDDGGKܲIPGOF?XDDDDDD$bfg+t':Rt0MK]|+)$aFEVr8GvpRRH""""""d`N%Ї-r3g5}b9Q OfNZP}:=f6DDDDDD3pbjO:BV2SK%$"""""2lhޅd/[P+QBKhfv@I! f6 p!}HNTwNjfe񈈈 u3բ-nTw?8%50Bl^Z`h6lWcPcI ԡ3;9p,ya؞ŷE *l\:+pDDDDDD3t:^$0Tw6jz5;죽JDDDDDdPl`F~|b,]|l?8`T{ rY""""""bfHefcT]|BއSffk>*ef[ LN&<9AނZaf3Sf<0>bfk'+M>A݂Zd`Q3<8fvO&/땝iA:;+f~ 88i`""""" L IDAT"=ff+F wز Bj`P`֌"0423#D[Q.~WCkH&f 3`G-p>ቈtLZ 3PNwQC:A0%sGN5di|؊3ƞ~ݧ "iQ=Nk+keޒn Ky#_}ZWaV܄A»h/ooo&HEDDDDdO X.EFCǮyeKDŋ 6Wg%Ӓal`h[܈DDDDDD2MnIiL j3[XX&ږ*3.6fy0d*1P3/|aJ_5WEDDDDDFʿF e """"""JPEDDDDDO(AUDDDDDDT JPEDDDDD/(AUDDDDDDT JPEDDDDD/(AUDDDDDDT JPEDDDDD/(AUDDDDDDT JPEDDDDD/(AUDDDDDDT JPEDDDDD/(AUDDDDDDT JPEDDDDD/(AUDDDDDDT JPEDDDDD/(AUDDDDDDT JPEDDDDD/(AUDDDDDDT JPEDDDDD/(AUDDDDDDT JPEDDDDD/*;FKƧفYM}M`ro=<^2w/;A>lcˌKDDDDDKwVjT%R33`U`S`BrPi$J2rwXr<=Af6nl WnD"""""29pp)pWr<]3[a,i_d{ ѹhEDDDDO:Lف9mۀs}jBJP3$]x?G[T?2T?4""""">3l7~Ow-(AH`{x\Nhnnt"""""2ԙ2FMug~ S0̀fp pۓDDDDDdX2Y mOVg{^W ~Sāw齊MDDDDD&pN)^I6A5__,k~'MDDDDD$ ./QN׀CߺuX&f#a93o40ٚr. 
~ lq؁wg{Hlk(5io= E&A5&8Z F%""""">3 #`"_v4& Fƚ~#𠺛 """""R$5zaߴ~uojܐNPlv3LEDDDDDaf;7ݙ'>F@٢- LN|Zɩ E~&"pE(fvt2#p_-fq"`ԡG%"""""{fp,0SЅ.>lC.A5SSAHNt +f.p>0W=btaf{c`rzݷCsv-BrH9QH?1+3Q1':lCGn>qh6w VfvnwT7+MN10^]yED"YӁI勈 R|.w]v}"ݧ{z~0py q N-}%,@~#p 3l!`d;v}vJEDdfK{fIK f%lY&r[;Y~a7 |&{_wxD%?֖H~|vHDDf]}gf7t#/uPlngrX6>\hf?/;>e hn>noTNb@3; DDD?q{ٲ>W_v5 3bヶեR"l7`l2'}aw?-# ΟF*y:!w5G?I2A%O7{2HOPdIIf]WD5?7+$/Pz}{3ѩ]_3[=HNp#fh"""@X2NTB3[5tUDDd}SUjfkf.#L9e0"""}D1~YJ$2(X}`"_ew 7TYu<F~w'&۫>H 3 <^ܡ3OgGXF5//Rdb \s^0``n`6`VD*i퀙W7v+#̼;X:3[ >~E1!!K{8쵁M O2+ˀ͞"`5 z*+}5fwPFIԓSX`COYX#ڵtN5̬w5xơ_{Gf6~֢G93`I{;f:ӗv"A*T{@w37[^*vN_B-SMBS.`sO=_j"?h2;f#L':3KFciT߅y^S>0 LZUD fv^ׁTܽ/6GvK1u5-zMfqapsV\+uWڿB}# -&{:4RUǀy<ﴌ:k''ƫ<6߃:"yvX{XqlS/RK98Vw=/;PsyޥMpMN= twFwZN<|"U׉9epqUϬM֓y4Uf$cͼ]:X7|"^OڴiJcPn_I>X] 6qofk?;_Mַ P+fM:w}xm5wdf6̎ $ x~af#nC҂Y-EnIZVGI.6-<}3Vg3[pC30w | =DD yi}s?ebNdO,pB+<ߨqКnIL0pm҉PlOLϘ,nΝH*G1;PeBՌ.N"6qbf'jf~ifLTt0YݬhEanM/;`Az瓼q"w8]5Z_2!id'kS&:fHb4{ʄ !.2-FI68Ɩx a7 sK֙$WSuBW7S,xf9)f&p7Nb|_h3[;0q(z/^YD2orO^ mj=&K E-6ɉE÷DNJM"80d9r?/a>?\c]^Ԙ`k1G&Ɋm`&:d\;yh`CBkR^mFc]ߛFԳabs+?8q X`hTzu۱1IlE߫ %m RcZ+o _9d}?>B)A'cVo[j *dr,;0>s?yA?A} opVQ ޷GM6KwM!@ݨ^yQ$'QhF'>wڴi瞹zAgS_['I+YqO '\Ȫ&Y*Xɪk%6[x6˹pl'ZR40 5r)x;+Ni6YE-|b5 9Lf祉okݽu?PPu;ԢN$6:X?%k4XumDl1ذx’N6mc#N͠~\`iBX"kZAf6'aެ10OfmH<͙dw9zX 8|qxf&I^dwrO8^/wB߳}/Ӂ3<,1t0iұ Or3ؐ`fV9wh6'Ӏ/z]=-`ua&;)7)&νe=&oq/&"2@r1`V+5A5% c+"t7.3J0~-a,{'k9U5Bk&BْvhduL&K,??4wϚaYo;8,w_~*mlxmOng6gn?~%aR7k6ewLXJ,ff YkO%L6:"ҼS?ojEe/.uVh.BNos)󳜔Paf u?o~˾<%7)f?ocfqwBk5rm<&,imWld2齄y.s 7=^wow{Gϒ㠮$( V36c,E$">KXDݢJKPl!SK G.dG] 8f-ZyZkS+X'\BzV8g6$u][''׬&.W˚zfgfZ"\aWy7onͽ4ğZm1ܓѱYCgVLI)[Tf fލ.ޤycff[-q|:c2F]v }hќTMr:QOFk&6=*7K?D""vB MvslWz__vibeBqCF]MwLcO9 euk#ubh5Amf)"yUU#\֦\y; ?AO2QNR7mBq& .eҽ*5h'IfvaW"ʻ5V(A͋/ .Ywa` فZ-:C BWamM<\kl`]wW'fv'uxMc*x:[KBu$PtKws̃y]!?pA%]wtW3p^,pw+F7sJNOiLƂC63ۺED %Ce^vM0sg*+AM*AoRfV5/^M0j"yg鑼5; K϶ݏ~K7 4: [ܮn=L:qHclg`:V~Ʃ3y1-eӓg"lKX{]`&ZXYD[*>A]1uIj[ rKC0, 3[]w.-Iy Pdɻof֩scsO9Dru3FYӞH&>:=* GX&oB8l3[Հ "H/b3O.4Y!3 N1[YB R߱ԮaT1*=73ke>omҥd҆sQr|YZ5O &Ss5wyc7C9<4U 9>e@nz w/JnS&yƁ*5TdS0vYNu#kӍ95=jb>nf6dYyݡ;%/IzKJwZ4kqM;Zffs۴SҜitV:kxj b3% َ"n^33inF $لD"^M-yc/ @"TnIwy j]\MrG#;P9@2u8s IDAT\ٱ:#<fe;scm5Y7z18ԤE-O9w[\z6sؼ!9^1c$5I Ti6[QiŸM8y^k]:AMOܒTP$KĤ `+W^v$a陆Z9Wn73[NiiWZ[ifQ#I/uilt8N2w8?f~^V(w.cOtf>2of6SF11g7ٺef[vm`j̒sf8=.n~y޸F뻖s-qiwo?{ggS?͌oٗ."{Z硤HHDȒ}0d ca|{s̝;~s{~Y4MXF_Ͻ i$=8к=\J)iZowS+=>QJ_k[gf8k4i}RVZϿ3?KDvkV ] QJ٭g&?ՠ~@M*vMj\(}XD3oǤ.Sˢ"FӴE+R]V h<X3oC> Gniڷ1:rCc=9 i0D?iZwvF`,s> +ym^пs=xf1eMӾCO7p](n>qيga_vU`/Z]Ai`zOi8kig;~R #J5M{XIS`iЕ]Iǹ +޾\wAPJ4mFZ i_˼0tSOÀt{8sW`-%p4 i5^ 92z½yzlzɅ>iZs .S X@cѓziTBu]N1P7DpXۉRji-K+ojA)婞}hXB-SQJmr qvR=k02zKi?QA~C5M{}$ sj i.Jx$RE!w=n&2RR4MKLC)uPӴFpx[@_}D)yO3' %4A *~fv /ޅ*S)G >B0DGJǟ/Rj-ИH"ڈJޕJ+7&e}3ԇ:~AՑXLPJ] Aӻ"87V6J)g?:b̨pkJF{Ij (t70;NoGɌOxqqJPJA_T'08>z} u8|#Ʌc}JxWN>z R O w]_{ 9&i-񑈞CqiA)RoHFR\{%Vs袔z1r3pB@Ɵ/GW=(~s|k/G_ շ+RpXT^)Z)D}WJ G7'6#u 7izv p8yc%6CQJVJUFOD zewP;?XRKuTJ(v(;n'57 9e~ ?{PtJ+;jbsS)%.()u1&}nпi{/\m=M4k[̜$4m0KJq!ǠiZnt%aXeSJDГf䊂Kj|FOFTԌWѕ~m(k3cf[dSѓGB_Q;iEHMFS{-w84G;] XAk[X qEp tJ%)VJ.sNSݣc+}vA:-Ɵ0B1PAAR<8f) A jf  Xo A!UA,ÑS2/ B#  YxKږٝA1PAAMӴ k0A!   PӴ5M/7tF=4<A/b (k5M۬i "M4M^j`AYAA-[r[SF_=?PJ-ɨ   {:?A!{".  
d5 zD)u=;# db AVJ{ qAoRU4M t r~&#RjgFSAȞ* B(nwlEX 7p}{:[_ǁcS ygm+Ip.7"NNb1 q=H2AY.MY:ţˏ8trr__kDvi#E0 0okYvXuoːcI roAlBPc ЍM/ʸ&p,ܨ`4ZH5^] qI"d'ae\KOpejz@z̲š+71S!d#1冫pnz#ұK}n'0VY>PĮBzUH3^"@5i`t0k*86O\"u8N]NU)|(a@Uϲ+NVdtO ;r؇>Qp~g"7ƋTǬ{ʐ"%ԿWsiǽvWD!^@=V.plvl[g< W!FxP WN8Pʱcs ]vlvM!rC2<ȑ<2)?DNńjΟ!Upngt BUlA& ;wnbbbȝ;iq]fl}%$$b  .ǎjn ޔO (@bށK]"222MrXtt4ׯ_Op=v?ߺc _HMMB&b#Gb:ntYRbbb%7\MvZF ŀv_'ͷ-;D!`` b;IB є)SeRlYeRtibcC{Aڵk_řcǎ򎭛c_Nꀱ+* G ɬz@x~ѢEM2*C-JDDw)));w$/2ԩSPBcU.W —iE䆐&,3QNQ(?䨨(J.Q)SE||p?q7o[+ 8.CVˀrKi̡1J)%@q[+ҙ9\rԯ_Sn]*VHҥ IJJ⯿СClݺ͛7yft؀h.EOl6A# 9Q KfS!CjժEr(RVg.^ѣGٳg!?n ,x!v.r0}b~ҙdɒW*UL2V0HNNĉ>|ؤ8ptL'ul>^? $$Uˀ\敦iRJsE2c8<[l1͛7[n#X݅@mp}@c@ktKDEEQV-YfHfWRRR8pI~\v-hzg8?1Z"7r,.r4EO0ŊsAJ( \tdn޼C{'E_eMFt! uUm6/^<Ƚ6 7oĉiQR_W[2PXBhg-f؍z]xVd!ݻא;v 1ѧ`% ]~,E<i}q#uIT6# ^z&L2 nGM˭N ˀuǝ@c@ȅt{#22MҨQ#c (]tV&'O47?rƍ@nqX>PBO xy5!EhUŋӦMnׯϝwILLVռ%))]vUҲJTW`w뫁ȌCDHۚ QF4iA*TVN\Ξ=kȏM6z@=5.Kzg1(ГE|h׮;w@M!|2=-bٲery"9=v$ŸBf4 .Fin7$,իӹsg:wL n#wfѢE,Z~-8C,`6 R6]_ Dn.9R ]y(111mۖΝ;ӱcG-Q2V\ɢEXd gΜ}Q*IWV1Oz^^ Ddbޢhn{]ҥԩ;wynùy&ׯ7#Gr.`&0=XM̦ YaZ]|kv*<<ƍFiŊ3BpI,Y¢EXjU [ O0gtu@FiZ]4EұcG:wL۶m"/b C:@R UA®$;=%XcCʕ+,_EtRΟ?WDO(7Dfd Rx±RJҸqc‚^V]&߿Exb6noDɮOѓ+YcWAtLC [ǠPxx=G"""gB/Yx1-7nUǙM@_]MuI Kl#xܹ3[r Ɇwŋ-ux}U*l^C^Bzi 64jժeF7lƙ3goYh? \lDtF LiÆ y'ҥ  "\t%KG~z_}0pƉK$Eill,{GQFO*Ν;3gӧOܹs`ozؼ;"32 MaZJ B^$׮]?fŊĽPuCbj $8C4:*> Ә0tPqΝ;2e sիޚ*z_cW}G4? *U0tPO3B|L:M6j~swMb`byo r#xhV4""Ν;3tPZlir.?dƌ>Y b04M aKJ2d> ¥K5kSNe߾}ޚ'1 \@<  pvtԉCҪULpkuVN}0 ̄S2 CӴ\pUS%KdРA<,Y2':$$$0w\N͛}5_ N*Co AD C LK5k2zhw..xBbѢE3m۶ykzxX$AB 4'Q_~RcP4Ξ=o͔)Sr劷?!jiZ TP#Gҿ"##3s-͏?ѣYnf~gq!)6d0i5P%ZiZٟx0NkԨyرc=zT44MK.lݺ rwzjZI4˱El.ȹi͵g؋MGFF2p@'|"Ʃ)RqqaN<u讥N0?4mi 6mTT7oڵkYz5M6,/*"N$TYMy"h6=#jGժUcܹرzH S!˰kԴ z'09H  W43ƷYۄa* VCC '[߈ /h|X?e˖TZhuXr%M4Ԭ5<񦃸 $ 54-ZӴOг涞fǎK !:qDk,{@AtC:+g9ص_>B4T@I*dpGӴpMƢ{n%dݻW S!hժ6l`Μ9v,`(0(A,$k' q ;ߥK˿/""k؋b\z-4M.X 1ׯf͚9AH `ԩlܸ:u5)zHƹ['@&2\Mڢ'CrsС۶mcGPTAy:vȎ;ҥXZeWC׊]|*pk EsGbbb9s&͓B$o޼L4+VPhQ&@a̲úb F*Aϔ6ްaCoOͥK(\0EB TV-(YrQO>={͛7sΜ9CDD˗7 8s Ǐ篿ܼyERX1j׮M"E2[oȑ#INNJ^67כdὕr~ xMπswq ,4s,َǏC/؝ކ~]^Xe<ȍ.3hVXmСCyw B0{l̵k׬0Up%veni$- JL\ހ@)RDXB;wT PŋM[ٲeСCվ}F)@EDD/l5aոqc6mڨŋtѕG7xCUT5vXOɓ֥Kti?PJS"*Hcll潕Lnᣏ>˨Ql/ڶmwiGYpt3Po~mmSRRԌ3Tb=+Vqض>|xP~[VwNSۈ\V*ԗ!7n9#( ~~jݺuob|Μ9Wn'?Re\u;d3nAҺFUbP3GB30 fʕAs IIIу)SعeEmS'%%1~xuFRRR@kcDzСC9uB QF *Wltazg}F:uݮDԬYZjQX17wxN=~zϮTX5kEF h+o-4~1}t'BZz5ΝKӵϟgŊz~fдiS 3g;+Wqlذ!K;ukr:09NVxS ?X-[FӦMK9-ZUVQre) @ԼN O¥M& K2X(P+VPV,=?<}FygZ*EA4RRR8y$۷ogر&j <ӧ˗~6W^M6lܸtr 6S|yxJJ Ga|>}нx o裏2d0 wу֭[?s K.eʔ)lٲ87гgO-Z׳N|Iۑu0+Uիi޼ur $>D%%pbW*-3!i$xG1cFP޼y | J/_O?ag^9.]?-[/^HϞ=ٻw/ϰ>fW^~e=z`W^&\Hq7dDNV4Mܹsr}eQ!s)^8VYf矮 >I.?0!9N22ysE8xWׯWƩSTttϘqF={U4k.]dIsJJm>}\3ɓrMLLT?{Lj#ߏ?"""L6k۔'H=kX]|=7lٲs@#ѕ.H )V?Tӻ9/w_V..iumٲqHN<ΛŋU||W׮]m^7y^|ŀq1p@;W}W=sLjwCwUbϟ/%(DwQr&V !\t@-Rsc8u,eCCӴ0tGޛ%}ٳILLM{q=u#<ˆ#}o_ܹ'Nжm[,ӦM/_>֮]O?Mxx[{+rbL2~n+Vׯ7o4ٓ+VPJ4矛_ro֭k୮QlRo3l:ț7/3gݻw7ާԩSF6l>@(PzI&͞=swߵ}oVYs 2xGѣGu'ɕ+.P@UN23ydS Pk <2xvĉC1P3g^ .;<$ؽ{^4uI[v_}{;DFe:6c gW /`'''ӿnܸz8cL2L>=M{A׮]Ç~1bWdB~+Hu}cӱ>ɵrYǫSŽqu_nv2(11(ʅsαzjtE;qi p1֯_ɓ'{~4o-E }t;W ?ey5i)389k~j:i&\TTRJ%J0]iڵݵkٳ׺ѕ+WVKHHPRVPA}gAE}1u}zLjmtolr#"TȁlgSF}AV9rqo߾J)}Lѣ]%J?0KzyDGG~8LUSկ__-Y\4qDCwsnS ^|Eez@7 M=tC8u|0$r { MzL?8㮻SN4mڔ5j}~Ն{]ysαsNcz4h >ѱcGjȒ%KL:N ~Ϟ=lu70tPn3;2díHunv.62fWKgT\-ZAݺu3Vq/=z[~ӾӧOӦMN|~ѬY3URti:t`=\(9LUv)H5UԮ]z{lyf|Aƌ㳭3l0ШK.y E dӒ|Di8|MvʕZ3߿?3mF 3f W<5Y~4n8(ύ5_zH7|c 0ѪU+dSGfMT`Sf@}=SA/H' uսGl:wLΝ]oNe7otSR}%`6lǤV}57)+WGeĉ 4oWb ^í؎8ׯ֭[c SN=I&OB"6l@߾}Mr==Lj@%>{wNJ);v,~!Fvs~78t">>gq+m a,p;UVn1ͩMv䤍Lygw4o<( 3y2uTpE^{5^{5jժEi׮78gj|hԨ~ʹ,76H=ztիתURhѠ=_c5kfP"IuIﭯζ!8pr@6m2az2;.\c;K.MӦM3ob&k*U*SҥKSF oܸaLhт޽{SNV^MK,_6yy饗V~7za$۽{7=GxСF\q_|+Э[7~W@.>x5kp t2ܰnsZ)ZݺuMЂf*7ܪ\|$CLX;v,K,1U'|b;Gr RRRxǩWkf# 
bĉ'O䩧28b*qhy֭[iРQbÆ nі,YҥKVZ|rqTR|7y睆L5kVP XJ(auu }UY~rIvm4Ho+fΜi1o޼|4nܘ]vO?%,;u_dI~g|IVJ˖-IIIҥKk|gv|~ƍ:uGD|gTR0j-[ǧ3n8 KM2~W8g)K"0,i^AƱ_}p4.*3S-[ƾ}j_[`$$VcغzPNSy{fY[V!Lntѹ:"IH]E ~pȮI`dիazsݱc|rʷ'\WGD̛7hm5j?`;v_իǏ7bKW\͛7 /7n۵kWA*UZj\mdH éTFZ6ʮrI,ISP>X K.qMBWtixc7Xj{6ηhт8tߥ\쯁Ӽf<1AzHN%YGe jԨ 0aGeXUVfxLJJf\ңQwu֬Y豝ʕmgT]gӋU[WT WL'R|y+V>lw"+NC90D99mlםG} tԉ>֟[ Mh׮|ߡz*TTWWڶmkW\adnR4i}˗/ѣj62$Rqxtj 'tWFVܹs&@BvԩiO?d2PA|pM~7mf:^qzBBt:aD9dYe1\΢'{0y3ʕcРA 4d~WVX+_L 6mDݍz+ׯ7ʺx31}xi:_lYӾuE5;+ puP[ ɮI`$%%q-wRȓ'@MJJb̙^˶W^O?4i'?xZ N w@YekI ctl21f3q_n9ose„ Y}+S`AӾRvb<-,^^z c̘1bG}d=t>-2$'!A}O];vqeQwSzu}RRۥ߸fOSWyo˗/3~xӱ}ڶWۙ8qb-)[1Ǐ:Aa!`J=vؠaѮ];ý͛ڨNk&+#eW\W5=jk5j0@UJѸqc:tSO=Ųek׮z8@\ Ҝ$CL3 >aҥYԝ[ &w6nf z+W{w>}7}B PJ[E79u;DN'I`끤$z}&cqӦM~^yL`ߚ1}tSɓMIIg?7{7%qZo 7|z+ks ^~3:j9籜fmLƲ?0uT̓rrJ;ýUV-Z4vRƍ̙3'ϳ;wn~NvٳiW_K0a).Gaaat߹s_/4GDDƎ~)SX']Hc=m U^xO?矛SՀ2eJ6{lN>m:vO<]5faGN'ϫ)ݻ8p`V(3 M9rشih֬۷oySׯӧz5,Yu2Vʕ+om:ӳgOdv۷ۻcǎpNXX_}]tڕܮL L6*U0binJsN瞳"8xo;vVZ/^ܹssi:Ě5kXlIEEE1{ltN5""֭[ԿAqen(Xgɒ%ٓΝ;swRxqRRR8u7of|7n3Æ {\r,_xM>ӧSn]:uD (S rĉűj*֬YcWe2sL≭[裏_o : ns4MI&iw}ϟĨɓ'ǏW-[L05`u:ydc}7 /O///Ϡ+Xi6@]P(9~sASmf@4(e (6ow0j(;www[6.\cM60*THU\YĘ?};vϿtom .p+V87` >tgV6mdz_+VPnoUUJ :}Ume=ܣ5j&ҥKn]&9(`$0,0!;z5k%r#/De'aNAnݺJ6qD}|Mϻ={ȑ6>{5ӧOlZLպukUN79)S/>>^)S6**JnZ ~ګUfƬYo}fiӦl۶6@~pk]vl޼{h˕+{fǎʕ+mٲ1cPtigKNcĉn /XӉO:,nvz~l$OqX\Ao&lʔ)ٳ/Tk.+Сmڴaʕ,]K4=ִK.tޝ-Zx4jGŊ>}:o&1 .?te˛7/*UwMN+zW^>|ٳgd>lxPBjՊvѾ}{J(3ܲ+cFrYÊ+NCOM<C6ªh;@o \\i޼9)VԎ *슽;`ذaI޿N:F\ɗӲeKƏ_mrی੧2\jժe)S~ͮ}'O}]vK(Z(e˖ܹshܸ1TZ^~Uc2_|*ի޽{5kseÆ *ԨQү_?Z?fȐ!L4+VpI|tt4*U[g㣨pCP@gADěZZbUEPDi b*V!A;rcv636|<1g7\裏R^=O^d v[ ԑ{R;Di`OaYCCչC?O.-ZĨIV0P%&&ftW'[?uTnF׿Æ  ׯg̈́ :ujP0k/^`ܹٳPѴiSJ&Md|qq1%%%$''%-EEELُR"|zLQ4D|o^ u޳: &Μ9Ç9|0Ne˖mճvZN8K/`TBv؁CpQTTĞ={8z(-[uֆjWn#q;jUA(/gP H pZ>?w}F4fjPM^2p!6?~ 5kGL"ADCeCs_?ӧOgϞV]3$e׮]9n*8 T2@~B3gPk'7Y_%H'OXY_?AQSQ8[kJEMib}p^YVV9XdIdJ,ToZ%Vvo'nҡ1ͰC䀗˜2߰}vθq8x`$*x=ztR%!-B\_>#=2@^:\k7cotޝ:K$р(|tޝ^zj4m8NьOV>'PyݻkeΝ+DÇ3e."6nhu*&y'D߼Wx =v-k:@dkt֍3f@H?>}cY͕]SQXZ AK" PfzV5!Qw|`…q9 $Pŋӯ_?Ǝ޽{.]2n6"`U b5Tҡ ,@m虋/k׮L8QSN̚5*{c}C6T%ŬV-iԹb@} ?~{w]o@U|g\z 6<˾Gmy(JuCDyojd1ҿ'Q܅6 //n^z@Uq^ˆ .CL+'8X FO/ci6Q蕘RZZʜ9sd*Qi2PPk;^Au0EջPb5 ҍXJj9^Vy)E8iA;w䗿 T%5e]СC/.;Na'ʦZi$8ғ!GFFӏ̩3:D yƎkJ" 7,ZiӦJP}~bݶ+gglN FHR7ڑ:bTgԭ[[o_dddTאHO̘1W_}*(8|'85Q7~צѪpcՅ6>HhEՐ$THƠjƝ:u⡇[nөI"CEE999Obʕv+=}є2Xעz*r(Dd$ R5gp/0-[r]w1yd:v!ݻ^{ٳgow6;;fêϩ*0 N0HpY~ hmY\r s_}D73gtRqNP9SͩTMiLNGD4hAj+`0F1azY DR[9|0s^sg;..EڮiS1E5'.UdX8yD5"r.p+p 6jBBW_}5SNemD>3fm5𑆂o%;24 LE}Pg }IJq)Hɨs^\pZҺuk&NɓIOO=wf͚A3!9:kY7D-4nZ"L1$ʵ=hW`\zL2E&$U믿f̙,\+fv+Kk 8ojk18u/g̘1=0~+Ij_z3 vnڍiX֚Tæa CP^p:ҿʢW^aVx`ǎ,Yl P@riڎZPzADZb֔v>`-5bĈdeeqWӤI|1ILSZZʕ+fɒ%-ތ:x LSM#D r%FUj nk7vMmv90 MK7eIII+$++#GҺm4Iqa˭ Q/'1mme5ڠ&NSSDċq!Ikpرc XKILtu$P@P겖GԠu(݌)k;: ǕA#\vedee1zh:t|$n(,,$''l/ IDAT-[z uںIP|"Jr[aĭRUd RE0LT#[|[ f׮]%QʶmW_}嶖TASYn:g6 A2<09OkaPkU]ѴiSꪫhذa$9s }Yž}^:e̫vu*`(U.vj¼~W'7ԯ'bAVX3god7~䡾^S79HG굪9՗) 2@lT'bj-p)jH\{.YYY5(EEE]e˖-&S3~n ՚ֺTC_2m$n\uMKT'3u%W$''s嗓ŕW^gw,}'Nr4t*jU?1+)U+S >hG}jhǛ&Zw)pRVVƍ>ڵk>0ѰQKN bNrZ$Td#x4Vf7ڒ膇fbb"=z 333Ktei&rssYf y%Udw[K E)2V45e]@`C nA X3^ڼyso92pq!rss >$vlAmu! JmkUᶶԪIoN5BA /@UN)>Hw9K.dff2`233۷lecƍnIӣ{ԐæsQ0&&ET_k*S5pa PL'#ajKC&8}#zz")) ٳgO-Bw\6mDiii(+AuӺsvM`j6 "`HhD"u&Huӟ*Xկ-7ujڅ u Xnڠic$!RPPڵk O?`bovekۚn:0صaC=Q՞@#NHH[n533>}^~ׯ2RԖ[Qԝ⃘SsU"+_ĪiO Pc BlZQ2Z_ 셇>gfRRRݻ޽֦TTTe˖Xf 6l4SHe-*֋siWaUsj6VSG5 .TF6A T3Pi9Kծ]E O[L?Y;̺a LI7ZZfզZiYWטu.j ʎDziiȠn] $6( n:͜B FZv]#kr)AVG̵  . U&l$(H?=jj/\N;aGݻ7]t!==۷qUĉٳݻw۷ogÆ n(**bϞ=AsN֯_ϱc1GLj_^sIOt)%%2@Q, vr6N9"gjNF=NN=76 IKKݻWj0b`_co n2l HY4 C-1Ui @5}:vh.,ԩSm~ݲYcǎYjݻٷo_PPkEQ#{9Ӎv[C)nv0~@6sj'ku$`ss`;4h`:֛ ~Cʬ!Gq~7Π&5 ه^[n2K%v . 
PcٴhZfGSo$떨\HM}>[tRSS_pWxJJ ?3O"öri?nDSQQaSji5ٌ&4l(82vJ28cK @ R %U35 USTKbGzHKK hH۶miРVXI(m?lp ؔ c?j kgMpv!bQuJtFԢˬ#uP!GCҩĹ-[֦M=CA]' I{,+#50AS'-1'ǭST!d28l 64vgAk*QUlH8}t8Ņk6᜕4ù=-"`Le#!jS*eܲr0͋YTc ɯqHqhUnI/aBtD[}Qjr%/-LW{5RKjFBd_F*iejEjFvvgY}GRp6oe5'Հ$j1Zq4} [?(~1T]9꣄hk>]˩ԭH$j" k#pkFMo{ԫ)r<}07%Rp v\X!j Σ^ r}V\v, `ejKF*iؿN~ 5g HG5plj A#"8pӼm>f%fvbh\귝 %h @R+ 4 ю4莉U{j^[^KվY{/jUKanHfaA|XU'q 4D;86km1|.> ե#沙o`2@SBhzbgS /F$ԭd:oetd:~ ʩ!DwK*]V74BtɡXzq.4J?4\vln[C}'vA%q PkU0`-NNSNŮ8愝absNMch}?4qfdjkΦ@3T=nmmжC7I(7mUHaA&c:'d 'vü]yPjI!ZKS`Z9H^C:&ڷÍiXxɉt2Hlq ιZy·zUYDevɱou.COYDR7$4U3cy2asLo0ރ`nm6 $t8A,fvr@R}mǭD0\Φ~m`tN-ZEAi $Y;©v׻Y`mu̫^[?(WJݐymQjn~ۍ/N}mmѾzPjIB <43 PnnEN j(B[=کH<)0s"GSV؝ E7ι!U+B `#`Bq-w}{hph- %J`,nn.v{>I猦y`y//JICKWs!{!EKBOJT#An9h"僄rJxj嘫H-xEjJ<6ͱ30"GyyMzuLJ# qBt6ǝO#|kâ C_DAp\'|Z" 2@DF#pݣ[a+U?{&ìn+nH$&>|Z"}J7b*[mHHO~X!FtBϣ}:"d*1 4K"^"YQ; <&"|R7$u-I-"2@$! *n* DyYGB)fZ6#h@# BN3xGgY 5RG,n:"x"ND$FALP[RLHx(\f$ʋBAGԡR/ڡ߯nhKi-5# DRf#@ :r֑kHJ&@  S 68-?/}Fnj];f;K#!BБ-h:hX? Ia*Q3Dڡ/j''-[U :'HǥhN?-pm;%e"V>~$v"юq\$"TIp! @Kb޴߆iS5 ~c*Zvp y`ǥ)5\:MבvрVR cuc/ɰ2i;yaHb:Rci֑t5B U xhrZw'wZ"q P%!a# @7bF>V@kJPKl6fAΦӶPHgSuH":PZnMjj*.ճҨ[7ZSZZʾ}ؽ{7{;VՏIDM n;a48+GS&G:2F#5ܟY^=,u$== cEQ8x𠥆ٳ|v/i d#pJwJO!VI1 8=+ܟHviKӦS!{Q[d9?ȞpqG $$ԑځ/W[C)| 1`zkP @HJJ 8VAhƍ11ɓ'm ޽{).2+%Ũ"`mWơ֣Ӓ(`$p1 N:miLOOej8*Μ9޽{mQ[l|#_umyԑZNGPkC2-ZMbuY$$$br8Çq{ȧ'$ەD>ow( Pk:Wi4j`ڭ*m޼9_~Sk~֭[ǚ5k%77CU;eRM~m֗_ZNKjhTM ë_>}a-ҥ W ظqc@Crssپ}{U{5,Bꈤ tjrk4pU胞ygIz#qOQQ}$//<X#ˀq!RGb J,TjZ&Mпw1<駟N5kXvmU dɴ!A]yPï% T&F$))޽{4dwyԩ#X. Xvܽ{w+>Gu0sv2ʃRGOҕJHH{߿?}^=9ZuQTT >?j Z&2@7QۍFm2#VFׯ!ܹaEaqh IDATZuqI*VT@`[_^ibDՑILLG@433 /:tP˚>Xd.Cu2alAHܡKn]@ergp}Lxu_;vQ?RS]H T 0J^i_ڷoȑ# ̤k׮2***زe |7,[]vyz`9a+Υ~ y[= CLבT&CT*.b233eFqrssYz5999]#Q#kPՊx V䖾VRRC +tmV2G $VX+(--èe5.NZڀԒGHwիcƌ!++~UG%5ƍfŬ_νZAa@!| SLAu(]wLKKcь3gБ+Wzu2o64K`[= u$$#TF1c1b HǏl2/^G}u {>5K`:QdZ $.\VN.@PکSj,$Zؽ{7dggrJ/o֌U;0D=~i ܆PSAVVYYYdffVW%QYt)p :èn_<>/ 5uڵkБ!CV-̙3|,^%KxV?T7j7%Ov5ԑ#Zߡgdee1rH7رc'eU|OkghJu7p0f֩S/8:쳫(̙3|dggd8QkU&>u#CMnMڸG̔]j1f͚@+< CIRN t2I 2@QSҲeKFEVVW^y&RRRg}ŋ:QU+!(#Pud _>Æ #++QFD(^ŋ+[TnLHf|>YoQ[q%]E]Ę1c3f ;wH%Ǐ?V+***n)FB7 ?f5ԑ#8FWc4`ێ.))뮻ɓ'3hРZ5鴤(W_}믿… ).vԐ<@5VN~/4 aį#='qL̔)S5jLnI_[ZkuwiU/$$C7ofb֭NFՐWP5k\H5 8 ]ױcG&M]wEVfH9r7|Yf}v7>F5 zàӮ6DVwQ ><ӱzL2ݻG|>W_}UV9]@u2K)Z`%nݺ3)S0dȐO|駼dgg;Qڅ`&pO"$ujApjLˆ#dmZP˗3sL;5/X+`A| QkLfD>}p=0~x9شifwq xu0rD:lL#.TͲZZ'Ofĉi+D3{l^u@Oլ%n^ *!8? NS|\{Kס&οC -X:d+$B Pe[.C}!Nt,ZHsy`6oM7dUkdY:fX2|uQk:%AtK X433?5kTqZh?Ν;yᇩ_եwmiHCx5|`1c ~GnJ"Jrr2{/۷o_]vVC)>YG4DY<Ï5g}>}Dh-yyy<,ZQٔf.MmքK1>& |FF'RIT? 
//sVA׬#eYՄZ ,Cnݚ'x'l;xD1gyn`ݨS}QGz⹅kd jZ},Y%KTuwy||Gv܆:WY:jRuҲ6:S,}>ߋi ӟĺudp*:Zjٴib`1Hf1kI"5w 1ydlԩSep**խ[2qDu8#Վ Pkw>f>ȃ>H^^F|$ >oܪW?ԌE]* > j3D糲㡇"1r$ڵ+9o-Z]u[p WiS`ժU̚5&MDK5kٳ/ٳ֜R5J:^dZC|;QGׯ_?rss>}QS37r饗.iT\ ȹRm|Pڵ Lv޾}H$QmƖ-[[EP<:VVRGL|J`\RRӦMcݺu\xᅑ/D"_|1ׯgڴiV7&dڢNdnaUj*6Hj &:AUMSL᫯wޑ/Duo[QD1 U34Υ:U[C~zFI$ayۼ[V(r@AsKΥ7Ocǎ|<#$&ILLGaժUtQtI0hu:RA"xSraÆ7.eڲe ǎg8PZnS sO P[֢,=-|^#cRNj-î]8x``gϞ4l<6Sii)k׮uLG hܸ14j(fc޽ݻ7[xon`֭6+F MK: $ݺ6H}Fܹsiڴi A6o'|ž={ɓ4oޜVZѹsgFAZZZ?o+Wr9s -[e˖уf͚9?L)))`,333򕔔~~-ҥKHϊV 0a˖- PlA([bt,ϯ9A7Pu)5ŕW^i5a^`۶mJFF@WA ?i&5"Jս?ƍ+K, ͉{V^o߾);wVL^:bMѣGO>i({IIIK*7pʿQQzPIGma¯#:"БZ%k;?Q/8s2}t%--͕GY`AHu17ҬY3ϩS2x`w!p&MB*(Ν; e/~򳢙 gQ|>op%0 tWg-@c*[nhRG,.HM9NE'н{wV\Ik`I5ӥKVX!ڇssxoZSTCq,_<TS۷ꫯ2p@nFN82t*.5[n}jl|q^{5q͛ࡇ2xcÆ /g߾}?+''s=_|G:^_^^Ί+4hr OvYw|>|I^yPOOb7gjTдjT7SA|Q4s~)ZI$Yf|' :7O%ۀCO1# ݶAPi8 Ԛ5⣏>Ϸ%~xٱc+Wr\r%ϒ8oə3gx̧zFnı< 1/(NdرcÇ5CuY4k֌O?w^JKK} >/±ʕ+(.6dl޼9k׎ w^v4g.,^X***̧7 ЭT>2ck:ug}F۶AD9sp)kkΣ>l9 Dy|'\~l޼Y )bM"HvXÆ ɉ5 kfy=3g!vZz)O^eiQN}]Μ9ÿ/)-5([ Gן5>}:{o (r?{4h@XO?5|75]$Ik.qƢ.?Yp.hGe/6=MPT% p-_hP^}՞?kɆg$&&*Wp?keRRR_#QuFu1s_M 'Xr;C9~{%99p7|cy/lvԨQJyyϚ;wnxaeԪc/e;0O }Pj۵q2Rkx]}Pϗ4ӟ9S%Hj:Etj2jmj Ig3}w}?/~QCʼn,3g$###_VV… kD&))wya>dɸZ еkZQCg֋~ƍw⋆c/~`;%%y[naɁSN1gWJBc}g쏘cdT2@^n_lذaL2#Dwqh̆@JKn~T?ֶm[^}*QIHH4/j4h{~MrP#C?0c̝;zP"?n֬twiHZ^ϧQF׊:ua4]hذ!o(pƁ:b&qZL$''LرcaժU޽cǎѬY3ڵkǠA6l9֮]˞={u=zPRRG}Ē%K8x ݺuc9+V '';vp!ZjE߾}[i׮7ٶmNZ.***hݺ5kE\]<,X5k_:9tç~Q k4F?wEEQSRR4h˗/'11ǏSVV&Y^Ƞm۶8pd~gϐxK.aҤI̚5K%jW#Q6{+0Cn\kWm0zǏWbA-..Vx ~aÆO?>}yǏ+'OT $|iӂ/~Y#11Qyg2 8q<-[ \swM=w>//Ok*媫R;WexuDPkBI@,c?2}ԸNOZZRVVVOD( ׯ_̙3 ͛=\dѣԩS5@9yd9>s9YYYѣG]5??_[Dg)ogJyy2m4%)) 4P{9}"ߞ: (Ծdqm:O~dJhB X$$$(;w #J馛 g;vݻw7={xÇ+NrNA [l͏FM\'UމyRyţ FwL4It]\w^s=g*8y$O?4 b?WSơ7Cn:畕Or]w.CuÀXt}wJVuM FdEDhH@M6CpB6m… yIMM \f.rN8_|of`^z} 4(pO?e„ ;$''GvZ?߷~ۓ[|嗌=0 E]Č3/Yz53gd̘1Nb̘11G 0|>ŮXԩS')?a_I&w`;>o1My饗l~ذaKŮKEE\,[3fpݺu+=6mȑ# Mt3<òeذao&wyռy0a}?hڴ)կ5k.&MSNz-[WϮnRRR[̇ۢb8@M%oÜ{ /ꫯ4u_Mݹ裏SH1 5N>h$s?Xב"V;\pAMƸ{ Ӈ>Ν+ ƎˤI֭[2ece@wγ>W_MII ˗/&dNJJb̙ZMү_?5j+'8vƍ Ԗԭ[_|S 0)SopRRRByy9Ǐs=RaM1p@s樂5-E;?GKjCTбcGU[XbEܩA,X@yy9 ,׿sΝnӦ Ç`Ȑ! 22L͙gg͒%Ks=,Xq͟?Au1vXCn9s޽{s3~xƍǡCXh\cǎeƌl2poCjժ3g2sf@5…^7! {p# TjHLꇆ_G:^z,Z+"0f-rrrS\pW]uC aԭ[7ҥ (//gܹ̝;2dGj|G=ZϏ,dBuD[Ⱦ.p;Z5R<x탺i&%!!!p}vS*mڴ }P%))Iٸq̙c~߾}JJJHAx?_|Uyj:r IDAT_|Qa'DqjE@_ȴ-0! ꋪD6xYR 6z!>G_2dHеJzzzLg(͛7\C]SyPׯl߾ݶ ]w឵k]3{l5#F}(ʆ  ۦM8:}T@ܹ:CUVe|HG^&PُJ9R~QG!E>2]իWM?VD+AСC?ʖ-[B7xCiԨg5mTR^~ee>C5Km샪(RQQˁ/ۀ_Α:uT_9T^"FleGLiͣ_0zSO=؟QF<Ӂ}EQ1cg_픔)+̜uY5ǪW^y%ߣG~97n8ChݰC_խ͋jbar6*\&MW_M^HKK ްaCH:m5L`fٲe9r$5 shn=Ǐ7۷/w|<ݻwoC׃8?ٶr- 8>3RXؖTHCt$#z [yѨOy֭} 4w]w͛;mHKKcȐ!\gIƒ- +=џsv$qFC@mgLg}n߾=wq.:tꪫl'ǎK֭mPyG&6nȁ#Gg/bԨQ0F4FG"o6 n AL]0m,~b׮]\fϞMNN7oոqc}=\A]}}&']17;vׯ_{8g}ְFK ke.--t6VkIa\uD[b^CL|揘IMMeڴi۷>+0Œ6l` 2;v:0gwnF)***Xbf„ &''$%5(qn5 Pvb zr\%&&һw8\ʹ5N۶m]]nylHF1 50hoԭ[oիW~;w6 6o3N0zb(_L 4`ر >޶mku4 Ҥ$[-EQ a)J &ґxw,r8&NԫW'|?N?y[|́c̟?$ޱx/!sFKAO" PzM#4iİyz/'"5^y١)h47 \ch Z{g[7馛޽;͛7/9<0W:ss9琚[RQh"T2z$IDH$E*Loy8U>0a|(,Vk|̿EX<l#A+Oe!@`ƍ|5TK/K.}ӡnov=[M`־hiU}ь`:2X`Zg9`j㒂lѣGY~=!='))0ڷֿ[#%%۷?RXXիC.sǎ %qٳEk:|Ijb0GGD-ZnnԺԭ[J0ImDQW~&y$3摉:y$yQ$yEsD*Ho|^f>p}dK.m۶hn :EK dU>C͇DMv 5>@ OG4d{b}Ea6o>}ڐȎ7Zعs'ӦM3> |gqUMb+4ڑoì(ξ}bEpEpDŸ#F FE}ĠqIxiD0nQ#FԠ((qayAePEfe?zLs湯 -<3ox iMb;k%K1s$riiӦx≉iӦEr<{H k.nݺ ^ӿb'Oge˖ziEqśoɽkfW8b(n]Sq^xa7o^#sG)j d(r!S~Hu]IALhiyf.R۩`mTI3}\|y*N0q)M6'uѣG~ߥ,:uj%dFe?%h5|$YZk.1z("oN;3R.,޹s'wyg555z뭑HO? 
s]wUm߾=gyfsв[tvAuuu:.f#yFirMkq&L(Csꩧ&OδiL64i4I[>fľKJn{kq˖-2dHVʗOƎ[ov{^(FY/ܹs~\'L߾}K.,t͛7'}lW=:&9s&_~y/K,nHI& {<7Bn55ŴKl6 >y8@kRVVVгOS?ŋ袋|3rH>DI'ToT:thRkc=fk~߰dɒHNc_g|L0!soᆤn-p&-[0xWW^yesŎ;8s/S_J?GT'f*9 Euu5#F-Z|yy7oXdIλ_~I4y?cժU 2$Ik*ڴi̮=':z37ٳg8;LO_uv?ؚZSS?=ؤ wmzz:ӧ:۷oOLZy̘1I d^zn8:R9Ne@죁_[c]NxꩧO$3&6vsαB;i8-~HӨ 3,rG/SN9kTƍgqF7o?=ztAvx'ZX]]_1c6l`׮],^￟޽{' !۷oA:&Lozp'i'QoƎ;8묳׿"eN;oIy_5^z):t`РA\s5{/7x##F`sM%K.K\s5ZW-[Сguo뮻o~3ڷoϸq?0`@}«ʈ#!e!Ӓc5CŁr p=0{rΜ9s9Ě ~_+н{zL-mݸx7رcUq7vz-0ü o $Mm6N?t^|ż,7KrYg?> RMr`kO+]B`ڴi 2 V[ݭ_ÜS?ۺ*-)"-=h1_O><+A-ܒ8aڵ+swa曬[-[PZZJΝׯGf#FХKsؿW^,X3fO_Ӿ}{zE]DYYYg*=z4O>$gfժUTTTбcG/K 8_r%O?+† hժ:uJZkON56a3sLfϞ{W_}Eee%:us < . i+?.26l[8ʜ 㗿%Q!~:b:ANjCtAoY f?C妛nn7u1p2{lfϞg}ƦMشi>ҽ{wA;fvz 6H8&`Ya܌aIGKL# ӧss=Nt靁 1e3+z-tVSW&x78#;i5nu+2Gѿ $&pZ,ߺ1? m@16?`C/O>QF%-}"ƍ Oիm6:;wR 15aǸ`޴a;< ʕ+sX\AϪU6lÇg͚5KtYP{ֵR*sZL9pM'QYY7QG믿˲۷s&Z8R AsuѧOĚ8c&|~ΩiL[M侣!;f_Ŷ'Oa?!555<t-h9plojSCTy j,A.a.Y|̙C=?~<Aqq̜98QJs l;,iwo1TQ^_|r /~sr73j(;0M8?nܸ'/u` n 64ԏ )8AN}p No|-^z)w -)os3vX1v^¾zcj`P0rL%|&9ihFD'-p4l\v=m۶\s5\})^+G&IS^^wͦM. j\x JD;1[X4 `$Z's/Ҽys.B~qA.i^5 0z#IRZ<zeë#n-7RV7oȑ#+۷oV~C?>W2jڶm ╸ IDATmۘ2e sK. t/:^Cҫ#{a@]׾ #ęH$N;4*N?tN/df͚}6Y5MC\{1$fWߠ ZGR!TqPHH`iuRJ θ ΂Ceر 8 BL4^xo\v j?BfT9-Z|u$(uR]-9c91cpy祵 xYl<?x\ 5'k8.ֿ tPMԶiu 9.zA__LK7|ã>wYyёarsnaiH\A-PZ?I5`/ÁAڵ+cƌᢋ.V!%nOSSS~"41k5 ZЬ NhRF%t>nS!@|=Є_L7&[s΍{'NZi%۴9uuVG@_<ӗޛ!CPVVƩJցv n͚5o&mX3*Oґ ٵ sN)J-8NNQ[D ׯuKZ`S2IB`)IKio0[Nf ZQ8.!:b.Ht=P:t!C>|˗'twߍx715lj s-OQjI`M-c#JYZjũJYYC G?Q?Oh lܸYf1}8-pf'Xh:b.m4(hE#Q1*vRdzޭ9?NQÆ cРAbdUUU̘13gW_źǐ|haء)we%5HGv8ȖMMM n) YԤ'ml~Ωc4(sI9ZSȃ؛4i '@YYÆ [nDY|93f`;q[kp%$/%7sjwQBGPChTVֽ[)ޮ|>8]n"d-[2p@  '. /_λ̙33g۶m8]fᬃ6?+AiBc3*mNwV2褚3 6=jI9C \FO~zI˖-SB^Xv-/fbƍqnqF8[=~:bӐ\6mk'{&:b:dہq83wSnݺ%Ӈvڥ;w߱hѢDݘ- 8:Pґ(F#aXԖZٮr^ ɺMk}K PDpRɓ6c= 'y8N gȴnݚ>}&]ʸAkʕ+Ypabٺukl!dӰAF͘ s =2zex^~¸`W/i֬Gy$}IQGE-l!Klذ!IG.\!Jpc2AitD\":~sS3L=npze zu}nd9В>}z 9~`ѢE,\0>r..Q GIZM9 A)5u=44&haO_BLҜetNJĩztiӦM=CWsd@~ak G;>cjxqi3Ww?}? z% ТE zА>}pGҼyBlܸ1ɀ\p!ׯOq8*  &z{g #a6688몖Pnt钤#{f#rR> IGVXAvr nyawzXIjF]oJuvPxZ_KY)*[+Y){TCHts=ݻwz8Xzuh"tqv}k3a8AʠJ h8t0o8C zHY={&HeA|ZF׮]~c0lZbL-LGZK J۸u1*ݻOGQug^8C RnmҤ ݺuKґ^ziPQQŋtdqǏlqFzny5F#8`Yoy(Pؤ;ҽ9vP3L n-ђo=kR pM) }!:b$ޡHVqRzrEZQhڴ)~8xUK:vب{oܹuӐ5kְj*.]JuuuGC-SG_Czptd A(v'y5Z"ݛc5Ot`\FuTmFͰln}Vf>vW{>cn* 3P#UUUVӛW^^Vll_?ӝ.1Kh~x2p#~MGl{ӰnP#qlI-ZcǎlNh۶m&_3v}Yk׮e͙~q@:%a̽C15g#aokxѻfH:է8Ó:{_^4iBiiU?~ Zq@ꫯm G;Q%;t@1ʹH+R8\itoc״sV%qifckvE-mNjS^3 (%N^{ENiӦ [UVn:iײez֚****"iW^^@F\`:j;;?͒I!TB]&ԯ;T!Dd`imȀ-iiud?R{6m$ Ͷmۦ!nm;vӅ8n_}k֬aÆ 0M#ـ܀~TI[oh@*:eёb"$=8iI;q쓬<-))I:t萒~͚5]vM6f֭[6jVuuʄËl.ё(ř>g(pRh1+!Az7iVXle˖J*++Sԟ+jp WDsFݽA4#Ș 'fkMgׯC*HQGlKش$9^-qm$H,%%% Ց u&[Ӑu8h|(ꈭׅ_ݛw2DG#QZTHMgNk'-)nX͛7u֔PUUEee%;w 1T#sƸ-Ri-͆Dm1-R'-O"ݛcu,.+]rVR ~l!~L3ϸ4kr1.;8pY[ GZ4+j4g*[Kkqk=΁Tw#Ղ0c2JlQ5߯j݇VWGy~P]iΨMG0g415q첤t$1)e#A-IϽe連#qsd 8^ഀ:-e_舻Ҕ(ˣ$ZuDKPJ ^dZroe=XKJZ= ?Q1>~MКzod* '5Js'NSgԯZT36z=) {۾r_&R[gPUAƥ_2HEat ؛!^;58Lt8nm-[AJu@6;:; pٌJ?IkY$="YKܴWKP_CtĦ!~Hk`ZL$ o@ܴEDZWL/=bT+= m:59mi3@n#]lo/& Ӓ(vHVdDKRtP?zc>T{>W`HQV jȌz2*(-M ɕ j̆1R q0rHRrL -إkH-is]T3N+LC.kItěӒLh1甊Ab C2U3gB܀y&^a:tT$,ejzj6!M)#iҠZP24Hb:xf`:q ͠V4)U 6畀 c2jgP*S*挆~IL 6 18N5~UCEG8Z)bT5$ݠ)hIHQ%~dw9 cm -#GCDG@4;$MQmZb#$Τ)av{ #ql# Z#Ĺ9_ͧA8qP3Jg{gdz~j c2 2BR9c09~e>3dH@fdGcST ˨:Ƥґ(tTG5 h# ZЉay~]iI=ɔ&Qf:aöAZQK5˗pdpbD1HIYuQ4$ -9cc;#tȗl~ёB)U XlA"jadtͨFhH*o//n{MUh)@#E#3cӛqXQz_bPo^!q"NhX#4tѨI&$]bَ AGjFԡBA"6Ib%/zRj=_m~JZ5(Oc%>E^V6bmD0g/@G`IALZRھq8Š 36(Յ8Vt]P!6=?=a\g+G*:b;GG̼8:t.uy2?G"i?ӊz? 
[binary PNG image data; not representable as text]
nipype-0.9.2/doc/images/nipype_architecture_overview2.svg: [SVG diagram; only its text labels are recoverable: image/svg+xml; (Command-line programs); (Matlab functions); (Command-line programs); SPM Interface; Uniform Python API; FSL Interface; FreeSurfer Interface; Interfaces; Idiosynchratic, Heterogeneous APIs; SPM; FSL; FreeSurfer; Workflow Engine; Workflow - inputs/outputs setting - graph transformations (e.g., iterable expansion); .run(); Workflow; (Map)Node; Interface; MultiProc; Linear; S/OGE; IPython; Torque; SSH; Execution Plugins]
nipype-0.9.2/doc/index.rst000066400000000000000000000040311227300005300154220ustar00rootroot00000000000000
.. list-table::

 * - .. image:: images/nipype_architecture_overview2.png
       :width: 100 %
   - .. container::

        Current neuroimaging software offers users an incredible opportunity
        to analyze data using a variety of different algorithms. However, this
        has resulted in a heterogeneous collection of specialized applications
        without transparent interoperability or a uniform operating interface.

        *Nipype*, an open-source, community-developed initiative under the
        umbrella of NiPy_, is a Python project that provides a uniform
        interface to existing neuroimaging software and facilitates
        interaction between these packages within a single workflow. Nipype
        provides an environment that encourages interactive exploration of
        algorithms from different packages (e.g., SPM_, FSL_, FreeSurfer_,
        Camino_, MRtrix_, MNE_, AFNI_, Slicer_), eases the design of workflows
        within and between packages, and reduces the learning curve necessary
        to use different packages. Nipype is creating a collaborative platform
        for neuroimaging software development in a high-level language and
        addressing limitations of existing pipeline systems.

        *Nipype* allows you to:

        * easily interact with tools from different software packages
        * combine processing steps from different software packages
        * develop new workflows faster by reusing common steps from old ones
        * process data faster by running it in parallel on many cores/machines
        * make your research easily reproducible
        * share your processing workflows with the community

.. admonition:: Reference

   Gorgolewski K, Burns CD, Madison C, Clark D, Halchenko YO, Waskom ML,
   Ghosh SS. (2011). Nipype: a flexible, lightweight and extensible
   neuroimaging data processing framework in Python. Front. Neuroinform.
   5:13. `Download`__

   __ paper_

.. tip:: To get started, click Quickstart above. The Links box on the right
   is available on any page of this website.

..
include:: links_names.txt nipype-0.9.2/doc/interfaces/000077500000000000000000000000001227300005300157065ustar00rootroot00000000000000nipype-0.9.2/doc/interfaces/.gitignore000066400000000000000000000000131227300005300176700ustar00rootroot00000000000000/generated nipype-0.9.2/doc/interfaces/index.rst000066400000000000000000000002501227300005300175440ustar00rootroot00000000000000.. _interface-index: ######################### Interfaces and Algorithms ######################### :Release: |version| :Date: |today| .. include:: generated/gen.rst nipype-0.9.2/doc/links_names.txt000066400000000000000000000117741227300005300166410ustar00rootroot00000000000000.. This (-*- rst -*-) format file contains commonly used link targets and name substitutions. It may be included in many files, therefore it should only contain link targets and name substitutions. Try grepping for "^\.\. _" to find plausible candidates for this list. .. NOTE: reST targets are __not_case_sensitive__, so only one target definition is needed for nipy, NIPY, Nipy, etc... .. _nipy: http://nipy.org .. _`NIPY developer resources`: http://nipy.org/devel .. _`Brain Imaging Center`: http://bic.berkeley.edu/ .. _nitime: http://nipy.org/nitime/ .. _nibabel: http://nipy.org/nibabel/ .. _nipype: http://nipy.org/nipype/ .. _ConnectomeViewer: http://www.connectomeviewer.org/viewer/ .. _NeuroDebian: http://neuro.debian.net/ .. Documentation tools .. _graphviz: http://www.graphviz.org/ .. _Sphinx: http://sphinx.pocoo.org/ .. _`Sphinx reST`: http://sphinx.pocoo.org/rest.html .. _reST: http://docutils.sourceforge.net/rst.html .. _docutils: http://docutils.sourceforge.net .. Licenses .. _GPL: http://www.gnu.org/licenses/gpl.html .. _BSD: http://www.opensource.org/licenses/bsd-license.php .. _LGPL: http://www.gnu.org/copyleft/lesser.html .. Working process .. _pynifti: http://niftilib.sourceforge.net/pynifti/ .. _nifticlibs: http://nifti.nimh.nih.gov .. _nifti: http://nifti.nimh.nih.gov .. _`nipy sourceforge`: http://nipy.sourceforge.net/ .. _sourceforge: http://nipy.sourceforge.net/ .. _`nipy launchpad`: https://launchpad.net/nipy .. _launchpad: https://launchpad.net/ .. _`nipy trunk`: https://code.launchpad.net/~nipy-developers/nipy/trunk .. _`nipy mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel .. _`nipy bugs`: https://bugs.launchpad.net/nipy .. Code support stuff .. _pychecker: http://pychecker.sourceforge.net/ .. _pylint: http://www.logilab.org/project/pylint .. _pyflakes: http://divmod.org/trac/wiki/DivmodPyflakes .. _virtualenv: http://pypi.python.org/pypi/virtualenv .. _git: http://git.or.cz/ .. _flymake: http://flymake.sourceforge.net/ .. _rope: http://rope.sourceforge.net/ .. _pymacs: http://pymacs.progiciels-bpi.ca/pymacs.html .. _ropemacs: http://rope.sourceforge.net/ropemacs.html .. _ECB: http://ecb.sourceforge.net/ .. _emacs_python_mode: http://www.emacswiki.org/cgi-bin/wiki/PythonMode .. _doctest-mode: http://www.cis.upenn.edu/~edloper/projects/doctestmode/ .. _bazaar: http://bazaar-vcs.org/ .. _subversion: http://subversion.tigris.org/ .. _nose: http://somethingaboutorange.com/mrl/projects/nose .. _`python coverage tester`: http://nedbatchelder.com/code/modules/coverage.html .. Other python projects .. _numpy: http://www.scipy.org/NumPy .. _scipy: http://www.scipy.org .. _ipython: http://ipython.scipy.org .. _`ipython manual`: http://ipython.scipy.org/doc/manual/html .. _matplotlib: http://matplotlib.sourceforge.net .. _ETS: http://code.enthought.com/projects/tool-suite.php .. 
_`Enthought Tool Suite`: http://code.enthought.com/projects/tool-suite.php .. _python: http://www.python.org .. _mayavi: http://mayavi.sourceforge.net/ .. _sympy: http://code.google.com/p/sympy/ .. _networkx: http://networkx.lanl.gov/ .. _pythonxy: http://www.pythonxy.com/ .. _EPD: http://www.enthought.com/products/epd.php .. _Traits: http://code.enthought.com/projects/traits/ .. _Anaconda: https://store.continuum.io/cshop/anaconda/ .. _Canopy: https://www.enthought.com/products/canopy/ .. Python imaging projects .. _PyMVPA: http://www.pymvpa.org .. _BrainVISA: http://brainvisa.info .. _anatomist: http://brainvisa.info .. Not so python imaging projects .. _matlab: http://www.mathworks.com .. _spm: http://www.fil.ion.ucl.ac.uk/spm .. _eeglab: http://sccn.ucsd.edu/eeglab .. _AFNI: http://afni.nimh.nih.gov/afni .. _FSL: http://www.fmrib.ox.ac.uk/fsl .. _FreeSurfer: http://surfer.nmr.mgh.harvard.edu .. _voxbo: http://www.voxbo.org .. _Slicer: http://slicer.org .. _Camino: http://web4.cs.ucl.ac.uk/research/medic/camino/pmwiki/pmwiki.php .. _Camino2Trackvis: http://camino-trackvis.sourceforge.net/ .. _MRtrix: http://www.brain.org.au/software/mrtrix/index.html .. _MNE: https://martinos.org/mne/index.html .. General software .. _gcc: http://gcc.gnu.org .. _xcode: http://developer.apple.com/TOOLS/xcode .. _mingw: http://www.mingw.org .. _macports: http://www.macports.org/ .. _Vagrant: http://www.vagrantup.com/ .. _Docker: http://www.docker.io/ .. _Virtualbox: https://www.virtualbox.org/ .. Functional imaging labs .. _`functional imaging laboratory`: http://www.fil.ion.ucl.ac.uk .. _FMRIB: http://www.fmrib.ox.ac.uk .. Other organizations .. _enthought: http://www.enthought.com .. _kitware: http://www.kitware.com .. General information links .. _`wikipedia FMRI`: http://en.wikipedia.org/wiki/Functional_magnetic_resonance_imaging .. _`wikipedia PET`: http://en.wikipedia.org/wiki/Positron_emission_tomography .. Mathematical methods .. _`wikipedia ICA`: http://en.wikipedia.org/wiki/Independent_component_analysis .. _`wikipedia PCA`: http://en.wikipedia.org/wiki/Principal_component_analysis .. Nipype Paper .. _paper: http://www.frontiersin.org/Neuroinformatics/10.3389/fninf.2011.00013/abstract nipype-0.9.2/doc/make.bat000066400000000000000000000056171227300005300152010ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation set SPHINXBUILD=sphinx-build set ALLSPHINXOPTS=-d _build/doctrees %SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (_build\*) do rmdir /q /s %%i del /q /s _build\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% _build/html echo. echo.Build finished. The HTML pages are in _build/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% _build/dirhtml echo. echo.Build finished. 
The HTML pages are in _build/dirhtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% _build/pickle echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% _build/json echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% _build/htmlhelp echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in _build/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% _build/qthelp echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in _build/qthelp, like this: echo.^> qcollectiongenerator _build\qthelp\nipype.qhcp echo.To view the help file: echo.^> assistant -collectionFile _build\qthelp\nipype.ghc goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% _build/latex echo. echo.Build finished; the LaTeX files are in _build/latex. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% _build/changes echo. echo.The overview file is in _build/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% _build/linkcheck echo. echo.Link check complete; look for any errors in the above output ^ or in _build/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% _build/doctest echo. echo.Testing of doctests in the sources finished, look at the ^ results in _build/doctest/output.txt. goto end ) :end nipype-0.9.2/doc/quickstart.rst000066400000000000000000000017231227300005300165120ustar00rootroot00000000000000.. _quickstart: ========== Quickstart ========== Downloading and installing ========================== .. toctree:: :maxdepth: 1 users/install users/vagrant Beginner's guide ================ Beginner's tutorials (IPython Notebooks). `Availible here`__ Michael Notter's Nipype guide. `Available here`__ Dissecting Nipype Workflows. `Available here`__ Introductory slides [older]. `Available here`__ __ https://github.com/mwaskom/nipype_concepts __ http://miykael.github.com/nipype-beginner-s-guide/index.html __ http://slideviewer.herokuapp.com/url/raw.github.com/nipy/nipype/master/examples/nipype_tutorial.ipynb?theme=sky __ http://satra.github.com/intro2nipype User guides =========== .. toctree:: :maxdepth: 1 users/interface_tutorial users/pipeline_tutorial users/plugins users/debug Developer guides ================ .. toctree:: :maxdepth: 1 devel/writing_custom_interfaces devel/gitwash/index .. include:: links_names.txt nipype-0.9.2/doc/searchresults.rst000066400000000000000000000125201227300005300172040ustar00rootroot00000000000000.. This displays the search results from the Google Custom Search engine. Don't link to it directly. Search results ============== .. raw:: html

   [Google Custom Search raw HTML embed; visible text: "Loading"]
nipype-0.9.2/doc/sphinxext/000077500000000000000000000000001227300005300156155ustar00rootroot00000000000000nipype-0.9.2/doc/sphinxext/README.txt000066400000000000000000000007551227300005300173220ustar00rootroot00000000000000=================== Sphinx Extensions =================== We've copied these sphinx extensions over from nipy-core. Any edits should be done upstream in nipy-core, not here in nipype! These a are a few sphinx extensions we are using to build the nipy documentation. In this file we list where they each come from, since we intend to always push back upstream any modifications or improvements we make to them. * From numpy: * numpy_ext * From ipython * ipython_console_highlighting nipype-0.9.2/doc/sphinxext/autosummary_generate.py000077500000000000000000000166231227300005300224420ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: r""" autosummary_generate.py OPTIONS FILES Generate automatic RST source files for items referred to in autosummary:: directives. Each generated RST file contains a single auto*:: directive which extracts the docstring of the referred item. Example Makefile rule:: generate: ./ext/autosummary_generate.py -o source/generated source/*.rst """ import glob, re, inspect, os, optparse, pydoc from autosummary import import_by_name try: from phantom_import import import_phantom_module except ImportError: import_phantom_module = lambda x: x def main(): p = optparse.OptionParser(__doc__.strip()) p.add_option("-p", "--phantom", action="store", type="string", dest="phantom", default=None, help="Phantom import modules from a file") p.add_option("-o", "--output-dir", action="store", type="string", dest="output_dir", default=None, help=("Write all output files to the given directory (instead " "of writing them as specified in the autosummary:: " "directives)")) options, args = p.parse_args() if len(args) == 0: p.error("wrong number of arguments") if options.phantom and os.path.isfile(options.phantom): import_phantom_module(options.phantom) # read names = {} for name, loc in get_documented(args).items(): for (filename, sec_title, keyword, toctree) in loc: if toctree is not None: path = os.path.join(os.path.dirname(filename), toctree) names[name] = os.path.abspath(path) # write for name, path in sorted(names.items()): if options.output_dir is not None: path = options.output_dir if not os.path.isdir(path): os.makedirs(path) try: obj, name = import_by_name(name) except ImportError, e: print "Failed to import '%s': %s" % (name, e) continue fn = os.path.join(path, '%s.rst' % name) if os.path.exists(fn): # skip continue f = open(fn, 'w') try: f.write('%s\n%s\n\n' % (name, '='*len(name))) if inspect.isclass(obj): if issubclass(obj, Exception): f.write(format_modulemember(name, 'autoexception')) else: f.write(format_modulemember(name, 'autoclass')) elif inspect.ismodule(obj): f.write(format_modulemember(name, 'automodule')) elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj): f.write(format_classmember(name, 'automethod')) elif callable(obj): f.write(format_modulemember(name, 'autofunction')) elif hasattr(obj, '__get__'): f.write(format_classmember(name, 'autoattribute')) else: f.write(format_modulemember(name, 'autofunction')) finally: f.close() def format_modulemember(name, directive): parts = name.split('.') mod, name = '.'.join(parts[:-1]), parts[-1] return ".. currentmodule:: %s\n\n.. 
%s:: %s\n" % (mod, directive, name) def format_classmember(name, directive): parts = name.split('.') mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:]) return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) def get_documented(filenames): """ Find out what items are documented in source/*.rst See `get_documented_in_lines`. """ documented = {} for filename in filenames: f = open(filename, 'r') lines = f.read().splitlines() documented.update(get_documented_in_lines(lines, filename=filename)) f.close() return documented def get_documented_in_docstring(name, module=None, filename=None): """ Find out what items are documented in the given object's docstring. See `get_documented_in_lines`. """ try: obj, real_name = import_by_name(name) lines = pydoc.getdoc(obj).splitlines() return get_documented_in_lines(lines, module=name, filename=filename) except AttributeError: pass except ImportError, e: print "Failed to import '%s': %s" % (name, e) return {} def get_documented_in_lines(lines, module=None, filename=None): """ Find out what items are documented in the given lines Returns ------- documented : dict of list of (filename, title, keyword, toctree) Dictionary whose keys are documented names of objects. The value is a list of locations where the object was documented. Each location is a tuple of filename, the current section title, the name of the directive, and the value of the :toctree: argument (if present) of the directive. """ title_underline_re = re.compile("^[-=*_^#]{3,}\s*$") autodoc_re = re.compile(".. auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$") autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*') module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?') toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') documented = {} current_title = [] last_line = None toctree = None current_module = module in_autosummary = False for line in lines: try: if in_autosummary: m = toctree_arg_re.match(line) if m: toctree = m.group(1) continue if line.strip().startswith(':'): continue # skip options m = autosummary_item_re.match(line) if m: name = m.group(1).strip() if current_module and not name.startswith(current_module + '.'): name = "%s.%s" % (current_module, name) documented.setdefault(name, []).append( (filename, current_title, 'autosummary', toctree)) continue if line.strip() == '': continue in_autosummary = False m = autosummary_re.match(line) if m: in_autosummary = True continue m = autodoc_re.search(line) if m: name = m.group(2).strip() if m.group(1) == "module": current_module = name documented.update(get_documented_in_docstring( name, filename=filename)) elif current_module and not name.startswith(current_module+'.'): name = "%s.%s" % (current_module, name) documented.setdefault(name, []).append( (filename, current_title, "auto" + m.group(1), None)) continue m = title_underline_re.match(line) if m and last_line: current_title = last_line.strip() continue m = module_re.match(line) if m: current_module = m.group(2) continue finally: last_line = line return documented if __name__ == "__main__": main() nipype-0.9.2/doc/sphinxext/ipython_console_highlighting.py000066400000000000000000000067321227300005300241400ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """reST directive for syntax-highlighting ipython interactive sessions. 
""" #----------------------------------------------------------------------------- # Needed modules # Standard library import re # Third party from pygments.lexer import Lexer, do_insertions from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer, PythonTracebackLexer) from pygments.token import Comment, Generic from sphinx import highlighting #----------------------------------------------------------------------------- # Global constants line_re = re.compile('.*?\n') #----------------------------------------------------------------------------- # Code begins - classes and functions class IPythonConsoleLexer(Lexer): """ For IPython console output or doctests, such as: .. sourcecode:: ipython In [1]: a = 'foo' In [2]: a Out[2]: 'foo' In [3]: print a foo In [4]: 1 / 0 Notes: - Tracebacks are not currently supported. - It assumes the default IPython prompts, not customized ones. """ name = 'IPython console session' aliases = ['ipython'] mimetypes = ['text/x-ipython-console'] input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)") output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)") continue_prompt = re.compile(" \.\.\.+:") tb_start = re.compile("\-+") def get_tokens_unprocessed(self, text): pylexer = PythonLexer(**self.options) tblexer = PythonTracebackLexer(**self.options) curcode = '' insertions = [] for match in line_re.finditer(text): line = match.group() input_prompt = self.input_prompt.match(line) continue_prompt = self.continue_prompt.match(line.rstrip()) output_prompt = self.output_prompt.match(line) if line.startswith("#"): insertions.append((len(curcode), [(0, Comment, line)])) elif input_prompt is not None: insertions.append((len(curcode), [(0, Generic.Prompt, input_prompt.group())])) curcode += line[input_prompt.end():] elif continue_prompt is not None: insertions.append((len(curcode), [(0, Generic.Prompt, continue_prompt.group())])) curcode += line[continue_prompt.end():] elif output_prompt is not None: insertions.append((len(curcode), [(0, Generic.Output, output_prompt.group())])) curcode += line[output_prompt.end():] else: if curcode: for item in do_insertions(insertions, pylexer.get_tokens_unprocessed(curcode)): yield item curcode = '' insertions = [] yield match.start(), Generic.Output, line if curcode: for item in do_insertions(insertions, pylexer.get_tokens_unprocessed(curcode)): yield item #----------------------------------------------------------------------------- # Register the extension as a valid pygments lexer highlighting.lexers['ipython'] = IPythonConsoleLexer() nipype-0.9.2/doc/sphinxext/numpy_ext/000077500000000000000000000000001227300005300176455ustar00rootroot00000000000000nipype-0.9.2/doc/sphinxext/numpy_ext/__init__.py000066400000000000000000000000001227300005300217440ustar00rootroot00000000000000nipype-0.9.2/doc/sphinxext/numpy_ext/docscrape.py000066400000000000000000000361011227300005300221630ustar00rootroot00000000000000"""Extract reference documentation from the NumPy source tree. """ import inspect import textwrap import re import pydoc from StringIO import StringIO from warnings import warn class Reader(object): """A line-based string reader. """ def __init__(self, data): """ Parameters ---------- data : str String with lines separated by '\n'. 
""" if isinstance(data,list): self._str = data else: self._str = data.split('\n') # store string as list of lines self.reset() def __getitem__(self, n): return self._str[n] def reset(self): self._l = 0 # current line nr def read(self): if not self.eof(): out = self[self._l] self._l += 1 return out else: return '' def seek_next_non_empty_line(self): for l in self[self._l:]: if l.strip(): break else: self._l += 1 def eof(self): return self._l >= len(self._str) def read_to_condition(self, condition_func): start = self._l for line in self[start:]: if condition_func(line): return self[start:self._l] self._l += 1 if self.eof(): return self[start:self._l+1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() def is_empty(line): return not line.strip() return self.read_to_condition(is_empty) def read_to_next_unindented_line(self): def is_unindented(line): return (line.strip() and (len(line.lstrip()) == len(line))) return self.read_to_condition(is_unindented) def peek(self,n=0): if self._l + n < len(self._str): return self[self._l + n] else: return '' def is_empty(self): return not ''.join(self._str).strip() class NumpyDocString(object): def __init__(self, docstring, config={}): docstring = textwrap.dedent(docstring).split('\n') self._doc = Reader(docstring) self._parsed_data = { 'Signature': '', 'Summary': [''], 'Extended Summary': [], 'Parameters': [], 'Returns': [], 'Raises': [], 'Warns': [], 'Other Parameters': [], 'Attributes': [], 'Methods': [], 'See Also': [], 'Notes': [], 'Warnings': [], 'References': '', 'Examples': '', 'index': {} } self._parse() def __getitem__(self,key): return self._parsed_data[key] def __setitem__(self,key,val): if not self._parsed_data.has_key(key): warn("Unknown section %s" % key) else: self._parsed_data[key] = val def _is_at_section(self): self._doc.seek_next_non_empty_line() if self._doc.eof(): return False l1 = self._doc.peek().strip() # e.g. Parameters if l1.startswith('.. 
index::'): return True l2 = self._doc.peek(1).strip() # ---------- or ========== return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) def _strip(self,doc): i = 0 j = 0 for i,line in enumerate(doc): if line.strip(): break for j,line in enumerate(doc[::-1]): if line.strip(): break return doc[i:len(doc)-j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() while not self._is_at_section() and not self._doc.eof(): if not self._doc.peek(-1).strip(): # previous line was empty section += [''] section += self._doc.read_to_next_empty_line() return section def _read_sections(self): while not self._doc.eof(): data = self._read_to_next_section() name = data[0].strip() if name.startswith('..'): # index section yield name, data[1:] elif len(data) < 2: yield StopIteration else: yield name, self._strip(data[2:]) def _parse_param_list(self,content): r = Reader(content) params = [] while not r.eof(): header = r.read().strip() if ' : ' in header: arg_name, arg_type = header.split(' : ')[:2] else: arg_name, arg_type = header, '' desc = r.read_to_next_unindented_line() desc = dedent_lines(desc) params.append((arg_name,arg_type,desc)) return params _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) def _parse_see_also(self, content): """ func_name : Descriptive text continued text another_func_name : Descriptive text func_name1, func_name2, :meth:`func_name`, func_name3 """ items = [] def parse_item_name(text): """Match ':role:`name`' or 'name'""" m = self._name_rgx.match(text) if m: g = m.groups() if g[1] is None: return g[3], None else: return g[2], g[1] raise ValueError("%s is not a item name" % text) def push_item(name, rest): if not name: return name, role = parse_item_name(name) items.append((name, list(rest), role)) del rest[:] current_func = None rest = [] for line in content: if not line.strip(): continue m = self._name_rgx.match(line) if m and line[m.end():].strip().startswith(':'): push_item(current_func, rest) current_func, line = line[:m.end()], line[m.end():] rest = [line.split(':', 1)[1].strip()] if not rest[0]: rest = [] elif not line.startswith(' '): push_item(current_func, rest) current_func = None if ',' in line: for func in line.split(','): if func.strip(): push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: rest.append(line.strip()) push_item(current_func, rest) return items def _parse_index(self, section, content): """ .. 
index: default :refguide: something, else, and more """ def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if len(section) > 1: out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if len(line) > 2: out[line[1]] = strip_each_in(line[2].split(',')) return out def _parse_summary(self): """Grab signature (if given) and summary""" if self._is_at_section(): return summary = self._doc.read_to_next_empty_line() summary_str = " ".join([s.strip() for s in summary]).strip() if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): self['Signature'] = summary_str if not self._is_at_section(): self['Summary'] = self._doc.read_to_next_empty_line() else: self['Summary'] = summary if not self._is_at_section(): self['Extended Summary'] = self._read_to_next_section() def _parse(self): self._doc.reset() self._parse_summary() for (section,content) in self._read_sections(): if not section.startswith('..'): section = ' '.join([s.capitalize() for s in section.split(' ')]) if section in ('Parameters', 'Returns', 'Raises', 'Warns', 'Other Parameters', 'Attributes', 'Methods'): self[section] = self._parse_param_list(content) elif section.startswith('.. index::'): self['index'] = self._parse_index(section, content) elif section == 'See Also': self['See Also'] = self._parse_see_also(content) else: self[section] = content # string conversion routines def _str_header(self, name, symbol='-'): return [name, len(name)*symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): if self['Signature']: return [self['Signature'].replace('*','\*')] + [''] else: return [''] def _str_summary(self): if self['Summary']: return self['Summary'] + [''] else: return [] def _str_extended_summary(self): if self['Extended Summary']: return self['Extended Summary'] + [''] else: return [] def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) for param,param_type,desc in self[name]: out += ['%s : %s' % (param, param_type)] out += self._str_indent(desc) out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += self[name] out += [''] return out def _str_see_also(self, func_role): if not self['See Also']: return [] out = [] out += self._str_header("See Also") last_had_desc = True for func, desc, role in self['See Also']: if role: link = ':%s:`%s`' % (role, func) elif func_role: link = ':%s:`%s`' % (func_role, func) else: link = "`%s`_" % func if desc or last_had_desc: out += [''] out += [link] else: out[-1] += ", %s" % link if desc: out += self._str_indent([' '.join(desc)]) last_had_desc = True else: last_had_desc = False out += [''] return out def _str_index(self): idx = self['index'] out = [] out += ['.. 
index:: %s' % idx.get('default','')] for section, references in idx.iteritems(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] return out def __str__(self, func_role=''): out = [] out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) for s in ('Notes','References','Examples'): out += self._str_section(s) for param_list in ('Attributes', 'Methods'): out += self._str_param_list(param_list) out += self._str_index() return '\n'.join(out) def indent(str,indent=4): indent_str = ' '*indent if str is None: return indent_str lines = str.split('\n') return '\n'.join(indent_str + l for l in lines) def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") def header(text, style='-'): return text + '\n' + style*len(text) + '\n' class FunctionDoc(NumpyDocString): def __init__(self, func, role='func', doc=None, config={}): self._f = func self._role = role # e.g. "func" or "meth" if doc is None: if func is None: raise ValueError("No function or docstring given") doc = inspect.getdoc(func) or '' NumpyDocString.__init__(self, doc) if not self['Signature'] and func is not None: func, func_name = self.get_func() try: # try to read signature argspec = inspect.getargspec(func) argspec = inspect.formatargspec(*argspec) argspec = argspec.replace('*','\*') signature = '%s%s' % (func_name, argspec) except TypeError, e: signature = '%s()' % func_name self['Signature'] = signature def get_func(self): func_name = getattr(self._f, '__name__', self.__class__.__name__) if inspect.isclass(self._f): func = getattr(self._f, '__call__', self._f.__init__) else: func = self._f return func, func_name def __str__(self): out = '' func, func_name = self.get_func() signature = self['Signature'].replace('*', '\*') roles = {'func': 'function', 'meth': 'method'} if self._role: if not roles.has_key(self._role): print "Warning: invalid role %s" % self._role out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) return out class ClassDoc(NumpyDocString): extra_public_methods = ['__call__'] def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, config={}): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls if modulename and not modulename.endswith('.'): modulename += '.' 
self._mod = modulename if doc is None: if cls is None: raise ValueError("No class or documentation string given") doc = pydoc.getdoc(cls) NumpyDocString.__init__(self, doc) if config.get('show_class_members', True): if not self['Methods']: self['Methods'] = [(name, '', '') for name in sorted(self.methods)] if not self['Attributes']: self['Attributes'] = [(name, '', '') for name in sorted(self.properties)] @property def methods(self): if self._cls is None: return [] return [name for name,func in inspect.getmembers(self._cls) if ((not name.startswith('_') or name in self.extra_public_methods) and callable(func))] @property def properties(self): if self._cls is None: return [] return [name for name,func in inspect.getmembers(self._cls) if not name.startswith('_') and func is None] nipype-0.9.2/doc/sphinxext/numpy_ext/docscrape_sphinx.py000066400000000000000000000171171227300005300235620ustar00rootroot00000000000000import re, inspect, textwrap, pydoc import sphinx from docscrape import NumpyDocString, FunctionDoc, ClassDoc class SphinxDocString(NumpyDocString): def __init__(self, docstring, config={}): self.use_plots = config.get('use_plots', False) NumpyDocString.__init__(self, docstring, config=config) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param,param_type,desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc,8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() if not self._obj or hasattr(self._obj, param): autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: out += ['.. autosummary::', ' :toctree:', ''] out += autosum if others: maxlen_0 = max([len(x[0]) for x in others]) maxlen_1 = max([len(x[1]) for x in others]) hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) n_indent = maxlen_0 + maxlen_1 + 4 out += [hdr] for param, param_type, desc in others: out += [fmt % (param.strip(), param_type)] out += self._str_indent(desc, n_indent) out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. 
seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. index:: %s' % idx.get('default','')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it if sphinx.__version__ >= "0.6": out += ['.. only:: latex',''] else: out += ['.. latexonly::',''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Attributes', 'Methods'): out += self._str_member_list(param_list) out = self._str_indent(out,indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.use_plots = config.get('use_plots', False) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.use_plots = config.get('use_plots', False) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config={}): self._f = obj SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config) nipype-0.9.2/doc/sphinxext/numpy_ext/numpydoc.py000066400000000000000000000130611227300005300220560ustar00rootroot00000000000000""" ======== numpydoc ======== Sphinx extension that handles docstrings in the Numpy standard format. [1] It will: - Convert Parameters etc. sections to field lists. - Convert See Also section to a See also entry. - Renumber references. 
- Extract the signature from the docstring, if it can't be determined otherwise. .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ import sphinx if sphinx.__version__ < '1.0.1': raise RuntimeError("Sphinx 1.0.1 or newer is required") import os, re, pydoc from docscrape_sphinx import get_doc_object, SphinxDocString from sphinx.util.compat import Directive import inspect def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): cfg = dict(use_plots=app.config.numpydoc_use_plots, show_class_members=app.config.numpydoc_show_class_members) if what == 'module': # Strip top title title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', re.I|re.S) lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") else: doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) lines[:] = unicode(doc).split(u"\n") if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ obj.__name__: if hasattr(obj, '__module__'): v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) lines += [u'', u'.. only:: html', ''] lines += [u' %s' % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I) if m: references.append(m.group(1)) # start renaming from the longest string, to avoid overwriting parts references.sort(key=lambda x: -len(x)) if references: for i, line in enumerate(lines): for r in references: if re.match(ur'^\d+$', r): new_r = u"R%d" % (reference_offset[0] + int(r)) else: new_r = u"%s%d" % (r, reference_offset[0]) lines[i] = lines[i].replace(u'[%s]_' % r, u'[%s]_' % new_r) lines[i] = lines[i].replace(u'.. [%s]' % r, u'.. 
[%s]' % new_r) reference_offset[0] += len(references) def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: sig = re.sub(u"^[^(]*", u"", doc['Signature']) return sig, u'' def setup(app, get_doc_object_=get_doc_object): global get_doc_object get_doc_object = get_doc_object_ app.connect('autodoc-process-docstring', mangle_docstrings) app.connect('autodoc-process-signature', mangle_signature) app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) #------------------------------------------------------------------------------ # Docstring-mangling domains #------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain class ManglingDomainBase(object): directive_mangling_map = {} def __init__(self, *a, **kw): super(ManglingDomainBase, self).__init__(*a, **kw) self.wrap_mangling_directives() def wrap_mangling_directives(self): for name, objtype in self.directive_mangling_map.items(): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) class NumpyPythonDomain(ManglingDomainBase, PythonDomain): name = 'np' directive_mangling_map = { 'function': 'function', 'class': 'class', 'exception': 'class', 'method': 'function', 'classmethod': 'function', 'staticmethod': 'function', 'attribute': 'attribute', } class NumpyCDomain(ManglingDomainBase, CDomain): name = 'np-c' directive_mangling_map = { 'function': 'function', 'member': 'attribute', 'macro': 'function', 'type': 'class', 'var': 'object', } def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): env = self.state.document.settings.env name = None if self.arguments: m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) name = m.group(2).strip() if not name: name = self.arguments[0] lines = list(self.content) mangle_docstrings(env.app, objtype, name, None, None, lines) self.content = ViewList(lines, self.content.parent) return base_directive.run(self) return directive nipype-0.9.2/doc/users/000077500000000000000000000000001227300005300147245ustar00rootroot00000000000000nipype-0.9.2/doc/users/caching_tutorial.rst000066400000000000000000000153401227300005300210000ustar00rootroot00000000000000 .. _caching: =========================== Interface caching =========================== This section details the interface-caching mechanism, exposed in the :mod:`nipype.caching` module. .. currentmodule:: nipype.caching Interface caching: why and how =============================== * :ref:`Pipelines ` (also called `workflows`) specify processing by an execution graph. This is useful because it opens the door to dependency checking and enable `i)` to minimize recomputations, `ii)` to have the execution engine transparently deal with intermediate file manipulations. They however do not blend in well with arbitrary Python code, as they must rely on their own execution engine. 
* :ref:`Interfaces ` give fine control of the execution of each step with a thin wrapper on the underlying software. As a result that can easily be inserted in Python code. However, they force the user to specify explicit input and output file names and cannot do any caching. This is why nipype exposes an intermediate mechanism, `caching` that provides transparent output file management and caching within imperative Python code rather than a workflow. A big picture view: using the :class:`Memory` object ======================================================= nipype caching relies on the :class:`Memory` class: it creates an execution context that is bound to a disk cache:: >>> from nipype.caching import Memory >>> mem = Memory(base_dir='.') Note that the caching directory is a subdirectory called `nipype_mem` of the given `base_dir`. This is done to avoid polluting the base director. In the corresponding execution context, nipype interfaces can be turned into callables that can be used as functions using the :meth:`Memory.cache` method. For instance if we want to run the fslMerge command on a set of files:: >>> from nipype.interface import fsl >>> fsl_merge = mem.cache(fsl.Merge) Note that the :meth:`Memory.cache` method takes interfaces **classes**, and not instances. The resulting `fsl_merge` object can be applied as a function to parameters, that will form the inputs of the `merge` fsl commands. Those inputs are given as keyword arguments, bearing the same name as the name in the inputs specs of the interface. In IPython, you can also get the argument list by using the `fsl_merge?` synthax to inspect the docs:: In [10]: fsl_merge? String Form:PipeFunc(nipype.interfaces.fsl.utils.Merge, base_dir=/home/varoquau/dev/nipype/nipype/caching/nipype_mem) Namespace: Interactive File: /home/varoquau/dev/nipype/nipype/caching/memory.py Definition: fsl_merge(self, **kwargs) Docstring: Use fslmerge to concatenate images Inputs ------ Mandatory: dimension: dimension along which the file will be merged in_files: None Optional: args: Additional parameters to the command environ: Environment variables (default={}) ignore_exception: Print an error message instead of throwing an exception in case the interface fails to run (default=False) merged_file: None output_type: FSL output type Outputs ------- merged_file: None Class Docstring: ... Thus `fsl_merge` is applied to parameters as such:: >>> results = fsl_merge(dimension='t', in_files=['a.nii.gz', 'b.nii.gz']) INFO:workflow:Executing node faa7888f5955c961e5c6aa70cbd5c807 in dir: /home/varoquau/dev/nipype/nipype/caching/nipype_mem/nipype-interfaces-fsl-utils-Merge/faa7888f5955c961e5c6aa70cbd5c807 INFO:workflow:Running: fslmerge -t /home/varoquau/dev/nipype/nipype/caching/nipype_mem/nipype-interfaces-fsl-utils-Merge/faa7888f5955c961e5c6aa70cbd5c807/a_merged.nii /home/varoquau/dev/nipype/nipype/caching/a.nii.gz /home/varoquau/dev/nipype/nipype/caching/b.nii.gz The results are standard nipype nodes results. In particular, they expose an `outputs` attribute that carries all the outputs of the process, as specified by the docs. 
>>> results.outputs.merged_file '/home/varoquau/dev/nipype/nipype/caching/nipype_mem/nipype-interfaces-fsl-utils-Merge/faa7888f5955c961e5c6aa70cbd5c807/a_merged.nii' Finally, and most important, if the node is applied to the same input parameters, it is not computed, and the results are reloaded from the disk:: >>> results = fsl_merge(dimension='t', in_files=['a.nii.gz', 'b.nii.gz']) INFO:workflow:Executing node faa7888f5955c961e5c6aa70cbd5c807 in dir: /home/varoquau/dev/nipype/nipype/caching/nipype_mem/nipype-interfaces-fsl-utils-Merge/faa7888f5955c961e5c6aa70cbd5c807 INFO:workflow:Collecting precomputed outputs Once the :class:`Memory` is set up and you are applying it to data, an important thing to keep in mind is that you are using up disk cache. It might be useful to clean it using the methods that :class:`Memory` provides for this: :meth:`Memory.clear_previous_runs`, :meth:`Memory.clear_runs_since`. .. topic:: Example A full-blown example showing how to stage multiple operations can be found in the :download:`caching_example.py <../../examples/howto_caching_example.py>` file. Usage patterns: working efficiently with caching =================================================== The goal of the `caching` module is to enable writing plain Python code rather than workflows. Use it: instead of data grabber nodes, use for instance the `glob` module. To vary parameters, use `for` loops. To make reusable code, write Python functions. One good rule of thumb to respect is to avoid the usage of explicit filenames apart from the outermost inputs and outputs of your processing. The reason being that the caching mechanism of :mod:`nipy.caching` takes care of generating the unique hashes, ensuring that, when you vary parameters, files are not overridden by the output of different computations. .. topic:: Debuging If you need to inspect the running environment of the nodes, it may be useful to know where they were executed. With `nipype.caching`, you do not control this location as it is encoded by hashes. To find out where an operation has been persisted, simply look in it's output variable:: out.runtime.cwd Finally, the more you explore different parameters, the more you risk creating cached results that will never be reused. Keep in mind that it may be useful to flush the cache using :meth:`Memory.clear_previous_runs` or :meth:`Memory.clear_runs_since`. API reference =============== The main class of the :mod:`nipype.caching` module is the :class:`Memory` class: .. autoclass:: Memory :members: __init__, cache, clear_previous_runs, clear_runs_since ____ Also used are the :class:`PipeFunc`, callables that are returned by the :meth:`Memory.cache` decorator: .. currentmodule:: nipype.caching.memory .. autoclass:: PipeFunc :members: __init__ nipype-0.9.2/doc/users/config_file.rst000066400000000000000000000155371227300005300177350ustar00rootroot00000000000000.. _config_file: ======================= Configuration File ======================= Some of the system wide options of Nipype can be configured using a configuration file. Nipype looks for the file in the local folder under the name ``nipype.cfg`` and in ``~/.nipype/nipype.cfg`` (in this order). If an option will not be specified a default value will be assumed. 
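Before the individual options are described, the short sketch below illustrates what such a file can look like. The option names are taken from the sections that follow; the values (and the log directory path) are purely illustrative, not recommended settings. ::

    [logging]
    log_to_file = true
    log_directory = /home/user/nipype_logs

    [execution]
    plugin = MultiProc
    stop_on_first_crash = true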
The file is divided into following sections: Logging ~~~~~~~ *workflow_level* How detailed the logs regarding workflow should be (possible values: ``INFO`` and ``DEBUG``; default value: ``INFO``) *filemanip_level* How detailed the logs regarding file operations (for example overwriting warning) should be (possible values: ``INFO`` and ``DEBUG``; default value: ``INFO``) *interface_level* How detailed the logs regarding interface execution should be (possible values: ``INFO`` and ``DEBUG``; default value: ``INFO``) *log_to_file* Indicates whether logging should also send the output to a file (possible values: ``true`` and ``false``; default value: ``false``) *log_directory* Where to store logs. (string, default value: home directory) *log_size* Size of a single log file. (integer, default value: 254000) *log_rotate* How many rotation should the log file make. (integer, default value: 4) Execution ~~~~~~~~~ *plugin* This defines which execution plugin to use. (possible values: ``Linear``, ``MultiProc``, ``SGE``, ``IPython``; default value: ``Linear``) *stop_on_first_crash* Should the workflow stop upon first node crashing or try to execute as many nodes as possible? (possible values: ``true`` and ``false``; default value: ``false``) *stop_on_first_rerun* Should the workflow stop upon first node trying to recompute (by that we mean rerunning a node that has been run before - this can happen due changed inputs and/or hash_method since the last run). (possible values: ``true`` and ``false``; default value: ``false``) *hash_method* Should the input files be checked for changes using their content (slow, but 100% accurate) or just their size and modification date (fast, but potentially prone to errors)? (possible values: ``content`` and ``timestamp``; default value: ``content``) *keep_inputs* Ensures that all inputs that are created in the nodes working directory are kept after node execution (possible values: ``true`` and ``false``; default value: ``false``) *single_thread_matlab* Should all of the Matlab interfaces (including SPM) use only one thread? This is useful if you are parallelizing your workflow using MultiProc or IPython on a single multicore machine. (possible values: ``true`` and ``false``; default value: ``true``) *display_variable* What ``DISPLAY`` variable should all command line interfaces be run with. This is useful if you are using `xnest `_ or `Xvfb `_ and you would like to redirect all spawned windows to it. (possible values: any X server address; default value: not set) *remove_unnecessary_outputs* This will remove any interface outputs not needed by the workflow. If the required outputs from a node changes, rerunning the workflow will rerun the node. Outputs of leaf nodes (nodes whose outputs are not connected to any other nodes) will never be deleted independent of this parameter. (possible values: ``true`` and ``false``; default value: ``true``) *use_relative_paths* Should the paths stored in results (and used to look for inputs) be relative or absolute. Relative paths allow moving the whole working directory around but may cause problems with symlinks. (possible values: ``true`` and ``false``; default value: ``false``) *local_hash_check* Perform the hash check on the job submission machine. This option minimizes the number of jobs submitted to a cluster engine or a multiprocessing pool to only those that need to be rerun. 
(possible values: ``true`` and ``false``; default value: ``true``) *job_finished_timeout* When batch jobs are submitted through, SGE/PBS/Condor they could be killed externally. Nipype checks to see if a results file exists to determine if the node has completed. This timeout determines for how long this check is done after a job finish is detected. (float in seconds; default value: 5) *remove_node_directories (EXPERIMENTAL)* Removes directories whose outputs have already been used up. Doesn't work with IdentiInterface or any node that patches data through (without copying) (possible values: ``true`` and ``false``; default value: ``false``) *stop_on_unknown_version* If this is set to True, an underlying interface will raise an error, when no version information is available. Please notify developers or submit a patch. *parameterize_dirs* If this is set to True, the node's output directory will contain full parameterization of any iterable, otherwise parameterizations over 32 characters will be replaced by their hash. (possible values: ``true`` and ``false``; default value: ``true``) Example ~~~~~~~ :: [logging] workflow_level = DEBUG [execution] stop_on_first_crash = true hash_method = timestamp display_variable = :1 Workflow.config property has a form of a nested dictionary reflecting the structure of the .cfg file. :: myworkflow = pe.Workflow() myworkflow.config['execution'] = {'stop_on_first_rerun': 'True', 'hash_method': 'timestamp'} You can also directly set global config options in your workflow script. An example is shown below. This needs to be called before you import the pipeline or the logger. Otherwise logging level will not be reset. :: from nipype import config cfg = dict(logging=dict(workflow_level = 'DEBUG'), execution={'stop_on_first_crash': False, 'hash_method': 'content'}) config.update_config(cfg) Enabling logging to file ~~~~~~~~~~~~~~~~~~~~~~~~ By default, logging to file is disabled. One can enable and write the file to a location of choice as in the example below. :: import os from nipype import config, logging config.update_config({'logging': {'log_directory': os.getcwd(), 'log_to_file': True}}) logging.update_logging(config) The logging update line is necessary to change the behavior of logging such as output directory, logging level, etc.,. Debug configuration ~~~~~~~~~~~~~~~~~~~ To enable debug mode, one can insert the following lines:: from nipype import config, logging config.enable_debug_mode() logging.update_logging(config) In this mode the following variables are set:: config.set('execution', 'stop_on_first_crash', 'true') config.set('execution', 'remove_unnecessary_outputs', 'false') config.set('logging', 'workflow_level', 'DEBUG') config.set('logging', 'interface_level', 'DEBUG') .. include:: ../links_names.txt nipype-0.9.2/doc/users/debug.rst000066400000000000000000000052051227300005300165460ustar00rootroot00000000000000.. _debug: ========================== Debugging Nipype Workflows ========================== Throughout Nipype_ we try to provide meaningful error messages. If you run into an error that does not have a meaningful error message please let us know so that we can improve error reporting. Here are some notes that may help debugging workflows or understanding performance issues. #. Always run your workflow first on a single iterable (e.g. subject) and gradually increase the execution distribution complexity (Linear->MultiProc-> SGE). #. Use the debug config mode. 
This can be done by setting:: from nipype import config config.enable_debug_mode() as the first import of your nipype script. .. note:: Turning on debug will rerun your workflows and will rerun them after debugging is turned off. #. There are several configuration options that can help with debugging. See :ref:`config_file` for more details:: keep_inputs remove_unnecessary_outputs stop_on_first_crash stop_on_first_rerun #. When running in distributed mode on cluster engines, it is possible for a node to fail without generating a crash file in the crashdump directory. In such cases, it will store a crash file in the `batch` directory. #. All Nipype crashfiles can be inspected with the `nipype_display_crash` utility. #. Nipype determines the hash of the input state of a node. If any input contains strings that represent files on the system path, the hash evaluation mechanism will determine the timestamp or content hash of each of those files. Thus any node with an input containing huge dictionaries (or lists) of file names can cause serious performance penalties. #. For HUGE data processing, 'stop_on_first_crash':'False', is needed to get the bulk of processing done, and then 'stop_on_first_crash':'True', is needed for debugging and finding failing cases. Setting 'stop_on_first_crash': 'False' is a reasonable option when you would expect 90% of the data to execute properly. #. Sometimes nipype will hang as if nothing is going on and if you hit Ctrl+C you will get a `ConcurrentLogHandler` error. Simply remove the pypeline.lock file in your home directory and continue. #. One many clusters with shared NFS mounts synchronization of files across clusters may not happen before the typical NFS cache timeouts. When using PBS/LSF/SGE/Condor plugins in such cases the workflow may crash because it cannot retrieve the node result. Setting the `job_finished_timeout` can help:: workflow.config['execution']['job_finished_timeout'] = 65 .. include:: ../links_names.txt nipype-0.9.2/doc/users/function_interface.rst000066400000000000000000000132711227300005300213270ustar00rootroot00000000000000.. _function_interface: ====================== The Function Interface ====================== Most Nipype interfaces provide access to external programs, such as FSL binaries or SPM routines. However, a special interface, :class:`nipype.interfaces.utility.Function`, allows you to wrap arbitrary Python code in the Interface framework and seamlessly integrate it into your workflows. A Simple Function Interface --------------------------- The most important component of a working Function interface is a Python function. There are several ways to associate a function with a Function interface, but the most common way will involve functions you code yourself as part of your Nipype scripts. Consider the following function:: def add_two(val): return val + 2 This simple function takes a value, adds 2 to it, and returns that new value. Just as Nipype interfaces have inputs and outputs, Python functions have inputs, in the form of parameters or arguments, and outputs, in the form of their return values. When you define a Function interface object with an existing function, as in the case of ``add_two()`` above, you must pass the constructor information about the function's inputs, its outputs, and the function itself. 
For example, :: from nipype.interfaces.utility import Function add_two_interface = Function(input_names=["val"], output_names=["out_val"], function=add_two) Then you can set the inputs and run just as you would with any other interface:: add_two_interface.inputs.val = 2 res = add_two_interface.run() print res.outputs.out_val Which would print ``4``. Note that, if you are working interactively, the Function interface is unable to use functions that are defined within your interpreter session. (Specifically, it can't use functions that live in the ``__main__`` namespace). Using External Packages ----------------------- Chances are, you will want to write functions that do more complicated processing, particularly using the growing stack of Python packages geared towards neuroimaging, such as Nibabel_, Nipy_, or PyMVPA_. While this is completely possible (and, indeed, an intended use of the Function interface), it does come with one important constraint. The function code you write is executed in a standalone environment, which means that any external functions or classes you use have to be imported within the function itself:: def get_n_trs(in_file): import nibabel f = nibabel.load(in_file) return f.shape[-1] Without explicitly importing Nibabel in the body of the function, this would fail. Alternatively, it is possible to provide a list of strings corresponding to the imports needed to execute a function as a parameter of the `Function` constructor. This allows for the use of external functions that do not import all external definitions inside the function body. Hello World - Function interface in a workflow ---------------------------------------------- Contributed by: Hänel Nikolaus Valentin The following snippet of code demonstrates the use of the function interface in the context of a workflow. Note the use of ``import os`` within the function as well as returning the absolute path from the Hello function. The `import` inside is necessary because functions are coded as strings and do not have to be on the PYTHONPATH. However any function called by this function has to be available on the PYTHONPATH. The `absolute path` is necessary because all workflow nodes are executed in their own directory and therefore there is no way of determining that the input file came from a different directory:: import nipype.pipeline.engine as pe from nipype.interfaces.utility import Function def Hello(): import os from nipype import logging iflogger = logging.getLogger('interface') message = "Hello " file_name = 'hello.txt' iflogger.info(message) with open(file_name, 'w') as fp: fp.write(message) return os.path.abspath(file_name) def World(in_file): from nipype import logging iflogger = logging.getLogger('interface') message = "World!" iflogger.info(message) with open(in_file, 'a') as fp: fp.write(message) hello = pe.Node(name='hello', interface=Function(input_names=[], output_names=['out_file'], function=Hello)) world = pe.Node(name='world', interface=Function(input_names=['in_file'], output_names=[], function=World)) pipeline = pe.Workflow(name='nipype_demo') pipeline.connect([(hello, world, [('out_file', 'in_file')])]) pipeline.run() pipeline.write_graph(graph2use='flat') Advanced Use ------------ To use an existing function object (as we have been doing so far) with a Function interface, it must be passed to the constructor. However, it is also possible to dynamically set how a Function interface will process its inputs using the special ``function_str`` input. 
This input takes not a function object, but actually a single string that can be parsed to define a function. In the equivalent case to our example above, the string would be :: add_two_str = "def add_two(val):\n return val + 2\n" Unlike when using a function object, this input can be set like any other, meaning that you could write a function that outputs different function strings depending on some run-time contingencies, and connect that output the the ``function_str`` input of a downstream Function interface. .. include:: ../links_names.txt nipype-0.9.2/doc/users/grabbing_and_sinking.rst000066400000000000000000000240101227300005300215720ustar00rootroot00000000000000.. _grabbing_and_sinking: ================================== DataGrabber and DataSink explained ================================== In this chapter we will try to explain the concepts behind DataGrabber and :ref:`DataSink `. Why do we need these interfaces? ================================ A typical workflow takes data as input and produces data as the result of one or more operations. One can set the data required by a workflow directly as illustrated below. :: from fsl_tutorial2 import preproc preproc.base_dir = os.path.abspath('.') preproc.inputs.inputspec.func = os.path.abspath('data/s1/f3.nii') preproc.inputs.inputspec.struct = os.path.abspath('data/s1/struct.nii') preproc.run() Typical neuroimaging studies require running workflows on multiple subjects or different parameterizations of algorithms. One simple approach to that would be to simply iterate over subjects. :: from fsl_tutorial2 import preproc for name in subjects: preproc.base_dir = os.path.abspath('.') preproc.inputs.inputspec.func = os.path.abspath('data/%s/f3.nii'%name) preproc.inputs.inputspec.struct = os.path.abspath('data/%s/struct.nii'%name) preproc.run() However, in the context of complex workflows and given that users typically arrange their imaging and other data in a semantically hierarchical data store, an alternative mechanism for reading and writing the data generated by a workflow is often necessary. As the names suggest DataGrabber is used to get at data stored in a shared file system while :ref:`DataSink ` is used to store the data generated by a workflow into a hierarchical structure on disk. DataGrabber =========== DataGrabber is an interface for collecting files from hard drive. It is very flexible and supports almost any file organization of your data you can imagine. You can use it as a trivial use case of getting a fixed file. By default, DataGrabber stores its outputs in a field called outfiles. :: import nipype.interfaces.io as nio datasource1 = nio.DataGrabber() datasource1.inputs.base_directory = os.getcwd() datasource1.inputs.template = 'data/s1/f3.nii' results = datasource1.run() Or you can get at all uncompressed NIfTI files starting with the letter 'f' in all directories starting with the letter 's'. :: datasource2.inputs.base_directory = '/mass' datasource2.inputs.template = 'data/s*/f*.nii' Two special inputs were used in these previous cases. The input `base_directory` indicates in which directory to search, while the input `template` indicates the string template to match. So in the previous case datagrabber is looking for path matches of the form `/mass/data/s*/f*`. .. note:: When used with wildcards (e.g., s* and f* above) DataGrabber does not return data in sorted order. In order to force it to return data in sorted order, one needs to set the input `sorted = True`. 
However, when explicitly specifying an order as we will see below, `sorted` should be set to `False`. More useful cases arise when the template can be filled by other inputs. In the example below, we define an input field for `datagrabber` called `run`. This is then used to set the template (see %d in the template). :: datasource3 = nio.DataGrabber(infields=['run']) datasource3.inputs.base_directory = os.getcwd() datasource3.inputs.template = 'data/s1/f%d.nii' datasource3.inputs.run = [3, 7] This will return files `basedir/data/s1/f3.nii` and `basedir/data/s1/f7.nii`. We can take this a step further and pair subjects with runs. :: datasource4 = nio.DataGrabber(infields=['subject_id', 'run']) datasource4.inputs.template = 'data/%s/f%d.nii' datasource4.inputs.run = [3, 7] datasource4.inputs.subject_id = ['s1', 's3'] This will return files `basedir/data/s1/f3.nii` and `basedir/data/s3/f7.nii`. A more realistic use-case ------------------------- In a typical study one often wants to grab different files for a given subject and store them in semantically meaningful outputs. In the following example, we wish to retrieve all the functional runs and the structural image for the subject 's1'. :: datasource = nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']) datasource.inputs.base_directory = 'data' datasource.inputs.template = '*' datasource.inputs.field_template = dict(func='%s/f%d.nii', struct='%s/struct.nii') datasource.inputs.template_args = dict(func=[['subject_id', [3,5,7,10]]], struct=[['subject_id']]) datasource.inputs.subject_id = 's1' Two more fields are introduced: `field_template` and `template_args`. These fields are both dictionaries whose keys correspond to the `outfields` keyword. The `field_template` reflects the search path for each output field, while the `template_args` reflect the inputs that satisfy the template. The inputs can either be one of the named inputs specified by the `infields` keyword arg or it can be raw strings or integers corresponding to the template. For the `func` output, the **%s** in the `field_template` is satisfied by `subject_id` and the **%d** is field in by the list of numbers. .. note:: We have not set `sorted` to `True` as we want the DataGrabber to return the functional files in the order it was specified rather than in an alphabetic sorted order. DataSink ======== A workflow working directory is like a **cache**. It contains not only the outputs of various processing stages, it also contains various extraneous information such as execution reports, hashfiles determining the input state of processes. All of this is embedded in a hierarchical structure that reflects the iterables that have been used in the workflow. This makes navigating the working directory a not so pleasant experience. And typically the user is interested in preserving only a small percentage of these outputs. The :ref:`DataSink ` interface can be used to extract components from this `cache` and store it at a different location. For XNAT-based storage, see :ref:`XNATSink ` . .. note:: Unlike other interfaces, a :ref:`DataSink `'s inputs are defined and created by using the workflow connect statement. Currently disconnecting an input from the :ref:`DataSink ` does not remove that connection port. Let's assume we have the following workflow. .. digraph:: simple_workflow "InputNode" -> "Realign" -> "DataSink"; "InputNode" -> "DataSink"; The following code segment defines the :ref:`DataSink ` node and sets the `base_directory` in which all outputs will be stored. 
The `container` input creates a subdirectory within the `base_directory`. If you are iterating a workflow over subjects, it may be useful to save it within a folder with the subject id. :: datasink = pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory = '/path/to/output' workflow.connect(inputnode, 'subject_id', datasink, 'container') If we wanted to save the realigned files and the realignment parameters to the same place the most intuitive option would be: :: workflow.connect(realigner, 'realigned_files', datasink, 'motion') workflow.connect(realigner, 'realignment_parameters', datasink, 'motion') However, this will not work as only one connection is allowed per input port. So we need to create a second port. We can store the files in a separate folder. :: workflow.connect(realigner, 'realigned_files', datasink, 'motion') workflow.connect(realigner, 'realignment_parameters', datasink, 'motion.par') The period (.) indicates that a subfolder called par should be created. But if we wanted to store it in the same folder as the realigned files, we would use the `.@` syntax. The @ tells the :ref:`DataSink ` interface to not create the subfolder. This will allow us to create different named input ports for :ref:`DataSink ` and allow the user to store the files in the same folder. :: workflow.connect(realigner, 'realigned_files', datasink, 'motion') workflow.connect(realigner, 'realignment_parameters', datasink, 'motion.@par') The syntax for the input port of :ref:`DataSink ` takes the following form: :: string[[.[@]]string[[.[@]]string] ...] where parts between paired [] are optional. MapNode ------- In order to use :ref:`DataSink ` inside a MapNode, it's inputs have to be defined inside the constructor using the `infields` keyword arg. Parameterization ---------------- As discussed in :doc:`mapnode_and_iterables`, one can run a workflow iterating over various inputs using the iterables attribute of nodes. This means that a given workflow can have multiple outputs depending on how many iterables are there. Iterables create working directory subfolders such as `_iterable_name_value`. The `parameterization` input parameter controls whether the data stored using :ref:`DataSink ` is in a folder structure that contains this iterable information or not. It is generally recommended to set this to `True` when using multiple nested iterables. Substitutions ------------- The `substitutions` and `substitutions_regexp` inputs allow users to modify the output destination path and name of a file. Substitutions are a list of 2-tuples and are carried out in the order in which they were entered. Assuming that the output path of a file is: :: /root/container/_variable_1/file_subject_realigned.nii we can use substitutions to clean up the output path. :: datasink.inputs.substitutions = [('_variable', 'variable'), ('file_subject_', '')] This will rewrite the file as: :: /root/container/variable_1/realigned.nii .. note:: In order to figure out which substitutions are needed it is often useful to run the workflow on a limited set of iterables and then determine the substitutions. .. include:: ../links_names.txt nipype-0.9.2/doc/users/images/000077500000000000000000000000001227300005300161715ustar00rootroot00000000000000nipype-0.9.2/doc/users/images/componentarchitecture.png000066400000000000000000004614061227300005300233170ustar00rootroot00000000000000PNG  IHDR@':/iCCPICC Profilexc``2ptqre``+) rwRR` ``\\yy | 2 U +'300%@q9@HR6. r!+ v.z H}:b'A2 vIj^ʢ#ǔTbϼĒZ@! 
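To tie the two interfaces together, the sketch below wires a DataGrabber and a DataSink around a single processing node, following the connection examples given above. It is only an illustration: the data layout, the subject id and the choice of ``spm.Realign`` as the processing step are hypothetical placeholders, not a prescribed pipeline. ::

    import os
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    import nipype.interfaces.spm as spm

    # Grab one functional run per subject (directory layout is hypothetical)
    datasource = pe.Node(nio.DataGrabber(infields=['subject_id'],
                                         outfields=['func']),
                         name='datasource')
    datasource.inputs.base_directory = os.path.abspath('data')
    datasource.inputs.template = '%s/f3.nii'
    datasource.inputs.template_args = dict(func=[['subject_id']])
    datasource.inputs.subject_id = 's1'

    # A processing node; spm.Realign is used purely as an example here
    realigner = pe.Node(spm.Realign(), name='realigner')

    # Store realigned files and motion parameters in the same output folder,
    # using the `.@` syntax described above to avoid an extra subfolder
    datasink = pe.Node(nio.DataSink(), name='sinker')
    datasink.inputs.base_directory = os.path.abspath('output')

    workflow = pe.Workflow(name='grab_and_sink_demo')
    workflow.base_dir = os.path.abspath('workingdir')
    workflow.connect(datasource, 'func', realigner, 'in_files')
    workflow.connect(realigner, 'realigned_files', datasink, 'motion')
    workflow.connect(realigner, 'realignment_parameters', datasink, 'motion.@par')
    workflow.run()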
،IWZ&Yy# nuK<sV7Om 7Cرcd8,NCڐ!CvuWRW[m~n/.?*mS(ZdŴVZZze[;E% F@`ذa+qok xބ zfRSVGT$ZgyM6%駟.$A% #;09r$[ouN &@YG aNq.aJZ$ykg#d0CE*Oy -Mҷhoko 0}.e+&f=1~7x#ϥ(ML܆ ;2:XθLi|2֞c!\0<):۸qB7w yml}>#˕\)E ǂd`;.οe>& jsr]#ۤ.#BoؒxR- ST 5В\g}7 N.(G_B WҊ+: Ow75V'Bc}Jk4M,Dz"b+O8+\9u? /˨xJU  . #" S<Ս%˔d?Lأ5 N&NV)Wc2hͤ"˕3=P84`UD[U%4DXcfMYQl%5}2aHڋzO>n-;xRz3LTm)0j`+196IR2֞$;oHYzJ" Ĕ㗈?&t11oF:}?3o j"-WHskǥ)bDaf2 -~Søcfۯqu/q{n ,#,HPr!\ FvDq*rړ=n!84}~$c@j"$9J3DyAחe\Z&&/Jeϔ,29ǜSO35gcl.p9.w|n"EGZ:JP,ۗdT r-Ԩ+S K.Ϊw~(1$%EN40%0r.Թ3#Aq2>by0+GTH݂II([+OHx%`:wʛ=ѣ?o7ɠ]iY)10ša 6|bE@_Ws[LwywuW>>&#&&5.P#n^2m\gYRTSM,aܝbIA J}ā?Ҥx4;6D«g*^t\[E&\G~\C]&]{#YfX8c~Iݝ q~)O N`bxQ<0I'w:rBUß"^Ey 8D IDAT Խ}V;{8l)Aq^@ќ^@^"\$z S#J.%0s hH" "ՓsB ]'ve8Ȥ%4?rR$Q򨧊[#3yҔ2En44&E4HiP;rUBh4~sqDUw-bG\atEC@uN|&=8uamB#NO(H^"Q򠧋0$=#V_ct%!k@$ x9">DZZw=r-NEGEMU\k0"\'CV/A'ZN=vu֍~w^>Z-tm&jm50S4 gF 8YHXAMVbuY3BZT'<" )_L(S#[ Ek%^ р@$!. C8`e mD$ 1s0 [!AQ /З8_$dVĖ(PC 3zƄM1S1ق;RVfVcdR8R@<7AU4\R0{>ce `j_?D>$ 6F" ( hmxgL[JGZ&,g6/qSj)*MiP0`JAq?kmV$ ɍ$}/ f`}2hcZ$.U,0 001Mt\RfXIt@٠$j[{YArO9Q$h0xkhҜTUSUi0,/s`E( ؒ͵RO?w1&Lhϐ z"jn>aXU6G]cE Azc=g2:X 6 L0I _ i 1]Z0b H֝1 2NxS'2ǘ9f'KQF3p(cVYC Ax㴤/:ԩNHj/}aɁ6NE?ƪ!&}aP(kjo ;>hJcY`5LdAmƅ0*%nXQ;10HZq4 hJݚBC&*lP)[Y֣|Ai'A495_I()cM|5'})^eJ1c>o<}t ejaE M 5xBnJJOKlDB`dJj>T7M!`M2+cl@24%Q6.4Dl@c1Y fÄP*J=' / /A DMl2+Ci-aFRMFJRSE@L) lAY$Ftp1\uUp` '4gy΅'u²aX{M)+`yOX`q{‰3iݵKO| KP!f l \fCR q,4®IV!ɓ͓CUdj-0d(?wx"{eLv&WJݯcR,khPj\W"L4_枋\b0rX-A|L :Ǿٌ1qF"nvE?? O,bq8Z=;s L|/XPwk~#p7%ɠ@l⇽;cq-5WbJ ̥qe)%"эEbUQ)F!LUR QF$F Y3b0 &T_9EOM\@eXA)4ǠykaOB18*M-Yd+*kB8xRه[0h1@r! x( F`f9k(")>*:010;G1i`LQ , Z+VWLE4 tV&2IJLŲbؚix]qĜP#6^!/rM^~eH !6ȟ=4XiK?sECtL zP/tybA" /J+"9˽kcuSb5Z0]`x|ݵj\:%8Kf^H2㭨U&w +jDd\ԓHUUCgsȂ?ל0Eh8@L)3 :&󘱱&||XM0s 3]hC( #v>-ӫ􄷽^Kd8̠ۘqaA.1?GH]A&,wX;8X˸Ύ k!7),8~ȖD$Ī?L?%)qSJ]@u9ڄ~2wduVW" &I 0m*Yl2b {Xq0aH~ U1}/4SA &),"e.S0J4Y J[B#,bIJb"` #Rn)霈tB-oܸqt?_-B 1dgB7$eqZkeF)qBzBlV@E"I4,EH~`B:ҹ=ħ|q࣍ia< V[ɶ!rkG-;$ļ ɿqCۡoƛ׃:ƉsՃ 9P~ !#m&ʍN C8/ ũRO2%02oRL:hJ|eDᛤb1$~R P ds]0AXSVUS{!V;1( ^;dk\3=hY@7`)M^9+Pe#dT51ͣUh2+0EɅ <&V M7馛(;,2Mi)S(I+t9AD>M߀[΋e Oؒ4I.P9@x)"e@O, .0ĘAd`ŢQWkDO"*NP{21)  4DƳ ¯ Ѽ b+`H`Ϡ"dZ_|RR~ Ny#^+&&}dAC ʭR&J+ܴ$~2X+bbT܎ Yy YئI+n",1wSj\PKB* 0b4q0cJ !NIf>l6^5:+޹gMlIbJN(<)Bh1N'<7ƕq}4%k"[&w;Mɿ`>U1=W SzuګBAQfǡ=jc\`h1Nn`\$2ri0hsY[A5ǩFP@l]GuʍTHW:*&C&Nk1`V10V@R C@+s[0L6l3If 49j /\mK/'3b'=믿ޠϥNijPlaF-"`3 Jĭ78t{BW%'?|Qc|V0TBVy pp\ zFw ZuQT5,^ :2"lʷ+ghB;bPY-*K 3Hd+xe&8XR ~0w;j)!FnJ M/p3^&30sO+Y`eĶddAoPfSlAc ؒ$#PF"bT\1 #[ʼnH~hN@A0C," }}E EK`@0B;O|R8HTBI*2 4X:`#&F)$?aE1*V.opT /W 3Mƍ;~ƙpZ>?n효sE1X.I-jAH1 ܹwsG&tLp"+pZveAJ3f1ZE <~ N9 8 o8T+VB-C)XQh97(r+ݴ X_a`P3 /aP@Ĺ^S- (yt:p`Sn3ޞh_A{(e9) f90jOaC:?[+릔L1i ,%Ņ_m E&qB̈́d;1%kMlJJaȇ L{73bRvc%YS PIJ͊RY,EEiIB.I٠OcȬ,x})1A]@, ξ/`kCqąHbI7FVڠV 46R(7+EQT6Ps]eUx>,8HHEFlZL>O-lFY HcZ0JX4eջNԛת@R+I^=믿>ޓ:#R*X{<^*zv堠oJL27&Qt& "!Ze30 a`^4~ϻ It0)t8? d, g lCF˂ \0DJ0*XK4`e1PIZ]E"1sUsMLUir"&7Caڀ-$! 
2($6VRĴ!e,;Ѐ_8L\%S_]Yle^ܻyGUhȲ@C$d{DR䋔-(qBV z6l$AC7HO?d|dn ՘&IP H5Ąe~bp(U* \VTAq)ChH;6?D4O%H5S, a(UqxNCsOp<0_s5ud,l*4*.0 Ci[o}^ylkjH'ȤETK¤t *2di#f ݖ\Xc8 Xm.ҧzjҤIGuTѰ@9N`S~S1(z8B,4mGbq"Lcy$h3 HE뉃@>L#XG\HEVOp6e*0(è74hFnlTS"dEY#v7R6%,@ &L)4:: dƊd>R0#8/0Ȕdb7gO?sS*OPJ=K49L&@߀)+YA er 'L2eUva(dERVt@VFGoYJ׫|unEA) Ha5)hSs98Rzuo͠ `%iXLefBI,JT4Qr-'tN<Ѭ{w9uŊDx+n#}OxqIz7Opx`/ߑktƴb 0\Wŷ>,R$Y#_U'~5W_sԑG}{oCRY<6XN% J̬@xr)BYLRmX@ieH| R]ZpgڐxCS0 \ XqHFh{?\×uh@̙3)A`6S8 R &HD*2%x8 q*4͏1}U<Ѥ6Rc3gPO͔OV|cS48F'TODE|DF>C2ie01 L"C}q_Ɓk؏[q3LHs #Ib>X !,]ꂀ`&W'RB-_"5FJ V)0*i Ŵ0jt0"Y$mp 6axG: 9 uxChw^ƖrYxBւE8'HsL)$SLU!`RSkq̎nXՕ:6zT]A穋qHr*s4} Mi{82HsBM)'bRDf)i1@BP-0M*.Z`{k`?[ne+HVż6𚵅0.N p$K0r`&L-*IYHFzR'Ia~BC2|'8X"9w:,UBỹ*C!6Rt$8<^ >u'v6matu)]aj> ZJ SrK̉ln{.BO]BCx:Q]MRXB((U?BJ+29%@ >4J%H^~ .tÐ|RMad UӾ?'pηRE2JakK)r@F`M)9Z/KF9cl% ?ԃafSQ&bMAuT7h+äz0c9l/dWƱtbZC3&fMR1mZ LI֔dIqTRewJ0"O QZs9%\2\'Zî]wݕoFCZ&BHzt }K`x%Wkw ct?'W_}矟1^t 5L 4ζ1a 4(aa$ISIƜUpH5H;&KK0Y,ba,Rfa@R}S\'8yVZHe Pc'O<㽄 w<pFڊ+SX-bHeNjQN-@( P X.Xgra|\Y˴˳8S_}UZ);~UP-fIk d4&GlTH+N?ZAY^IJ1J?E1A;9Tx s<J j)A R M#N&ŐC7a~t}H$ H # giI (a_|qs"\ȕ%smh=Qs`FF_4"&@a+MpCGy$[.y@;hu"HGU@:8'ie7q'R#te+H4Qeh0BLL$) IJ4Nߞ !s@unxt&>e[}4BA7HP|axc\w@:^nJ*C#_J#dE֠Uh1 dz^dչ9V3ɌݔQRs,Ҁb 4`, ĂT" {-1r4Y,"bLɬI@.)r$PT:CFz(4+d!$lioqi%#˹K.hi=Q_8-ͦ:ӟ7ֿgwN:c9AK1BZ`iӝ.tR/ÇeA((@0yO I 0mRLLR$ NzxT|F38m4ˡ7(5iC)@#VF9jcPnJ@(4E JEn`_Ĉ=>(QZԈ44)aIżĀbC,7~i!le`p)"AH" `4J6a*ؚ'vKpqVJY,Xb%-{"Âa'zjJ 玓YizHN%=Azd{ޓs=kz xU4iă8 :=1ibo|% B5eE!L{=VŲYEg5"\8qU^19qۓR%CyO])|:ګ9TKun5gϕ{FM6rEFZ#HM 2@VG@ZT0fqF`\92OL%r=*SL T2m'[H4fiGvb~()ldJ[n#dܸqJC.B/~u]Y=8#mQR,R zrH5aH34Ns=kz)*VScxYqck" b>S|")7wNv5):ܛj_MshDMcZFR/%*g&$5 Y˱R{%sa'7X\.0"|ݔqKq Mq5GH#s`ֈIB#Fi H{GIڙ+%9d %v%\>n$jsO 7H3Xc曯=WWvmQEov%7[iwkv6&^@'.a(n|;(J `Y= {Rx1<2x2ꝝm3t tA먅/RUtX,C7N?eS ``@1I^hi 0d8P\@].vIgj|Mb(nvac㏓Œ:…;Ț=7Q" 0qğ?{Ҝ \P*>>(&I:yOR&]ڨE{g.Re GR;BfxCH$+X> HEȼ'Jv%hQ:<,Em/~M7l=heJ{'ܡ|*z sg?o~S?ҳzA=.ɔ8~@C+^}P2"p=Q:$0$P%0$o*4s0Rlm I^eB?,bJ4#FAEI"\.#EYK/-9ϳST=5,^S7̴iz!׀ r-r/?q Ϥ\!u%|A E7 1bG`1KdOa,@%C̛pV..NΛMi?JoJ@cw nJ}XX)yGj ȁY \*C ,t#%hErWb^x!'ªCemk[/EkO{^ o06zli{BsO'zIn';ȎxO7؜);3BM$Ј^>_@Ddbi]< |KܜK`J#Ҩx[$ R[Y>`aB3WI  JR@xK$yK=X驶Yc=JyO^{5zO`'woo?;s׿!u.K#}hLd)G T[NB.8ML-u I`bJa!#v hy睷^%77%W @+a 'ܔZciYYa4~C806 x9MC`E`b[u\#vLo뮽Ӱ%%\8'|_&nŃci/I.oؼˡ#ޓ\FSCKTDIH%qqi"d4 AtE %G0"$KMikwE ȧϷMEIsCZ\^_$ZKa 'r4K%K/m?7`:[&}ݯ>ZNλ}Wړ%\i9PpwfϽ{sr+lG%Az>6)%+EAB=;Pb|=!;$ag;J$4JwCa"ܔ:$ᔭr0 #֠Oe 6xF%v zAo6lO,аblێ^JE0ڶ|뢋.>P;'y_چA8nSN9{2ޓb$J&?t=8E[X&"ĂE[8S12wn.#ܔFvHĿJpFi,/ +&p 8o~{{e-ּ==I9X&xOھm8uVbm{6=t^1-X t{~& _V.rkMǽ'd7<H4&\K@)\}Qkz4׏#~uGM;ZUvRGq=sTmP\ Izp zGҗĵ_wiXh#^kOyOZvGMzٳ3\|_"{O*ze_S#>ⴈSDLR) pT.דH Ze]VnJ{+^M++' d(iCIr2Ǔ.@I[i6loYk+mO#PPޓiӦw{kw}Q´wmI&ʽ'}ؑ]طS@`iIa! %&IMғ풀Cq 6 h77轿핀3mrSj`Sn`!,`%4)&GcIK`%\C}袋o[U=s4믿~%.,,UjE{C=[nӟJ+ J]@'Jx@R'Y$)ewd[$`Y-lK`IMi=qo$`#:ThfUN9R<%i.@oJ+y%FZuM7q/'&yly^z40g^s5}Ңc\j .''pB["侬D>4N^Y)H@_{i{-},_{׻M ړnJ^]qpK%#8묳8s駯*=ҪAm;շDvp 0kjX!2(ؼkw+h'g}O~Nv ^+>YR'ɶ#h{E%pSGܔ:- N7\K;u]:îqҡ' /0 cNT:8v:Gti]WԈz{r w * 8X _[]VeA)ZEfA3)dp e-|7*gYl /Zo)FkOw^:DX\iM,??~pbD^47קE`AIX@%I@-OƓ.!HMirRK%PG[li ؼ3,<t-Bu{1 y;=v+SLYfe[5ho|K| J]]+r \.K%p \.aĉs@.=ޞ!5COੵ'opG'ɥ+r88vH})%dqvl)#K=.{.K%p \.K8C=\} ;w Mzu7¼b~ӦM#ڼӖc{ +|'Hz ΝKs{D { GK aPnJ]WpSꅇ[ȿ:mp .'lڔ r-1g";nn]`r:u*cXZk dg`%G_-REdr-Ǟts̰HW'ij]@_L8 H>k<ɨҨ}J>FsIIEd9t 4HdlIJ19s;o}[첋Gm;ja=_Ї'C[#ړQu7r)&Mz׻HNIaR|D%(-F$ 2H>}z#Np J@DT*G9MܔXsd  ^q[psR.aX"EdSBRii4K/f` :z(%{_wum;R?"K,1A2)%IBEL a0O>䫯:rHn&Ǐg995Mp"mxOϜvF) 4),Ťn{1:,}yo4u ).A9rH% 6TS2Q./c7U IDATAu@H@Jkm2LƐ' ͘,+S! [o5se]Λ}=R.I馛@rW19=yfk)X*+ |/ƍ>Ygu5qǺquޓ~ Cm0F ,M 7 @:ܢyYyqhh~6J>FJg sx; $)њ]#T"xR 1UZi֒{筷ʚ_}{KсhR۳Rb? 
3+@ٴx6]1HV@vY5@#؄ "i9;mɁxqsytZ:=XRH0+;FE1)ӜxysR.IinJ.V$Ԋ/k󇴈FbzJ2`0&0(X)˒,t4C" X.4I)9|=)1<@GO>d>s@0^`3\4VBJ2&mYs5ǎkޓ1Fu \ɲ@dPl!Pp(o>a̶n{ꩧru{O>Ku S$@ ]I-& $@E8qu Hnjsp N {Mit*]0RDZǦLI|%ֈ7ј-VCQH@7%N+ XR}T r%MSFЧI Q CqAs/?=øE4ޒeI" kκK(Հb駟&7${A*1B,xCeޓ_WUbY] :=7 [৞zuYFetL[$ >?nVo Og ܙ4 w>7xMR-*Nɹ{bs ^Üq L^Ǹ.i]{  C2thP]o|R&)[׿kqHpdBR1ًl)0e.فvm.eedK_M1J6Ö*%UJȆGVotDEɜ2yAoKTwy_|q(֋9eQVY׫ `Gtt<@)nVW^~'OUӭp-J )E.ťRnLd !138!? p \.K%0PFL̸˃4H+` {A3ݰjA s+ROf13 _|$"RJxR,,C>Lfmڴi,6yGW]u.[oEcVI3Q%s"͐#Ή1m*R.#zsAn_搑gbx$[c17|tVҠ&IJ*1Ih'C4#H _ci<5*;/߂:=1cyc 7"S?_ m",Ϣ tX?Ud]tEG.V%} ZzVyy$١y!3!MfiZCK`@ 24fa`×"X4$´up*e6˼2Q iu ( VIX1aSJf̛0^fe&L:>NrWmURF x5Jb#:eIJ%@xl0yr- yKQB(DZK}@࠷bSƒ"&^{Y _ɶڿRK=\p6lΤ9 t{73 y @~wsmAZk;t|[ne{9;7{V:ͷCUκ+~l+2̽kٹ5!Fw>u^ws+ӌI&@55QE{4Qq ԓ l5RpRH1IcFzKH5xGdHbz/YO'/aՅb6˂v 퓟$nMXi$' <6ܔFXW2.<&x +of9$0[\qq+,\=22p/\blir /#?ebUD[ yRLVFI$1%\0A3~xʿEw~/-rMK+5ˌČ@01/L_:dڤOitK%p \.@w$l鄝v#חþo1c֡ O hH` 5NkH1X )tOH\R<wlb.OaiINna/ +p SwyLXl=E%jV,nōB##p^!g} O ;'Ȃ@@D[X4) W8L~D#JyLPn΂˕Y}qr ,)=B/_}8 \Nc[㎡& _LG`DԂunKkD[g@'iU`K/ ]xk/JG 8~p /4BVMuV>:Ź>HP9Q CDj$ Ah 'wmK(xx^%p \.@OI`ιc=8䬳e]Xl!j|o5 tS 94@dd8 iIMOy%Cd8,Uky@ T ?l^X|Ad([Yg[8Y~ o݁3b&1e'OJ ;Ѳ7npuad˂lI_Ѝ}{d Dzr +Y[^zy晛mkRNATVӢyRv3wJ`'T @ q-%`OC㥜T )&KwO "K3gdG7֟oXRL ^P9ƒS 3ܔZWRcI)<4cǠ#x,Rۜ%`6.A7!VV ɫke<  Fz׽<+P8Hf 'CDFlV=Egs!md1(f} 5c~7ޘ^*Ch#.Bxqm2@ KPql~0jn4*ʦ4)\Q= a*,NrN:T̸Cm߿bܔQ4h+@1HR tߚxF6fBtaqdJRmhNK?9m曏G~„ =ة$p Gp1s嗳&Zkqf'> ik:'k=|jя0\y"7# d]Cפ}r7܉ock9; ,NmoO^m-6 $+P$ $E &r lRcF4czx47Sr1k#"%( ዘTUOJ1!S T >IPnJ<,3S3VDIU"i@4T .dd1lEPnJWU`zL<2JZ N`،-"p1#xXja'z^]jI?ycP N()Ŗ`xOxx& I LQu|$L )r$ )͈͐J }0)0>1w喲&rcJ_If$"b^#ȢQcġ2[ϚҀQD$D#J&R: M}01)ǎ)bOłơqaW[/OaX:Aĩ &fq= {a!dSd!5%?̍Eܕõ7pM7/}R_g{aSw.,rCAp5h˼xOo1I-6FpKBᩗ=DVFW R,ҋ|MTa:=>qEA6@2 OE͒1kT=KJuTHU~뫽0EƋO b֊ hFiV0"YL)Ik1&)$%l.ד)̐5"0%*bLZ oEXȢ)5%O&87hJe$C ǻ e2 0dim9~iF>Y \- H$Qi0$ l986bM î.,~o~lf~w!p* ^-fD% ._gk騣z̨qsW_}ct WLҞ0J!R]6Z˃RP5&ebPbpʼ2G3m:=A/>b}c`qKOO5J2#9\ xRH1tl~!cUH%]#qJ%5ً>Ց'q )L?= QfJ2X,@0"MT> DYM R:NA씋!#³;XBElIsAAq&52 T(__sS*Ȧ̔( 1SʒhYϘ=SC4Vp@>B ҂ @M8 ꉁ9;89Nԧ>5d5R-wy'ypǎ;um}N=>,KE MfJ4̚zbJ\îęLQ>{ZjѰ!~>ڿ3flt_F[”`%\ P&P@Y<%`IPZЬM ϭ:h,ǝc3:țǴ[p : mceeH >?`%\AKvM.^1C~bӟv~xlkW~ 246>6 H %`Ԇy .{ahK,DŽ7rn;Dkyrt<#?8cu Ԙ\:oT`%EbT]BZ %z 6xWNb~O<&Mߊ0+t IDATm#^vǙ IZeX!/"h *%ip衪f>ҡH^)ؐ:4$N *l9Lv x/;ō^xƚk}lV ?׊ Ē D&3fr s1)V[\=%dJN~>f̜L}9$U5QZ,L}=#z(4n8>Or >,2}'u>}$VXBd>Mcu, @UAy*300{"`rAY? lO>G*|aOqޔR$RK9]Kit@C} K7aQ9c$vC@wKvQrrC9wajW>蠃X{Hcti ;$G/!=΋vj́8}Od=; pdb<xLZ'#n$ɺQ]%&<3:rpl->묳^u*L[X&5n 04<=^[o=|W$yB@-pk1O~8=mH*֪~2~'ͷ~`H8.>rs ! `fDO #dM[qʩuY0? =FC5~sɂ0%%$p#TSbɗ0; 7l3<ӗn4% }|8s_I0Qj%Iwfc~dx_$13+(I  5J_'IQ3_ M&$_r{8&xX7uq+nO:V Myхg Km6 _p3+VPSKضς.Q?&NU V`V#(kg>RVWg!^ `Dؐ&=6`T\-~-[aT\'( j>M _scc>a pebfឃ/3n'-?_W\'PiZËSLؗ[nbw5l!mlx OHdKT< !GP'N&Lx8u)SpD?GL-l!L~0A>KaT i@( 4ᾗ@NHJO7jO ̹Nq?KtuN8H}ө;؞md=0Á^}4H@"pp.'dx%>sZe5]wݕV cGm݆{Gi9,fǀnCIgzzYoކi$Fd~̉\6ۃ>@Sjl\ÃRߔd)2``8`뭷`3$X70 8 U XqS;uzN0Bɜ,"mtd8!~7?3=+K7t\qOF5c;o8Yo|w0k<&J odX&\cxH> 7\?>lGL@5@+6@S;t9R?U bUQY.$',"VGFbR}YSs*, %j0X1}#YPS')1 O8Rrj'GFIGrߨ:,p[o=\! 
@bKSu'G_q_WX]rg|%4a1q Gqwm7֡p%"?&HwO:ΧY~"wBĠ/}_r t{뛺2GCҾaqt30U,kh9 x?I (|5G`F_3WM".q<~ՑI͔fcfD)) 0.&{Ͻ/ԾoJٞ7fG/8Ta p'lXV+yMF=\(=mb(ϲ{#(h0׀`n;/b{0:e]>e 56+D* ͐ذYu{B+%kI0)@.vz'79_"SBRF8Y XV<7 ,`LbhL) L *pS# %0"$eCS<>OFu9<A{ɔr5;z)Xy:Tz),;Aߥe0Sb:GKGSI~9EOLzݳr`.ǂ_]fe_E9iҤ &ɔjVZ 2XM9"r;<ڹ#cbP`1:\ <@WXa$D|J6Mp2G>luae߅^>u}.<C߰&n :-:^ _b{ W߻=I,YI۠  BA|9A}Um !8FYx%XWw7)%?^L)Q43%XLPʌIT_k S( HYH|y3SJMoD ~ 2P,E*XN0uG8!_.K/$JT R6N-0O0}٧!Ic0/}Kw$1Supap Tvd"[NwN|'X˩xp8v#,裏zKzx뮻.6^^Umj@t;2W1AH^Y'zK#p+flBs$0)~ S~}6hʗL*Y4l+.w|+⦔ L%!XA0.L$ iS|_op' oʺ52t$ Szq0%Edj2Ŏ3`11},T1ķhx2zA*huG&Z+QZ_oBk殻J9p+_>EP[~nDBӏ_*'ZE&#̣mj&l" N)?pFp];a8sF#L%0r%0)Rgrj@B4Ik^uU,7s+yoFvEvuQn:!Zhv5ĒhdN 9yDE>\#$Z߄[9OH60K?. Wj/UD R';ԦP crs7lr t apOS΀ &nR D`MoG. هp5BpN!8?~SdJ"G}b*T4ó\#]}fJ̘0H﬷Ћԃ$*E{aĉކ"CaRseC#rFܐQ.Ow)`IglC(HERT~,aAG0`B,]'E2JTJ.?GFcpp ţ@rɻV[lH=AnDq$B4=*KeF’K.J3z,71sPv[{YPQ0YXB{CޓBI#9,bJK@UEp&? FaFg ;ilӸz\;F/^z kRN BqDQe(E$ sa2b@ @1I H ?87E@!(hea=v3YKW`?g41Goto~t[a4Nql~AO"cB^#e5s(.MK F"` %2":fH/K \<JՇҦ^!0Z0Q%E,r2V>Fðz. H@owWCe <Z"1F vP@ߏUH=~{$>ض\zO{LY-0_~98me]&MUW]?iGm!;ҟY92k=8\U'|Z1A6ݐd9,7a{6ڧGw'=Iy{+B_\+oh+`rҰ0#M@ǩc" aq#g.C!"!C`4s/!zxj** GP:KH`mÓ |eFCe>1Nq48Ȃk{ڛ;\;M( zXl2Fx8}eēa4Z2p iɌs8$ɂ ]p&@c>uN֞$zM, l#.cggcOA'ڤL\Ā 2* J!p˯گ\dGQjKؑ;4ҎLľ*z:JçG3s t2gH;uKeՁN J̥ DeUV)fFΫfpkW1]s51?ѵo9F=QzIz׌+r˒i1RGɈXNu : lC:(dXieVVQC'ޠ.cG|A>FC`[;z65J/##zv~KK`s`vY[ ފr3?_E1>'{وֺkS F^E&yרּEu>SXXw "<>"a^{)0@[]?.Y LxWwm:[[vLKL75d4%O'&K%%JHxy^'<$ &;E?KbPUYuŗe7эXr=o]],dJJcwox)ΝxZH|d3%/zAdqPa6WJ2KO3b Cᕀ~O?8kX뻁A0g$nIMB %4Ձۍz_|>MMpMQ#6R*DY7MPh9,q|;U ᦟ>(Son{na)>lq U-nQLZ5,/';J։ʾQ.v,~Wir>bHɧ4O7,l^[bY~df E/zt{gf& VLrnw;/ax oxC5-X`D8?v:cbwvsLwl5M⸋=iAϞx~%U1{]U-š[s_% l31m 5܋8dqf8up|iၕĦd}iJГ:mgW]Q7γ>Odӓ%inx#/}KIaݞ^\zM2?Yg<=cg|{n/K.)OyJiЉZLj{xunws9gQo2br6*$@: \t)k޹[5L/t5/>_ZwB N[% LsG#h4F)zC&%0A[2qL0h2 lb-""뗇Ksˏ QSmHp5f=MĺPvE4g?/\Jvp݃[8fF r-<:]#IIQYLTկ~SCP9 nٟ$^r-?]=KbE6}O7|Wη.DviRYnÎzի :q}.x B IDATXgy.ڠ9$ eq'ꆸ^&x;6'8 mcg:jW8ޤ^ [x׌C֧ckČU&i}.Jbtk{ ]ΘE݌k:H`ܭNt <Wb8G\{@z>J u'-MYdSF(U|D)͖ߩ!)JN4u r౧| _h.zi]^'ԥYF"* A5>#\dh7JY6ZщC(Nt^ wbCȫaئ]y ڸ$}c]$0WfqrLry%ȔW"Ѵm-R#e~KrOJ4gwGC)@;n@W~$3hA}){{>яځk,i8ĭY؟+س'8SOUiJXwCR鐏ycZ$o'V+,KQ:W_׾-paDq[R2'.x4LK,Fi S@kozӛOv10v%9z.#tWåq6!K^.7-mrnIb CٻG'u ' lD{Us2\ОK`Kj1Yңb?M}(*͞H`GaH{5Y=! jNjeZw%x}s93|䬳r.V.ީ=T?w?ρ`8|_?p(%V>H +& {d^Wr`ibB-ȸY˔<9V<?=Cԁ)uWdd pT IʭU޼lFzF҂ZMDc48LJ ']Zs.~flǛZڮl/ ]xᅗ^zQU '$k!zR/|w5s4k>a.)QTr׍Ɍp xBl8Dʌ:{̛X:Ųf:˺Fcgیgjw*ۂmK&g'h T8Sk3K7`rmnڌ_֥v%0N7?=!Ї>AOvY<tCUeT>/.~JwBx'6)m bZġwyO+=gd~lpKGV& !l(zDrqxݦ\gmJOg%cauO7't] ʁw[ND栘A^i=(^CԷI猾G<(7H,_'܃ɵГZ>?-]GSy#,y2+dX\%/ST.\s' :p&FS<ښM:m5dfxq?Wя~E]4? 
#27'{3I8mYcgN2j׋:׽]z 0(7IL~-wP(iCaSOu<˿.}u (χYO8)r Q-__&Zو}Oj2i5Cm *8p`A);^BOT )jΒh3-e0V Hn3.!\,Gmo-Ə#Y?<-H^A- n dŚ!s@zf\V$38Ut׃S&'5%zb{6}~DX{p/KJ@tUj4(!ERk9GbP& S j.ݡ \mtz:WcnhwPomVZeD?CǺ:ұ;%0u+{{nw],^I}聃l`x:Iz3 //Cnd"Yr*ЉyЈ}UWM>hr7p*_#IO~T7[@@jɃu{{AK@kKcPҗ@5\SNO(>پMcߩ47=T 6-e~߯S1hA:YZlwJ=򑏴eCk^ (1 ;m0e`YFhǸu̵7ym?׬d}_ڄVdz XPš鰚ѺՉ!?I6-@hJ&>grVY%g8nqe2$&7gjx6G?J/E__0ȹBla Y{pk)>[ M=ơ\}=q&=2x {E8ckg?bF㱢,`eNi֒ث~Ȇ %e~y `h:"U_XM<4Pow&X]!=q0YpKE?5Oc`Qu\Ӎfѳ,@ EP >PEO8$b<1TvY]ZR,/fȌDc 6Z0mďKu2e"z"DOvĞKKK`%̟n//0kKѓK'-}ͬ%VSYuYb[n-{?pPXUĬw0OUx̏ie`٠dJͳΌЊIdVIPzroIgxȤD@tA13DAJu־A9r?H{c+GO|"OQ(4 SӯK.YK-bgviܣ؃X}}.-;HoI0ؘf1mPN/vI0'|rھ!#xK~OhR%%%p%ɩ5;85Agv s v$5!6ۙϞN4C H\?hׂ)l)F1̏=ђ/}KU 7rd8!4%/Ŧo}+L0^C38 _7ƓQudeREuKJ Ib(Ip'߈C !{pf<;M_ma]{p[z}csW/P Fn]49 :IU Fm>,Sq{\qvEȾ#tI4NhN|CIw4 ځnpj0e"c8tA mX%n!Al|9#v>,WTϽGx_{ac,1&|pjsl>8z W*Ϡ7B'ֺ۞W../"JWjA-N(~}{>w?"-fv2) \&n3؏Ȋq6$7 Ǣ L̔y,(ؾz+6`$Wz%o4jsoL< 8a.xzGO6!^f@@ dh5YLTg 1:v%29j<Ȳ(O4(qHHOV}%FhiNq)wKK I Amް+[7vD\;wNn=}+^UC/i T<1[pZCp'}8SDӞ>ple}+_!hlB*>RO\2^zAaUѓUv t t L ynA"z+O|bp_v h dc*[ؤP"|?cf< } _0C4ܿϪɎ|dhf9]] 0Ѵ!&dxiUӐNP>ӛ+&K Qlx+nk>)"7=>Ie5ӁYu6l=ٜlEư3 (空ܾkDa},ZcnѺlDf-]]9&I;mluɊ)YHnt9N)!3T[;=Ȱu2%Co(ϓ-b>Xʾ{.zdĥBti%Ff,Pb"}׻Րt((^\>ݛ`GN?'ɜYòG|oI|({-Pԧ>u={Ӝ>L{z{KNp(~WsKvfp:XF ;xoOa=N ZݔۧAKIZap.=% _wKK@ϩizq ]M[r8%cC7%O\<%/2>IOjJre8N (oy[ڌ20#뫪bo}[e@ EΈ|sw\Lm=o|9OV O&jGNhT0 'ɟYЭnuIsU*5ٷyկ^k1˺? _yH~s%Q D.7]lZǧ|G'< _YG;9~b&]%K.ik0&sSń5A5[-ßCa{]X^x9ʔҶWֶ}[hSO甃K`?H dɹk|3]Bީ@SIݲm;U ~{o}kG |Đ"#Kn_-of7mGvm數]G\4gzGDlD ULvX 4, 2;Ttz+5Ї>h?%V;>Tv^$:=| :/)DYtݩY0Ν99xѳ#ԇ/ӞKӞ4Cq]Irײ@C|޷|މ.'; (m/|萼axgq$f.qXq' jdǬ @pafl>:XQ@ >򑏜uY(:BC^^8^sTiQC^QϽ+̘E%m_SZMg%Ћ(P%1<i_{|#[5*5&H&??5с^߹cީ ,肶\Q١n25Aaİ:+"3[oaV΀5D=y*9tԇ,i Sӻ^Ń%sJ%fd:gi> 0]%d56eW-nq :[M<]=ِ`s9ʎж*7L6DO(VnFy +vIơ)f҃KpK:ZowtoBq^q:%ph$`zm*vw˗D1֖ G_L-s/đ8L7-J#_ܮ T C+43Q!\w<TKaD|Nn;z`[CHC[%t׸\C?C#/O0Xo(n+%C<1N,?B4<nzӛZ'"يRdߍ=h\ZWx]L@ !j&ةVuP!&Tp+ZΨ1IZoixꩧZhěD@an=ٜl[z9㡡`0ȋTE9qʠ@o 6ueZu7 ٳ`[KI`wUm; :<5lb}]w W9Ex2Mj2sc F=>%Kd 2 v>R|xk&SoY;@y | ..4" pe@4A%,M%٭IY^Ttp^^P `LTI y/Xάdl_>՝)"TYY-a9c񘽉H~v}|@?,뒤*l* sL m!ȭ%&Y)?Qo*| ?O?_TAIڟѓVn@'M2},[ުzOZ%38ؼ3dJFԕ]GJJ_q M a֚4L»n,q:Ay{..HɆ$ IDAT2V'MM`2E)X $BJ a2;{+^a bQCk2N_5)ծdf+o{SO\W{PqeB?ڣy:s`9ծT4?(=imbE gyr;~ T`bJ艍E z3:1ѽq>g|[婤MYtF5  ɜ÷+ R xfY@[8:i!$4fW4 E儱r4162(OΊO0T .Nw5;f $`(Če.wT1vr YD_ЀƆZpά[.d-l[ohMl\lIO;΃[{yѓ=1,+G(=87(z€I{WcF.:zJ]]]]]]]GG.(.& EL/MGz l͹TMlPCF\q6Dpˢ9NΆ4r;w.}Q<^N0A'9=3{9|&jGb@s( x`6FnNP϶*Y'/KN,U0S@B1sdU RbVƷSo{6.y MBO2cD@'w wS&q/;&Fpp8Kߵ׆/rΏ w*ܶ-"YҳQ8ͣ3Iʹ}tJKxq9Ű5IB/m=YEz:-~Tuz`bI~Nq*^j0sC|0ASݙ]]]]]]]]]4j.b*P.M\ECT( [ b"[ƴ^݆4F{,`·Lz cmمkUpeV'@2ǧJܴx{>- whw, _9p~´!+v9 zm;ǯ}kuihG¤|Coⶖ.ia'D @-v=Zjo'0f AINj*udwE1j( jf x8w7lHN&Ie:d$ ٧ P[~$6Ռ((HKH(ϡV h\nH-q,&-o 0ʟ<‹_b3;Tbx ϔnTe!71Ov(¸Tq,6Y9O6W\B)H]`1X{%':6;EJCGLɤȆ5铷v$6~|".TH. ĔGiG1>>/yK^W$k'ԇAJW1l?}P`GO9,CzcaG? 
% =y'IDQ=K@@@@@@@='BhR1]q8#dhMJB#NvBLy)'x4s@c}z)Zr*X&ܐZG'>!ٍ V:9p10 '%`š-$??˴:؜`"My_p.wKSN9łz VjA7aVur (tLKNDm804bX5Kz9ntQU )M$evds=n%?O6{ֳ5Y :bz2~Ar)H6T҃]s ӓu t t t t t t t J D 2B ӈ$s*vK@'RJhH&nI H %w] yUC&;k&zXl]CBv~^Ƭ@ Q8kd}1/qiO{׿o~#~y' (VRU0?n|i2lL' ?w%OOֻ)vK`KA9IƸƣ8 1:4Y5YP*MpUzb07@8[}NڃY Yѓ%߳޺2S2EqκckjcՁ ^N@@@@@@@!@ v@RPFPbZ`[ `ʋ/ >'*nq&A.7C_!@A`Q`@*@Tal%v7dI3DLl :r=Ka򖷼Es8 P@#bQ_n8餓5~A}#N= bB"O* ȯ_HP<|Vp@]T0=qv6X_Փ :{bĸ(@&6Ș˔Pt%*ۧyo{۪X˙jZX͹zĻos>!YcὨ'6J #M;'[M|dj%8s%<C~f{]h]]]QAGxG Te&S#DR!'8E ٛ%vWRp.sK, x'қ乂B*#0UThBh Z^"Qh N=B#8PsRio~ :Q7Ф'ʾO6>^x!44؆5pWf:zhR-h2* /KW`Kb©c%"=/u[fT 4ZǨ@F8&X$AR{>R3a-&oGdKhi"'2qTގ(ҥ^ 54c`6r~T֊Qt꘷זi+سn r,"ɠ\ pŘ1^}V_viw[(yЫ~,n,:H`P2G"dU/8*+H1 >XPǽT 4554E&T$"b)1"[SZLHܛ8tr/X gBtu[Xi 3 /-B.p07~7BD}6Q_ p%KrXوI a%8I`RczVP&Z"$K'g9@pv[AzOyЃ4%ϕ` 2P(l`F8@W4E)z8~#HB~%hVb ѓ99<^1!SN8/Mm0ѺitqTsg)Zz9ZJᣧ>YITκ .6c%zn+IҎOeʚX4ıUN LJmD$ں c͙Gozӛ:L6Ci|.%G!)0(Fbb-e5"@%nmO|p$ib56H)F▃.֛HaF?a(j%JXmz5Yͽ+a.dMZbJH>ys m56GpX~1EĨ\,&DGnmzI\T!L #*w+'$}61/S]+Dl=1 !5Ei6s98ҋy8Ui88P SJ{ì$?8~>?ߨ\7M;XA ~yp%`1`A%e6iCР'v;%0%q>nyqn%c.[Os+׷'\XX 2{Qԡ䉬e̼ق)Ej´7mGLDUH â{j>Yi0Hi8 "[ܸX_I},̿]benVې):|3l Y6?,+__- s+cƎ(%OKfo}Mt'E <CUSy 6Xh93#+'gq_G+>T=cs/׽up;mNEiFB% nw={ZŶ!w]`+Y;묳 NE}(E#"ΠF/Kb.%)tq-w'h&q '{.:!Jy4!t ZhJ+q)3OLVsXI$L/GXO擃u8A#bH5!q˜]3q{/͠~fJexS2e,b |Ps{t;TloWaGRYy%ĊlO /T1&U=}\:nTƄTA4z]T "[(msuf$>o]?Hc'-M֘=ς2KRԓo3P5-GdNPߛ'FVܵ1N}&[~a";5MgKCIɗ\rW$ek^_r, [ ^5 ^RO5ּCɻ%Pڭz.[P|nґpDL]t8b!C'yhuYA!")z̞i{e )~[[iZFrl˿ӟtعE@RЗu$N!$9o= Spknfrꩧi|#1kN)BmYԯ'V vWK ~Ģ]=YH{]]$@r;+]NyWp"b|tE gvۡ*|CokVbf8T>Z+8tBk Z_W<)Bs˒:TP;'xJB93bcKxlʀ,(<8[0\WQ´@vb ZTHilE{4o!/;LE*e'%C V9[݅ǝ{mM]:ߊ`ilܛ, P#L//8$vVSt;.ncTUF⮆҂E'bt]MZ^X2}((I O4XB'K4[J2{k87qKv̔/N]s U$eKW"Z_]"B.Sgj̲FXݱl`dc'.g$.Q#t C(D2LF$ɤ)wMqPgn 7ZC{7̀#QƿD昈ЃzKZ3 #Ts8Tz%`Qy,7Q{LPNnAtBOg,Г^W{"mӢD@zEr 0#Lo`Mck2)cHS寅|IySz=Tqd5@ mnAꪫ6>$nG3(Љ+i6K/m}<+_g6yo 49knmqi`DPZ2 t9OO\aezX2}(.^!@*ABRt!|A~{66-/:)-]I8%u_ s~$@8崱b#uƯmY9眃dLf~bNe ,(M0r/r h2OJt9H>1x3I=?Dg%M>Me54٘gku߫MO&L߮s9x􊗣r{ P!Ln!i'Db۬62\sLQ+o6Hi8]߸?39/W^Neac~MߵrcDns<]IBIc[aFhwZZ yvĬO:$R˦[BALƁNLwE=<0#7"16'n{9HqBRĄvm/#EۢXoŭJWtv{8'' 9$#.Ih9\ |Ap2038#[ mQ@.*Z|xMC7V=ծSo)>{mJ3 [e d?d(FMSC{:fll'+Rn4߳c5 YoX̪!‹Rm}(tH o^P(n t>F$sy-1+-SuMdR PqUdI'^T Ƙ[!-QqsK=7UR`nQ327|<,^PY8|h`F'Gڢ " nUy̐[Em_s1wKPMT^-nZoĺQHwDd+cH&d̉:u/XbbUY4/{$Gwq|`0+]kͨ>m+ʷB eB/`r(sAMlAXEP-g>IKeP~Aiql23R2GJ\ݲ IDATЍC DX0ZYr[ftҎH19sC^4tq@)W >bt*R~+-ݻț >STdĹ)%[̔YwJ?+T--}.SGMPDhLo0/cXzT(@ɞu'|?OmsfȮ=YxKi|l0O0 DܭFd"lP!J3X Y҆b 'd8RA&@?HS\ih DK]E$ز5>OQYO/vb`Uz ?y!$Äe˃?($u{&왨V!blR3TBq?-[=q×[MN&#٫EΔ?XN p."uvv/Zv(C,{Fyb@?7͠zHքJulj^CM L!4AdbKJ8>bAbE+ Ql[֢G1O 6=?cUrXMѤ$F!,e".t..!6:6%҆jӋ8L,/Eŗ]H*Pkx'ͱ#O9^wutv T=u{;mn L;3-[93ٻ%{˟EXԙCЖMw5,Y3'U>M|$yM8vTydA8 ɟsI:Z}T1oSZ^8mPtr)K ”!Nq7|= ])n!['9WҤ\HY[=YY& Ngy oK=I*ߗjoZ[UjUePaVF!KK"t!%p?q:-UjثcsNN8pZzЃ49mIzfz\Zpz?2o$"MF'M .C.o~ClH=ِ`{]]S$PÆ}QiL7L `.ĭr}s,2T qL^] LsiΡ0e UhVh9k}̙LѬ {l-̵?8k(!ph`'W f0l;&Ua4mh(i4@O,$#A]K gTh8W84eCݭ[~RA 13p߾9."y(J5!2mq_UO'k-tR%Ϗ H뮓\Wbe$ۧ֞Ѥ=!&p:O(z)DdKRDz=I!q}"`E2h ~Lr`O(ᬳ*^d-{&u=ȧ=y?@; Vc"HHlO$Չ#=G8ĮûL@ iKjZ?,qJF>iԦ}UPoCyr 'yL%8'ڪx{+m907qV3Cs$ڒ2θ,vo8QJ^=uE-X R^})< dq5_m m|5;f %% =vO>ѓi ;&: ` \}(0M^AGJ]K`4 # UX^tQJ%v gJ%MizuIkm.1+|ғ%zL_ [mZ\QJ ;v%a<+sYT!{Ldco8p?d8%pD~'}O,AB8W[~%<B^O?ݾQ3XY8\b"8?7P c艞q!wXeK3toң3+ރwd$dC[qn*g9?u; tr[!Wų$%oغQmޤ?JL)_1d)XΊٍp: כubc7ubH=Ip~pKJ{Hp~)<6,ytq,P:8'|tia7)V9 h>hh Bhog=N2vī=ƞ%xE!Pƅb:h]{cSs_]ϱXWW)gCIerF ~YDJ$]$PZS3"B(`S*-z]9V"4%<.Xh˸^4cM\JPITL-B+7Kz;3l;XJ^6& 1`8p1w(g=Y:%%_x^tE TA!U|}m%M@& }PCG&4-T3z2>4wɊ 02}Yv^w|"VY,Plb+S|t4UktTj'Z \Є .^;C q*@87 #ׄf)rkˮBbJx3{93KY-Œ4envя~]ѩ̃mz.diל} *`j g12Շ҆ۋ=:hW3HA6c7r[Qq۔TƖeDHm+/vi8Q&=ev^ ]3Bj|J5ǚ sKD@#1dϧS, hoǼUEN)}׬-C5Jw/G ;;Zp7?S*)dI0J7NƄ 
)`'cKz5dFj6+Q@YB4%dѰKQĂ%9+PfX58(۱vxMUzJ25A8:XĤ8Z<DIV 7Qfb'RUG=1g ̜&L?*+`.>&/'WO&8pSY[Ϛ~i.w?Ձy^c:vԿ2l!X05\cj&LuW&-1,>Smhrati60fh+fNK_zg{WZQϔjn"ɬ}7^SB[%`OE9ZT~.`)Oj%T9E؞.πJ&bPR'[02L}oOWʤfJsn>Q S,j%AB f$fsZBMeL[b|m`B&@sp%p@ _Pq8I0PpRaq"nj5zvԚyUL.+O[m{a*zE"Y艞M``bt`şqNGu6ːPN wEz (H:i#:0-c4- Ty@U5ߋèNeC8^sj$UH}5Q3{6WAW+E9VVmk9SAzb^a(YCح1/KI$}'i˩n ه~. *CГKウ',~ɼjU}6^CI }}YcmZopw\nb(EP>}(W0I L$0δre-G v$Qu đQ5R+\EpSP<;M5Xg؏ُ-3sT8qLox>9Cms $N풳(V,Cp,h4 + (63Bٵݐk Eb1Ȑ z3%%(f/z3Fp"G{T(Rr=q'% Z'h/ >z7XEJNQLV4-:d}R=LՈ2Y =%66l)5ѬJe{E΅&]ukk-BOz2J :1jp~n2>N䏐-?A{&%5nÙ!Lżb&*Siv$@0)StLKLd da[H +:4zi%&TFqQTbs)s%hHbCӅL UTK#NjGq7tK|dp> Blhc#v'gxOGKXK'|2u =^Aw숑3uwr-RZ[y, u⠪"r* ],E{ jbA1^fuH ԣ< F;K$N2) 0, sluJbWBEl"&p 8;#Vv *H{<a'~L*pL ۢ aR*x J% A>d.Ci|...I Ѹr+p6{uZ:VԓdGV @Hcұ 4&=]n.3j0EJKej1K},hkwڒ{DD \ !1y &e@rL#Vi#d_hwb"j*`4@#j_N\R,昌U7iWedl8z6Zka6vpg! 9"3Jщco,B塇B KQ+R.B4Q2@9"[:-@ :QXP*7PӌP;\z$^4-Bn Rft-)P$I!6ttv]&aT{#rb GWb" dž4B[~@_0}O%%0U}(MKgv t t lNEueK'`bB<%MsȳBTu`Pw!I;0,]q(9SS9LM9&3y(_zRO 1x2R3{jvwt sKUָ׽Υd5EHoj >͜vi{|tҫ鿪~/!@bjN؆,sN~DhE!dIM y N6N?'t~sY_$Ir%t6fe@Q%j\^LXXA"1k ̺mi6uN#?Zw!m5Ґ 1VK@v>:韵MMXLOf;ZǪcOi6攏 - ˈ}'OG:ڋxp_.",o=M@T 4U,%%%WM<I A.LmLW,`r^ʈc Se{:ykjB9yֺ)7L-l;-ԝ9*mH!(#<Aȥ,D߆hV)SU8Uo- ! jៗ_P3pȌbTRI׷= _L I6ʰuCA0,@IWFA8 V/M=I0D%_z1NKczV9,Osͺ`i77'B?f>A?Ȓ-f <2j x) 8/dIF076=$ iVq @.y]=Y*rX\t{`xnmar ',^TMz݇#Y8/Fb\G%fJә1d*FTn?sBM@&@$q1[y&1S2kxk_4DwBk˂%0Kl9  `v&i"j\C@/q$XH0CcTda%8UTr !%׃ܕlRdG=A N2B2`:Ce~sQ}!6WNO*ĮJ {q@ʠ[F IDAT:?e d#2N޼- :EM5^𛒃7*|I=v'p/{U"zNw t t t t t t t n  ~Y}J&b2'}P6/((ZQ[GYvAxM3]Of)Жkm!N:r>.ih@)g)wXتr?q7aI$7Bm4!`G!CBh|!b%0w'$-wHЃHH'|!쾛#}U철'(Lji#Yo{//݅%/s7b1($'~x=4%[V^@zHRrhl'4~>᳟,}{u 1V&T{q{]]]]]]]]]@0qh_ЇR3VO\Rm>wMCgoB1̿,EkGg ޅćnJPc`dX~FťaR6IDΣ,X}ڳ;tE(;Pd\"hx@Đ⌁"G #'{an 3=4q'Bj"aSM:ГlޱC9v ybՖ k/w[x2d&SHsq}2STvd-m"G E^Q|H9%.g0=[s*ou 7 pïSO"oطCi4b]GY[ة!@VBgt?~R0[Nj,4i#8껤ػD>|AS%8^mY~SN[0hSִP+:%QHBvH4▖8dih0m_}Ҙ慀ʒc]ʾ26hѬ, $~ݖfߖ`)N4JH@8nT٘#wT nL::\:[Nj>-+ҥ9p)268{Wr~|C@J58(r@ rWM Z qXB$$ɉjXj:K.8_0( ^E,z _my o 8BkxJh/I/Km39!b"H4zr;On zwch tVK׌ Mҋ\L'B\[C| K djކrWI}c?#yl˩giIrh`喽6 %q0/^ GY 43VXXo4i8X0zk%$Ї~Ez}ZLZF)tW1ȡniu&˫4‰(J t1:8 ,UxEz <)9:ء6g~#J\p]V8zP\| ǥ-Z.rRBpB^ ,V~{ثsQ]ALb?O_d>sF>@JGU8:0R2Cyŷ2 =.x% >_:;}8Lqor=McX]WhN"[̶L 9HFBؑ8m0IQEGa՜ s2=!^Ğ2 TYN#Ew;NLҀƁ`mALKK^629EOa`^Ր"JoV0c1e)b\d=sϯƯ YqhxAd4Olzt{-]NO~򓡺:p?Ci&]R^1k?8i؞; PdFAF)1$"¤l۠xB7_얅pZu|d2 ƀ\dpZOJLL T\m)RY/N]fn8j%.a<6&1aZKmMOtB.K@EWgvՄ&>lvG=pD{jZW L~œ\3x.;HgcΙIrA@*L0!|W[#/6? 
J %ٱ{F:k>I>԰&8IpiV ~WZ^罯QMNwS;YęhL MbT3M:P,gWYb# ӌ1% J2-S5F"+^s9gWU* CMG§V 13u] E-gNeҢ0р2"F(*% E1 5`^W,0`Ί=o׼sowOO3U=Suު:uԤEmB;2H( /;1<57TadG7 !s/f|jpl;ۡP#}nAjOȍFF}.]o8 e5@y10il.I9f7HЇJfCN!ԁ4X2HL~);%4Vl@@R`~(|8$#HH/vHj!%A'A8.B oE˪э[Lғص e=taIc)Vl414xX5p UmhEV āA`VÐne\udUNZE_j֤5gb)j5yiLSxm/駟C6}j&SIItb$!k]b`x1%Zi5 君8fȐ P)}C9wtV@ZAtBHfN;o9%)?;:5ڑMo"Ƭ(xWKGoڣ'ʥ3C'IPZG6xY }F7$#RuAHH7+Zβ*UJ3钨͇Di^j>*v.:L_6ɉ}O @.)O\cIL'rs`1|ӎI1cx8$֍KP*/AeY-k;z׻ԘVM,l&mr{l0<5p\~9tq4@fIfb DsRnqjhΜ(P wqAʌ,4r4u2]1d1yvqo.P 7:/ƕٺ Êًj#"1~?$98Wӈ;ni6kA凤*4 p(0GVB踌Ne+!wJ" g%<:z֎t5_X %\VRy͐/x t oFeAl>5;UH{T.6qOK@aBeYzލ4)dw,"kC Y[9$ϻNά E w]ICd3KZUs v- ߱ev(#0c "~ϏNJ'C.q5{iғB[K.7Э>Ї7Z?:Bq]X香MC#=b>Vbՠ|e%gCM|:}+)FFuHb\Z@y'@d\p^Q5EEbtWYh \8GM!E]ǒYfc | W+~ҭ0)054!9ܕ?saHBIg_3~?8pbQ铌bh뮻ty͈r5om[.ӗN6Sկ~CLN2#H4V4;]]"o"FrBh>{(v2[kj@E*:OQ̦Ƥl%Ƌ5g<ծbhs+)|s jg[gJI4^).3@zs4UK] U$Ӥ'+64 4 /ȔI63tj104Oxқ5{d *bb0c] 8 N8O:j]TZW&f%*Vz7-7GJj 4Zхi45QQ`\E_t LVysL7e0]$ VPU &#'נhYEq!8c.VVrֳcv~7A?vq'48n^<4W҈q>硷IJI(6 8sf>ls!??Bh8HƎa+KJo;a#օ!o?dU((0~ Td\TKfܛ\Lɗ %8)zM̆CȮu^dKifyjq@/]4 &9 Jv!wh>YIQ&"ȡ(5V"K 6 4 OM8~4M QT6@j@>?X`~Uo7]vx3Y9ҁL)it1$Q+Kl9xa-JtOɃh.Y@Y1YI!&Z/|!*@c>Y((P/7ÝxD'UV2'`FVvm) b1lfᎰO4;ڹ%)sU_.Jc^"=$BbF_C] H cw(JPF~tp5;l!nVsCIJa]֤'FF1SO{=\OI9VQ2|;W.'g+ @a27hgh4KO]p&s!7Yү(f\&2qx dHDiFD钸ec~c@iljCՏ&&1~b#zg}g$#@ν OI6_6HInJN]x분vsgҿ(((@Ag">O!Z^w"7 W[k;+-_QQ`S ˏۚ|XRV5a0eX np:v yJ`N]8 \,b|sgpq4k<$!U !RbDo4hLpcpV P"3bd)l'KIVW@eV zЃ)_։7L!!_^=rz򕯼ZZ)XiY|BkT{roJɇ$LEJ.6##,X 0ms!Or!2:5CT3`-F/'~I1cOW:c0gi&u_uFPE 򗿜"o MEmޤ'KLV]@qN0\穘e!eeA)XivFFFY(ߑ}Io .H &j6ꌦзeŏՎ=kxjReEAqfT& &d7ԗUH%i,y]5V7ZFFqQ *(Ğ6 |N4PPM%.0ϰE8igߣx^m1f>@NDOF̐eqF_9[E_!b.M3׏7;^4 l,XB:܉n&#Yh+͛MQQ`~ ֔8~>鉭 b!#:!.G$(|i*Lӄ 9|WE8fbӑjf)+r6.ryQ [Z'b!uX\XO@h-ohhhhhhhhhhX lYlVuvD'8\\r% &Hl=BAClNBv;ޡ\_cd= .jNФpMz#.4 4 4 4 4 4 4 4 4 4 lX a;UB@v*|G$RR%Yٿ/L{sp-`tFtO؃;묳o?P<*^tnҘ>1~2ER2eD!j 41a)j'KIVW@@@@@@@@@@bQ [Sꐋ4O儤 !r? \я~sꩧ^^vOF+}fme+w25ѭ`PI\'6&kٸɵr7Vቩj-|æ@aQ_(@7)Ŕ2@Wd]ءZQ`@7h[8aK҄hPFb% FşA+wDhj@ 얅Y ;7w.w9{_$IzXXk2;{ e\g"āahFRnLaܻ?ғNFFFFFFFFFFFuK"G:`pTHh@F"q/0c Γu۵D;<1ߗeW6EPReב96IOHQQQQQQQQQQQ`()ɐ!HI"" P-P.oxso~?Y^{z/~"Wկ~ Gdȏ0.^iғFFFFFFFFFFFJ!1`NF JU5VʈZ`mso9yo\1vtWΛo1KH?)WU&~6D[ou1O,Y&=Y 2 M"(J(P*1A',̦n?%ÄU>x͝Esl?to}LY<4_k[Aw׀\+@x{oV׿8>&[<[¯uk PKb$5gjғohhhhhhhhhhX(0*L Hr' )r" >OgW"79?9^e^NRcC>  ?7MN]vNwS'|o|CYdk_2r*&C6c)K[ғ RVDP>g)4^Z\1j7VZ?t~ (Tܴ]iU4  A"( GlR/χ7az15ˆvۍ(o|/<Lu'taO~r!.z׻}{ĸ/}Kg~|eTv~ l U'>lvIOz /+_ ^Xx{VP g;o6s8Ɣ?Myk}T-LɊ2g;<~eI-~H3YғplaDW4 xW֏ߠQ  Bveр'BsF{`i:\ϛN]. CW*<{T.FoX( b)*+m*I*BJTO6l˭&=-bR 6ݬR#SSS=NaG[O AOƼ$t.MO|׽hbŊk^/=7{ ]B/˙G~xE;C 6L{P`Y,[i}—yg>DȳnCqGΒ`QeVQc}QW,4s_[J __g7MƆjXn?׸5l`yM{S@$v˿O:Ӊ]x(0Qo^h0L@Wʜ*-+p}o&RӾbݨ/밾vf4 4 LK¶ݒ/:D"i]#=J A7!7`G~CaXuo~k\<裝w$vqGaw4 fڻ-ӦɌՏuvgڔi=5S_+FoSXb+EN2Z笱EJ@$vz׻+iC0Ԟ%+v'Rv9(6Wk=]2bf5Y7Ci?@7f+9: IpMN^6lˈƳW2H7ux^9eypPZ*Rnq[|_;m-(,(pꩧZkfx7M TkdIp(.&!W?&pYf7Yů^)r+]ϳ,ӟtOÐETIy ۨRCJL|p;ޗ+^[kp[lvy*Lr_CTTfX-.F.?fJOqgǪϚ-aSFu]2q@eک`&&C\l{3ңo3mbާvv) ,?Y9̓f|c[f6կ~Őʴi{+̱$8Ca ((0AɔI}(a~ #TKɐuXEX_u$&JB.Yz׻djΫ2%0E,PX`l tUmr/sЁ\R vٱU jX| X@&}[ZA<`Ԭ$%4&nRnܺ&W]vWDV*z(ޙN$FyRdz׻moa^wۅ|EjlKMNZ2 NUSO9MG<v}._1G2i['d%"}ʙrħO]+{J-ߟ {b.RXIvE m<&W+a~P/>㴇>^93[:u{,Ǧ~U.Y5ew~jD85+"3┛ߡ6lͰnNmbN:=y+wi Y iQ`(@|XD/ fةꘇYi:Z(Q t8px6p|,#XO^v*luvl{7̾\^U|&n &5>ءd5hmWBO/s! )$^IQ-oyz)K\n$)-_κ+[RN%.p'6}eILYo:տnsU ѤkKLV]B(p]oc7"հpP2é%@ ADpLTdT饛\z]޶mG*ZF |nRev0Ved{JS'o `\KoMx;Xb*f4 yqV2i yϘO9Kk%hhX LaS`00'L{8ӏ@U/w%/yώIo~Pk( 3*=e]\=[s)2Y.jX84tw=ʊo%!rn9&һH?˱P҇ڃL3/KUsYv}o$d%d].׹'Ƽ#}nЯXH3\(0M-EH'6̤'`ƪUk\/eZ/M}jFYIeǶJT]2^Ņ..&QvPJb#B))j]'hx(Љ:RTF@IJXƠ a(p0qˍa,~JDh.jC/=Sy璍V]SI{ Cm1^2+mQlHO ut0%֙Y [)(03+F/0/ր N.wSaABlp\}@S O21ub5;G=Q22/:틨P8c=#=q~*u*L\cSY*p;$&R/|?"?> )tؠb-N~G. 
R&߼0)ғ5$Dn,-ا{yf}OHYbzxeYRH)%\] _YN* FSUݲ:!!Z6qe'7uIZrPMĔOܛ,nVӕY8qnRd8n?./6xr}x o?]Bu'*R>SlUuX:H֪}X&YnzJ @1E^"pV*kZ-Ps@e%cf"B:&@u4rצE-m@DP܆W3;khF භ-վ/Ez7*ӖH!Tqk\c ){9ƥώ\5ϝ&%wgŊc2uYK߄XZ\4:W&Eqfb "4تMO#4ˊ;N/t ^9$7a{'t*'l%L첌ͲU*#lʞ[ƪʿ.+2ANKw0_;'o)s)hi83j\eϦ.|q]RnP0 ͊J.(71P#N\sC `5tlqSn&oe[,VDDO),~⧽*3@1`S\DXA> =U)0M3 p 2WRTH(pm9!]O! LIqD՗bVB@(PVkSL>WߙM0<͛ц\*6/'|2eqv%x8<(.OZ *4խm-u8K,1ifzݔUϙcC#'GLC'ӎYjب FoeםAW@8Js\6ؤ"*)+EhRl-|ť7 , HL2+õj)25htd/~Ь;Ѿ(md%\;*幐u.WX,/EZJ2Zk4 bq([h ;M)sᦁ^HpVWTSL bNAJeFL[XZC SAE_i"䷉PŤBb' &|TN)X @rFFE(+mż;c>`p'pB1?x3>O*>O»0o{\y#?q#8kߝs=|fp>e7gH5 Ζh}<%X~e:lp r dj@,r$ؤHOBցe.,k6h/X,cre1us^@)Ӂ$S 7(8ȮJYqfse\5h)O CO>:J,C(+gxEh0 Qy#^;76h2KYV-|rTAe6$$ ǔcTJQf@ڝl|܉+N`|ŕnPL &0 wtfd|l7=-z֥+K.|-t'EXʟr /qzox)0Jtx;\5v 17aL[aa%Eqm|+QQ`9]e\ywd/ >r. ;~;SO=cMozӝvIs9U?ԧ>򑏬t m!9裩01+#k#g}#RJ{thv0?җ$KFkq٥!3^s=M\|Τ+DOb^`0ܲh'EzZL9$B򶓛yûd_gAjY@э/.Z(e8%: ~,VAv\4ˇ;+‘"-y,!=@/EMB`-+Yf*Z5e8bR+u7Z%ҥMk ZR [vk Ak8 ;͏_^)nIb JyS"؈T[hO8ECRՓ63.f: y >$k4_/8̘NްRe" G_)YOiBxXi]hiLG>'OV{Eޣ:K:jv/؟WL[D6/~Y?#KԎ;Hc$bKя~70nuRYo} SfN_!@fL3&8 uңŨt'k))+ˋ˶ ]s[sMSr"k;ysh|[rUw9auh26iZW1> Eڛg> do?'Q<[qsD\7Q ' l$JO][вcβBq%p=pV$T i /t4,Vؤ7TTG/| t8s<o,1g4-^cfhZ6 ʄQ,!ym|I[6FCU+%"fX,PRsUD7t.b|S*_*MYsX;)o4oj4J2b/  dJ#ŀP|f"MnЋYS7M:eWFEf ZB(0J28 Lux8BN+) %Y_D'j!ly*+ܹUQ7-Rb-\?>g\?)Y2owQ9s?^ouKVaE1KhGO71~) ŷݴQ *Aِd][Ԋ&Kz8لm wt FSmNX lH\mbj{͍-R[#n(5}b-_Mt$+Pץ)0po5{mIkg6sWbBA f_TIf'>ALqB2aiĆGb2+dJ3Dٹ)&*0V۾a\},&  &Wq)'4&°KJCwߝS^9 "4oX΅;) Q:7|y{ rK-rB e~!(~urjń8ғ]褡e9 w?_F3S^Y")EY脌d5uT T13"\ɝ_FMK^ Xm(PX#mTi\+Ӆe\)g 5nߏrͅa6SȒ3)ru%V<ͥ4Ҙ'Tr.PS3]hATJ^rMQĄ"-&fb SZgtZ<|mC^c 5((l)P8[6؝ғN8O}o|eۿ7}BJ%J`#g?W:}{]vمHbo_X%7r^zӝk_avO`4PKƉ e#Xo':$JO~fv3ei*AI+ѨtKYp=閧ap-(0n ܭ5!}V g0uWS_I ϱkKsE%V)i IM:Fn;prU,\MKj\JOj@)uABjSe5kAaZQ`!0҆gfV8ɢKʈZ ԒgAp\0HIzPJ,)L+~(0_Bh۫TBzʐDl "Dz2lԀi~x'l l~:dWBtKP5AJN\(@ } n ߔ ժOOI3rMTM/nPcU,Y%CQjUM]sU,7(` d*f(PFpƙ fJ}LX_[XY 58H֨)W{J)-kiCo@(W󻉩JbӟjIߟ_ZFFuBU >%qV.w!=y5yu楯F\M!&3؟D,7qDrvO"U<5: hC1qt[aQOKM~q& eZ鉔:/I:rUݴm[?+= }A M|i`A ~K6.$2?H7Q`Ny3a5IT96f0JY nb.rA)~uV)*`.T`( v[EX rWk$%;Ogh5֠՛Wdi'JB c8x8A@eU(]`˂a: ޭ^{}>HE-.Gy^aNw"aCD+_ 0ϑe\WH@B6 ]omI96oA#d.%|@$-< *`e0̤{R*z4UzHQ*2i؝lr'q26P(Xe ;WiiPs1= ([̗* ܤ_V1s+֗im~׼G}N`ۤO|lGxZ׺V R~01 GDʋ;Xh xEp%iX|U>횴 IDAT .kUJ"qDE|qIcԼJ0AUۣv0s+=z0FI_Pů(Wh[rJW7(0& Zpr &YnewNT^1}d-j@%JS"JXcAVj|e(f/Da$S~9 %2V4;޴3:15nZOJS>\d5nLWd2llhX.GhY lnh^ ;dw@tI/}K]Hy\;v~}ԣ%#s' B hjp1 #I0 O$(:~V6$O{4 ̯],62DVJ:~~ tI~=cb ZEԒ tG|%q=u[wéb`} Ԛ}2TZ*rJ0J&L~F17 -Ȭ fU$|*fHp!~`9JjOU;ة%T(P k׊OLL,QFFEԀ<& !GbQ^wH\jW}>z`h};HU!4V 6kdNWI)Un(:%5%*D/Y`A wJՠkg``%N:%jxM|)S`P t)sLj_"2vk4 ,uV@ ~W=K+{ՒVN [cAVAu@Ie-^j.TϿ△QQ`R)B|[6kol'6(lvAC~I<ٚH7|#T|-5( ݪ6_T`0ST$*UW&O`\LGDk2 F@ߥ I35{=/'I?C\}x&̴+c@C>G1VH!ڧhTӇN]҇S(fvbQ?>>mTه`@r@ M2ࢋ.BK /h~s9^2v}+_=x+͚:Udp(C V `$$%*'ԆUE$M`HC_;#VI`R4 TVۼ^Iz2/=[i m?zbPbP((((0Dtu6Z#r)wW^2*9?򑏐/>΁Ȱ+ pz͑M*\"` H@CLHO/q,JH, ? 
>5b_)/>Z0W\"7mkW䲗>`_D>E(R>}x}oW@@@S ?[lat h3/|6q2rpn|_YWU*Oi[kx+gs 莾#mG%BPFȈh>Շ Yqo$8~rs:ʨoTX̯ _ғjlhhhhhhhhhh 9g̖S;vڲ[mg׿ _»ciRrr!~Jwi'7w 8S #7%Qisd"|  YXv@SZ#cT8]).JLD%&H02&o0pm4s6 4 4 4 4 4 4 4 4 4 lplH4 d6I3lt;m6PBy睏;8 %|I sO]D$`tO|o4P"OT.GBzGzHLr%{ևaX0L(L$It*J!|~Չ 27Cn6 4 4 4 4 4 4 4 4 4 4 LF~2o#m;H8~ӟ>=䓏?&N_xڹ[{}ym;r1# ?"Vb@`>H=~RƏ$QJ >R\}S~L`Q~~T0Z &oφ7Ɇ6 4 4 4 4 4 4 4 4 4 l%@oGCn㓌@F3(_?o~a=Ͽ!ƅK YSkފ fȯjL|0dml0,}_vHʊj7.7 ̛ykvî{g﹔po/~1&].iT jz33Js9g4 (pq7}n3;4Cn~?OgNo(Z}@`@@@@(`Kg̩Ga~b7`%@A0+Їw"Ǘ%;}%PWXێ" щwuWZnzӛjp:wZկkBIdX[X+WK-=j25*@mam<Lm0Gjo8ɚɆ[?OPs)1䚒꠼so޸مc{?jl Wӑ^ϙs%M=F)NM!ӈ}{>2bQ<#QGN##KDI`Q0 /;:`N|u|Ds$U|#21.5ubLJ Pڙ};d%+J4 4 );>m}oۡ?Z:کڻ |so]|[6%>Wk|y{~w{Qc(((((0 gN3066d sG|,00I&;p;H I1sPN=cٜG?iBc\i3k,}*GQHR0qLJ tPIEs`O!X W$8w~/شDT4Z[D%eb쐁elO&=SNxnѫ*hT~~>_W~|]zr^o2+z{ADao{,hhhh쟥";xlk$&xR}bYd'ֵEP ńHF'jVHny[R nf:׹ΕtYzgLw:_EykxP٣9Dyh_0$1k"=d\NXds.%h)* &A }LC t 5FIhғQ4L(py]p7>zF&=ar}w/~IgzwWD^c[6lSka9m?O+W uKk)s%~%eV@_kk~ok," ^Dm[yw{s{/oq_7э^Q6?Ѽe6sove1?5淸_L꧙%dW^ WI }e/wYxr5YWNΒ&)go+-ni,mt)yhT4 .52㡏YFad> .|E'@{S{N) #@)0*=â` a5C*/A?8\ 47Ez`ԠoXGz^o DK {l_?|ڇk)>OQ1>ُx#Z0T!{nrڗV$q1[¼ otG|Z 5 SNy _d*e*CX=eS9kZkzciG>1yL `eݍS3e0_o/gSg|Ïo<)_җ  `L<ݗ?.n&?O?)V̈KfhǾXg|.}X9=qO"\]7c5c~s}f :XP'xgM<;W+o hv23%58or8QNiYhpD0'2~0[IOFwVY{g9pYUCDkŭZuA)ҚjǗ@7!"C/$E($i!@0MA0 [(a_"*m7@0_[5 .S?#,=?F>^D92od_W9cڮ3~/zo~k_ڛ/J|x.Fկm>x}߽IO@_o&{ #1I+VH2=aoI^4׺7_A=Cz~L5>YϬ(FP@ Ypu&$j䴭`X;àH\`Y*R8Arew.'\'oHI @>T'8*`N1KO^qK(>QN>O'OzrU똩ǽ8oy[L!I[:N<ć?4N÷L ?r{O%6/=1 &c|XZG}t>!շK|ғ4z嫈NO>ڼE'$Qp%)vm%s(c^r;Nj/_W!AÞt"$.@͗;שUZ Uv'{y졌hN@/sCdSGq'?IR8&:5REys(.E7K@{IiO|4ìB?px~3ySu_VPjzֳ>*p"?9@ş ROD$ٽu/})Wgۏrt|>?{g?j&ǴX$zoQo_>QMЀFFFFFP L`.,\]-ZR]'0OEJ_x+ut>*|jTT $_睝M{@l%5{%EU:@ֶ[@5q2r 5c?PRڶT5Q @Ҥa֚٣R 9jf6&IRNT(l(KyIMz O|+$>yw3Z H>0׏3HEzsLІ `##1){?d#9S쫵gUE'ri Ʌ^h])1D'}c+>___0RSXfϥ̤v1 '@Dg?Ĝ\mݨ ^{5Sik ,e.A=ܣN z{UYG=QL|YCIڶJBD}(鉅~3e,oeBhrÂ]^<?Τ {a:G~ɪ*#@rT!u0%Of?(((((0F d"zK0`q&D)D!"q$ I@!4JZ~ O-QKu5oFכ;D_8xZ psYK.s@؊Q *pۈM IDATb[vSN:S%mؼv'?F2 pJ/93ni?\Ĉz)33U? b;\D 0b Ɂ.V! "6NN`v~mS:p. )L~ lԖEb <,^cޱUM[PO-*N[DõÖ7Uf*R{.b%'eK@fwmF}? q3& S'ŃMMVŌ s羏M{'-Nܞ$A $R K#I\IlJ@!NkTvi=LIvFwGl[6{%-c즬(yBM.=)OzuVD_qypCߖ|r\}ռ֔wfesi~~kB=ʗ3aٝaӱ=֞twrw= % R0<@~ Z0mɂ>uc`;TƳL(lxCĵc@Q`v.d^M%Uu T+~;/N-l\<.Ы2\?ELW_}S8̯-f C.I #}EI,@E،0$M*fXB.YxMIV YbW>$ax^6ҙ~$V7T'@TO\s\ /{lcVVg۲&@_a!X򸕇N13) l}6tV)AxApҁ?ql$ @d+`Y1CۃbGHIbgDjb8UR>1YqbYAHgb($Rflswh[\`oY(yUG{8-£QƙL'dSu]5uE!ރ[Vj5N:$zY,2_ Q9^QgJ^wu_;ɊϼWi/d.4Xv)Vk1uT}{uQmr1{EM_b*r5uU-4j/0y{?kK`A`dߥlqvR6F  81!PJRiX, D nsb'Hxt8=L;1G_@E`NV60Y~ O8x_ꨆ3Jpx qt :qYgo~-WtAk^ˮ<vayZ:}?׿YxE䞺lHn9Gq5\yeTs9oN`uć{]e* ?a2 ox>q@8Cg7.{챜s%卼}^*3/ffͷsѻZ Vy42{y 'XYٞ--jf+'QOjZիs.Γܽ oz5guRG7~SN JμWmG(d$ڣ_?w/ET.6Ϣ==oB䎝O=~]?N?tn{S˄)YSWˇ/ʕ+)ISvyʲex6.O#_s$U5t d5^elQwxp}AzFX_~9Bą=i\x+^ 7+b'܄~]ਣ]65'3\b%!ME/d>7ۋdI^}N^r%_|?qt"B3: U1%@ats9l'9 eqYLbB*vK:z'rEsξ !긬!BbZ ,\ |)gaK KvuW*9R巭Ru]+y kٸ'm< º#?i}o{Pkfs$ǜn&32L30c*G"vpSƓnOfTZ8!F~1 IIiR⏶Zmk$ '4u $1IJ=aM>!VÕW^9!IS zR;7 Nޙ9M;}g?=q~77E- H?:],mC\j<1s\QO PyxoS$ L(gxH<b'YGoNcW#,$$q~%0HOxJ SIV`){HOvc=qsi h8X΢6# Y=( Mo~L`e#yRfX =bV/k'֕?U@C@K? 
RϤΨ1imƍmmV[؆ŒJ?1؉[ %BIʴ: JCN T2N6ctB&W0?(29bm%TO#Qr {<<, G: HL~%/yOlAIꫮ^n֭c>Ao2il/R*-^ yoy[~߳1N;7?e>s$ Y!U[>6-2$q=!Lr'{2+H` pQ3σ<ƈ8'aSj㿩H"M%I 9pF IʣbDIS N$cKC`NԓQO D.A:;䢞#\ve/`{l̩(̤8 ʰ,oƝOZu*^Ԟ~zj[&@`;pf/ۗr“ M0H2=نU<(&رQ$|3=Sa+*oک$F%cGh+OT n]Uq !5.BROC!YLgo p Y$  H@G͟G: {Dd}GhcMgTB&HS C\%bk$  H@(Q@Hf"Il1ѕ$6i\o]ٝӟ)FruH`fY=׷bn\D_t #pmcHgu 1鞳%  H@`FL rM̬{ \BfܓVŪbxg̕\u"]N gZ7Wdbbns7%Ʀq$gU$#jK@$  4 [bQOJpF8eZ0 MJWmD$̹z{dl7QTbnHp&ƏiG7!f # [$  H@Fu#8-JGbđd1#ܓ() #D=IIJ@%0Ipw Ί218DȢ  FĔ$.ZhQ%  H@f@p4[bjh"IFa\dT b'IL,vզ$  H@[ܪ'uwN9Gݴ$+II H@$0+ťzD!FMz$H'ĄKL.Y駍H@9QO#V5G1Y)L-qB `z:ɲ5$  H@fB <iemdefqO2(P!-Ġdi|&]$  H`G~Y$#`nB8MR&KڸԐ$  H@ 2*#n%Ap&ndl*wC{mV:i#$ C`Nn>Fl2S2RIG1$vv<!frvʘ$  H@(5`jP+6qB<ؔoT'}r2$ I`Nԓjn u_2'DgIIb&Nq9R 3d{h*G$ܓ(5b$JFOicw*ʕU H@$ 0bR,IOZ6%  H@֞{Rr/uR[)mAr;~$  H@%Pɪ9&h=Iƈ߈ H@B$mDVdfcwdQ+<$  H@>)FhĄ:D;$- kH@m6Ŕ* HJuVnViז$  H@@ RYJ,ܖі$  ,DضoN;+kܣK@$ T:HmTvœ$ $0/枌+Wy$*! H@$0!C!Yc*$ A`=Y$  H@$  H@H` UUU||>TUU @MM m\!PVVDaD"444m'{)---()JASSjjjֆ&BZ5ȄtpE~~>QRRnըAeeefֆ:455>###:::횑BH$JQXX6UUUrMq{QTT6444###햑bMw f|5M?Ѐ:ttt󡦦]]]vWII ZZZrZ-niVY6..//D"X,FEEjjjP]]-װ?766ߏgϞ0339,,,`ii [[[Ҳ>|ǏElش ZXXzzz?7SVV<|vP@( +++pttĀ`hh3!ҚR)222NsssUKKKG};uR("//OnP 33\666-ggg8::6k !/dBZQCC߿w")) IIIHLLDaa!ȯ.;r[[[޽{CEEc999r52==x![[[pvv+,--OHG 2!o 77Guu5455&''' 0\GJJJp=$''))) wwwxxx \G&n)))A\\RRR Jajjvj#wAFF;H/=Bcc#:6 y5Ȅ<\rvB!1`<݂T*ţG*Q__={bȑ=z4|}}ajju\B{+W֭[Da2777Ȉ݆D"ݻw!<~0`|}}1zhx{{w%ꄴj yXЧ@SSÇȑ#鉁B]]먤ܹs111QUU~t@!@(ŋr ^b6l@Laa!qܹsѣvA&eee8{,:xxxC PQQ0a„N4BiO<@hh(pmaĈl:"yEvR.-- >NNNXr%ΝKo-BVwD4i1vX(((pp(55?3~`Xv-FHtسg;c޼yXl u4c8|0j*,YVBtl۶ !V\yؘhӧ?͛>|86l???FA&]^CCv܉-[>Cz\RrrrǦMҹv.ׯǑ#GЫW/]ϧ3ܸq[nŋ1d|4L:-Z#Ch 2Wŋoc,,,cdff"88VbbbF!o+Wx,YB1yiÆ Å pm .Dqq1ye .I"`ҥuVptRFFFq=`Сxw!HF!%55nnnXj-[̞=\G#+qQ\| :!d<~^^^ ѣGNHZ._~ Ǐ7E!$,, PWWǽ{uVhhhptӧO_`̝;>빎EKt)ׯ_o9ND3g?Dcc#(#BKټy3Ny!22\G"]&;8qѣQZZu,B5Ȥ˸~:hXYYqtq{ƭ[燱c͛\G"Znk}HE3ȤKXx1\\\{6{ L<<***:t(aee___={Y\\Ca̙3gNǽh?ӧ?7rTWW㫯‚ -Þ={п,YK!/K(bŊۼ9:ɝ0o<[SL%Ο?7n`߾}OB^ CH'wqFGG)((h}VTT0cDz16mb0{i07n`0Ǐo3tVgGUYY` .0-zϩ1LhhhB^d̙ĉmT':tR&00J 0̓'OsssҥKСCSVVn$Uk鹺bԨQou<ǏǹsT*EzzzyϡJJJV^7o$B:t#66C iRNK?wbРApsscO˲gT ;;;XjUWEKI;w`ڴi\Ga 999\趸~Π B(rBdBCCaiiٮPƾ}`ԨQHKKCdddPPPɓqv!dҩ%$$\\\8ND"ARR ((SL~ǥR)6n܈O>+V!C¶mZgmm-`ٲepvvǃ'N`٘3gRRR0zhhhh`ĉ~>`۶mpssqE,\sEDD UUU <=J*"** ͛۷ojjjX,FDD55NwAܹqBy޽{T'N"** fmmm׮]Z\\\pv!5ބFEE}yNRILLd---&22Rn|ؘr 0 x|6wS6m€.#UQL:=^oo{Xdc#GDaa!>,w;PRRѷo_v汥eP˨ӧcJKKyv̛7.\\x_{M(((`ٯ.;;o6֬Yn>Chhh 88U:m\hJJJ][ >>>moÇcعsg2!d%|۷/F6ߟlّ۷ؽ{7{ ŋݻ1gOx:J.p5>}=֌ 444hy ?{~`ڵOwҥKh{=&ₕ+W˞dccyf2 (++cF6mš5kЧO;izpSSS[ȴH$vFYB%PѣG7onBRVVq,]O]:꤫+/_O? maɒ%o0p6!oҺWWWܜmܸq:u*QTTd1 geeefڵLMM 0 sɉQQQalmm{}3ZZZsEf߾} gX>`0zzzÇDˆD"f޽6Yn~͛73 ۜ9s*fڵ 9z(#?0gΜa;駟2 L2裏F-Z̘1qwwgnHRa^O>a0ٳgrfÆ FCC9qS^^n˄0Ǐg2~z&>>x-92Ga[7@^tt4cnn 2)//o}Bțq󙀀AucF櫯bfΜ|̜9sg϶o}ׯҥK1uToNMh%]JJJ033k+vg »ヒ>Ni۰'n[@qdۺt HDDDPsL: jIҥKHLL#nW#--  Ŝ9sjMdv5uuu/ѷo_$''#<<6m B:###cÆ زe ˗/s]Ql٘:u*Ǝ gggܹsÆ :!/5ބZf˖-:ckk߿:VHMMeƏhii1L߾}3ml/MMMf۶mǏd_ٷoccc|/`jkkE!"++2e 0auvAuu1Vb|>cgg\xH:t O^-:H&o>l߾;w.6oތ={rBZݥK駟"&&#GĆ u,effoիWރ y- n%;;۷oǾ}`,X\G#Hjj*</իaaau4Bis/qe 4K.ŬYu4AHR\t @XX,--fCUUxjITTT~ Ǐ`̜9fj?~GTTlll`,]\#vbΝ8q1c ,\>>>tn*++ áC  1b,]ӦMi nM*իطo cbԩIϣӦM 0j(((u !D$~+++L2SLСCoe'OԩS  A&ԩS EDDSLرcannqBrssq:u x=z4NI&A__누aݿ ݻwѣGL4 'OưaàuDR)p9޽{011IѣGCQQ똄j iH$qI\t ۷/F___1\$/ r \Thkkcر 222؁X(**ӓnnn춓HKKիWk׮{fW xyyJmPLhhh@ `X466pww;츎J<|qqqABBAѣNmҊFFDDСCwwwxxx*wޅ@ @ ͛7! ahh#GbӤۢWT]](\zsahhwwwΰ a$ܻw-%%%Ѐ+<==1bв?BiG7 _A*ޞpvv "%%ɈGll,P[[ SSSxxx F j yc HJJBLL ;sCHRhii3aooO3a4$''#)) III>D"ЧOvNNN4CL!Hee%[#PXX077'''t*+Cff&/Z|pwwGϞ=MHD 2!m@,#%%Dacc[[[,--9~K,C("''=jvH$=z3[~ϋB"$&&")) HLLDJJ b1АvvvA޽ѳgOtd IDATb###CFC(a0`X[[0!/dBQqq1Ґ6Y+--eՅ9ttt`ooKKKr555())aox ! 
CTTT '' X[[044BikRl㗞G!-- (((Glm LMMI{劊 ())'O  TTTT //,VPP% uB: j JKKY\;w zɓ'x%e @MM fm~=k\n?~uu5jjjPYYjH$TTTb-=z􀩩)_[[[,[ 9F!%RlbсPPPf_###@SSjjjn2M!*Z"Hn" ^L"FrV7ݏ!,,,`ddp888`Ŋ8!uQLH؈cؼy3>cH3 #7+tĹbbH$Ũ@MM ۰6-@E^KKKn隒VH|>χtuuuM__޽{t^6!vy̚5  @XX fɓ'x *++! H }T* xz*lhyp.>с\}444! )=]ӧ NEk( !Ϡ o6._`֬Y\GjS0a*++qrBHc^sݻljzz:QVV0s.'Ɂ^cALL 'NpBHU__%K`ժU/q. ˆ#u$B4O>CcPQQիWuvz EEEXf 0l0zhB!RL88v.\uv3gWD"QVh5! ܹs1tP;v :::\G̮]b L>ב!p,55'ND]]Μ9pS;4;ofh5!1m4̟?GnwqE1\G"¡pxzz70|\z^^^:!] 5Ȅp裏?jtw6fDGGnnnHHH:!?~<pUpB\\(#ePLH;+--N>>8uב!{X|9>/tM z쉛7oػw/ב"]45 O<+W0tP#uXx! n:ۛXBPYYpY`ٲet!PQQ3P__իW cƌ́"]ׯ#((8}4-{=VZٳgcϞ=m=!IKKC`` D"N> WWW#u*GEpp0 \G"S%B}???DFFRsV\gԩSEQQב!k׮5̜9QQQHNNҸDHD 2!mH*b͚5Xd ֮]_\ƍh\G" ~gcԨQב:-WWWAWW|2בtA&TWWcڴiÇ>ސ#`nn///?HB^Scc#>C;Xn;Fȭ 7nHt*t.B@^^c„ \G2|>̙t_ZZZ:!W Ǐȑ#X|9 ""%%%AYYk֬A^^Ǝ EEEEie 8q"455q9pon:,\;w2ב!LL8%%%8uݹԥ>}sqqrXҊ~!666zjǘ1cPZZu$B!/pMAYYqqqI&!::?߿u$B:4j i%| M9s?ב@ܼypssCjj*ב!7`iiunqqq055'Ν;u$B:,j yCXd ֯_۷c׮]PRR:V2`\G"R)֮]`|8y$455ʕ+1c&Om۶qA& a_1qD#uk/ѣG:!tkUUU3g.^={`޼y\G"{Z gƞ={u$B: "5c„ BTT\\\ȑ#۷//_4O!F`` ooo#rJ3gDZZBCCabbu,B:ZbMk~:ܠ?ᣏ>±cp!XҭGll,>nnnF\\.`ĈH$pssCll,בiW gϞhpYf!22wޅ233D!҉''''PBlllAaȑ :!dBZcL2f… u$Ҋeee͛\G"Nalٲ3fE@WWXikkX|9̙C*r6G DCC/_={૯իDPUU͛ct5B ..בH;8|0.] /turҥQL+++ìY#G`\G"@*⣏>W_}5k`֭PP5򬂂L<>ѣG1f#v ֤ˢ0apYp!!!:!t  Μ9#B"'':!IHwMxxx@CCnj޼y@ll,u$BN:"66n7o닟H:jI/>>>~:̸D8X466111\G"N}ט:u*~m\t \G"DZvZ;Xr%EHtK `͘7oVX'N@CCXݻ7QF_:!Z,X}[OPVV: x<>3سgP^^u,BZL#,, v… D:FY;v ex>BHKNNrssM1[Յ._m?~vLGȫt)7nxcIIIpwwGYYbcc1|vLF:ŋ˸z*fۼ?㫯ի9NJӧOcΜ9puuʼn'b۶mJPVVq-鐨A&]®]RJJJ000@ll,mۆ~ [nŚ5kI|8|0x<طO&LpCII AAAعs'Nx 00똤 IJJ¤IP]]"x<:srtz%%%胲2455QWW#G`ʔ)&$]H$ A.x_osB0a} 000:Μ9C+_HtƏ}y<#r9Ȥ۸q#jjjH$iD*yT?nˬ\IjkkrKRc6ٳg0M9!dҩ%$$`Ϟ=-6* ƍ9HF… yb9!4}vf3xY+WB pteblwxzsN$$$s:B^XNaxyy!>>3y2?3/^NHW~,Z襶UTTDrr28!,;;}6000@||<,,,1i*))ata 2~7m`ɒ%zj;"].] ===x<())=w[,]B>R466CX,nd+? eenЀ۷oA3ȤS _aeee˖-ٳIIWT__c޽px<[]|MMMBz?nvoTϞ=xb̟?={().ATTߏ'N\^.Zv8)! hhhH$Bee%b1b1QUU%H$j֤@AUUUٷ^Q괴48::ʍ@*))A*b̘1Xh&MxNy^ H˂:ttt>F۷cݺul!ɪ1c.\:pطo_X?_`b1̋. &Ӵ1VAA]ӯH@ r7 H  |ɓ'x JJJPTTbD"@5xgo|>Djkk\Q^^ fMo[ &@WWW6FFF0669,,,b[%ԞXd MFYHDX,FUU!QTTxuՕm~wt6ϲc^MMM|hjjBWW|>|>:::χ ===COOOn-jZģG  䠨VAAjdd---/Vmmmk2lLK#?nQ;EX TUUA$'^jjjvvvȑ#ѧOLy% (..FQQ؏KKKQZZ^ZZ6:::d_7EEEhhhq㠥EEEhkkhܾ,^eYLe+Fdbk 粏e+QZWSScs###ѣI^Ymm-ҐtBdgg5@ngklVv*O4}fd1F}QSSX̾jjjPYY2ߦTTTPWW`ԨQpwwlmmj6!/(**Baa! 
غشN6*))AKK]Mشf ''8p Ԡuuu){* YDZZ>|233!Jadd 8...puu=kx?~,B|vREE055\S%4228~fYAlDzpCCCvP mmmall"o*77qqqHHH{L);H*Ʀ Vjl 55޽{ǀF˝\mm-2226r e 좒 1[;eVdTQQjP^#??yyy248D^`kkШA >|hDEE!&&kkkݨ`*$&& 4$''C"CСC!CZRUUGxe311ag}nbbf)//Gvv6fΚ$-ֆmLLL8~&F$''#** Ѹu>}+dȈȝJAAp]V>|7<==1tP899Qc222SdV4@477na@("''Ɇl<[/mll35)((ŋqE\v EEEb^z&yy >nܸI/ IDAT`K]];v,ƎKoN*++q}$%%!99GJJ {^;ٴE^{!ϒJmq%##D>}~pttd/|D^jj*"""pDEEA$6l<==1h zXxDGG#::111(..|||0n8ΎFEERRR,W+<}KKKKiiX[[999lLOOGzz:Fmmm۳5ٹLjۉT*ŭ[p\x ѣ1fx{{,B!n޼WҥK رc燱c7$JwWxNIIǏ<Я_?[YY>iHKKãGfeexz!4'''8::߿?HWnb/_f?~ cȑb8C ""(((;|8zh .U ##| ۇlxyy!$$̓N*++o>8q1c,Y'x0l󈌌D\\Ν;hhhC(#%Xp]$$$(d2tttOǏǘ1cr'Ow}Ç&Ly?K~---L>?<&Ly刋éSϣ:::򤏏}c}J~~>ΠD#F`̘1;v,D .R__b۶m/^և(aaa/_bm둜hDFF :88 >>>񁗗W51"q9$$$ >>ϟ'ĸq0n8!f]}kܺu ƍChh(c޽{8p GGG!OOUUUΜ9hDEEܹs097:a-hÄ={gϞEUU\\\(߭qqW_?rss1uT\&M➿>.-- ۶m,ZoM%pa:tѐdC`` z)N0QqiDEE!** ɐH$Ŵi0k, :T0U&##~!~Gax嗹1.ɪ*,\oMe222p9r񨩩+ƍ@;fffbXt}={QQQF||<ɓ'c̙Pyi&cʕXre-X*++M6Ν;xon޼p q v*2 Nѹzulذprrºuץ>w^|Ǹq-Zw}O䐆СC8x `mm3f`ĉ\3sdd$~w$%%ӦM̙3Y:m޽dccC:::kQ~~!['MMMz7R:-??># daaA/">|>ZӋ/H4x`ڳg׋!~ gggRWW+VPIIa=DZr%YXXrww~Ν;G bXE_~%M8455I[[ON?3tv@l>}:I$z饗(77WXt}/ؘɓbԮz:~8͛7444Ԕ֮]K111ܸeܸqlBC%4|pKb!?YZZ%TWW'vH]vQʊ~gCLF[naÆ E-[ƍbcG&޽{i餦F^CLN; М9sH"Ћ/HbLYY}GdooOiϞ==2V'qqqd#}}}z(--MZ$(88$ ~ݣKD"ٳgL&;]x{9!}}}z̙3b޽K>9:: ?I.@oIRz7H.e~211*,,;""*//dbbBoPFFa1ƚdm6͝;ibzt4h9::;"cccڰac}o>ruu%MMMZvCO>MϗGK~~~dffF111ƒGK,!TJ~~~uŌ2xbJ4l0:}tT"ҥKy]h!^񰶶ĉ1ܽ{SLe0{l\v LLL=XHR̛7/^pxxx ""Bx.\ӧ#((%wXZZ"::O=MTQػw/\]]q)ٳgΜԩS!HD1pw!%%;v,^yTWW\V|׿'OGpzjhkkF&1~xhkk#""jjjݲ݄v킿l1UٳgP6Xj~W|Gx׻m2 Æ ?vmDZގsdg瑔##n믿?VƍU1q͕޽k׮~7ڶNkfn"mmmUڸq#9::RSSd2׿E~~~4c 鴊 ڸq#-^۶CW_uАfΜ٣Maaa]QFѨQ_SSC Y DV*8q,X@H]]ƎKcǎ%www7xN$>7 4m4#Ʊ#>3RWW_ݶ+Vаaúg8G{=zgiݺuLGl<<s@nl+rݪUp[[cEG}daaYlY*qGRRg*ݿ~z4hPMX5i$zU˗/9rD +9W> Uʞr訢"M68O~u5HJJBqq %HKKO?Cb];S*Fff&݋͛7kk|[vqF8oL7"++K{=Y{C:sNmpAlMqǎ;```lڵk8fΜT8]'|@L2Ee*+9WSկ_?_[li3wlǏGqq1ƍt1c੧444X|9{9$&&bɓ!bذaƤIPZZk׮!44k֬ԩSf\]]6oތ<$&&"88~󣣣 mmmL6 T رcXt)}Ye<<-[аaÄG/>|8۷޶W%s [XXPDDD"ouvy:,Z{;oUWW>;x`x[ߣ:VtԛoիW @m)Noqh{UU wWpAW ڳgU8Wrɹ3:Я1.x׷[Y^^NW&TJߟ- M@iyO.FwJڮo>ꖙfV}}=See%=萝]Fpuu5 77fi3:uj hʔ)ӦMS?Tllٲvh;8HϚ5VZE...J <:;Gy m}ݷ[>#255Z7{OӼyTb@'NP6Z9̑RN,c?~$J'tqq׫l]s+{n)njθ|2˗/7{X7/B.w6uV9sCř3g0jԨj| ׳6^TVV 9rJ2dx ^|EdggcҤIJ7~]x*<}t <<\exTF ]]]7!!!¡C 8̥9Jر7oGA*asܹؾ}{F{\./,ܬ`hhz:xm}@>0xzz*=_SSS1<7Ǒ#Gsϩl&&&puuE\\ʶΑݟ#P__,ZAqw *,^wגr|se͕=۷oǓO>u.6mtuu?|[//1k,dggx=DcmM .&>rvލ &`pvv(:ǎSicVGG֭[ׯl;믿ӧ1bᦸ/x+,Y'OnuV}(_pj0T:z[ґ}xpX"+ctRZg}{QI #ˑd͖Q\>^]t ĕ+W-6lw}[[f!))qYog)xpvʕ+Dʕ+QZZ]vt;H1sd@@@3+f;v#o=vBee%/_W_}w}[::sp칹'3:꯿Bpp0V^-L-xWQ__5k֨t;x7ݯ:Ug=f͚۷oҥKJ_vM::D"iqEG ØmxpQcZ}nLL R)BCCj=űcǰuV|g*oJR/߿?|}}{,^˦R)V\ =T!C@OOo̘1(//ƍߺu0qصk}wxܸq?oaҖՑ=h%ݷ>EOzt;㩧ѣGt˙rCCClڴ J⵫q/G.[ k6;ٳgѣG?6ڒW^y}Qeرc> ,@yyʷQ+\ٳsnjڶm&N9sO>i}A_K;w455[+Zr%r#SSSk6ن\.'SO\aa!ѬYꎊ"[[[a,geeEVVVE.o<⾉'*Њ+ݝjjjmьƆ7DVѳ;Ǔ!)rzgH*+"L!222{xNN ===ủƏlI'p_eeeMSSSեH:p999믿ƍDd6| ͙3~6PLx "{.[}.\@N+Wm۶Qhh(}'mn#{AϢ#vk]\\Pqq1mٲEs߾}Ơ8v{DNJS~ٙ\i-NU8G#׬YCs^kuu5yyyAtmt;-믿‚&E JΕ)WuL2e IRڰack@nCXXo4]W!555@FFF4vX3gիW i$護?3d2a9ccc BSLtRuV:_|AzzzIo !<<˅ v???zAfϞMo>+[l͟?Y@ IDAThƍ$66֭['ctƍ.|:rPhhh;|dbbBִc|s%=z4;whÆ P`` oj*@yf5N&%%Qyy9m߾ tt]V޽ hΝd``@...t1ڱcҤI(??$#F&4iqDGGӼyQ`` ?<==͍/_O=zHSShmn{סG=\E{c{vk‘#Gݝ455ӓ"##iȐ! 
Ǫbptt>|Xі$ $DB+W Q⨬#F-%&&d#͑i& ^{-ZDϷ%dmmMu ljٳgEĹse˕b3S^^No6)66C'5kcLlݺ_Vv_ yyyrJRWWCRXXX`LUھ])88$ 1Μ9#vHTVVFSN-}~ho͘1=JNt%Cbwbڰa׏i˖-Jg cܹٳg4cXv-6oތm۶5_~%RSS1l0,^ ᅬQccOuu50n8x{{#;;+Tqe``Cᥗ^¼yj*eJKKb ̟?Vo#~&((qEcĉؿLzΝaooO?V믷SW|}}q9cرXz57TH1W%"®]ꊳgѣxWK vލ4̛7[nfΜCɊX5kK.9N8D|rPSSç~0߿8pai}i?__ؼys?EݤR).\>|zzz 5^{5">ٳg 33vߴg277'ڶm[N׳eeeѫJH__>cXzSNѣI*ڵk{ڿ?T*%kkkZz5>&MD.]$T=Qnn.mڴLLJ>ͺD_UvAf"---222_~!իISSi_WCO=0Chon޾}6nH$HĄBCCig'X?A "dggGԩSbH"##Ϗ)!!AXq9:u*QFQtt!=:uYHё^{5=byf  555קsRXX/t QE^$;;|vkkkZ ?ÝgRmm-: =z46lУ~Q8x J@7_WXop5DEE!::χ;1k, >\ԑ#G~z$&&bĉXbf̘ѣ2իÇm6O5k`bR8v"##k׮A___`~'`f ͛7;w`bb@bppp;L;z(K=zx /ZИ )ۑ)So[+[#G ** NB~~>,,,0vX3 fZSN!::\c̘1 ROrػw/mۆGHH\\\=bo駟 sss,[ ˗/ǀOwQ:# ]]] 6 >>>񁯯/1wΝ;D;w χ  g$(33| v܉"?Ü9sxUQTT$hcҥx饗0p@͕+WβSNFFF>|+=^JKKqy$$$ !!}61bᤋ=>.U$)) {qmxzzb3g˽Haa!=Č3iӦACCC{L Brr2`bb___`ذac[(ޭxoܸT gggcO>$)nG} ̄Z(MKKKCd?3"""3gb޼y:u*&W\A||+/]ZXXXa{{{ΕW+**Bjj*RSS`oo/㣲aӝΜ9~ ?3rrr`oo L0&LaΝѣGq1? BHHO]]]5jkkq%˗/:::puu;\]]WWlTUU%tB館&MI&! S]T1118vO$''COOӧOU,HKKCCC0d)L;;;fLIye\|(((XYY[1333n݈ǏرcACCFQFaȑ5j5FXĠn޼k׮-##CN`Сpss www>Vwɓ8vPx#GĈ#/vr={qqqCLL r9I&aȑܑrRt\"'''8::bРABEr9_7o6˙3ySѱcnn.r***ŋ ///q7oą X$$$8p Fc"((b)++ 洴4dddujjjfffB`РAǀ`kk ;;;QSa=[CC򐕕%'l444@"V~9;;CHH9siiiPSS'F ___xxx`Ȑ!<ʕ+HIIABBN>T444mcҤIۗʕ+t^ׯڵky&߿AG#bN&֪Z ++ oFvv6nܸ!̻wR) 3666"GrRQQx!66(++:\\\///xzzb< 2 x"K. ȑ#QFoV(-lmm1p@ [ZZҒ!(,,D^^޽,ܹswAff&Zlllδ4qGdVXX3gg2ϟ?\MMMBYv]#%%HII˗QSS]]] >\ȓ#G!6ܹ,W޸qnBYY'9 %ϟw$ˑ/;wBB^^JEbӥ/ST$/-++ ' Cuvv;;;XZZId2b)##w066L777 և @ldee!77uuuBaaakkk+gjj SSSKݬ ())A~~> \ //yyy(,,s333SDS4`ee?Aև###pPx;;;cРA4hc#~:Ӆ\y5TTTlll΅aÆNNN|cʐ-l+ܹlTWW kjjB(-,,`ee !w*򤩩)w>%%%()){PXX\磰999(((@AArss}xaliib'bcSd\ BHOOAEn/,,,`nn333ׯG۽{PPP hUYY 0 _1#77yyyBHM /LFFFB#_~Bc@񯡡! ===zzz{&m娬Dyy9PYYJ(.. /))Qmmm_h)F(k-O <4S\Ѹ vR6660`-,,Я_?UUUqVVrrrZbӎu^)(-\͎ZZZJĤYׇi`` ̞R]]Rd@ee%***rc+-//o^SSV;4ͅ|jiimihhGE!H Q__<XXXBC_:::J544e WAq#!QVVr1]ZZ !+v(JWqׇyS]]BܻwYƏC&C !Ԅ}J]]|H$Sj/}}S>ؔ"A+IP^^:ܿr (--LD}}PWTT@h(5n45mT)ad3f Jc>PRRb_Sc娨@YY+9qasss!'0@跳M?cκ)ʖ* :\z% BZOORTh@]]i1rb[5sy BuuRNm},PYY)\/Eۺ:!9GHv .C$LmAAJKKd,QRRTnxVƁPl7mX BƓޠRx,++Sj* PEAYYY%E(-ixoZkjjBOOOhk(: YƝm2 J#W)//WUϊ%MG]]]hii 700:"]1K3ybMIIR"građbjl1EGnKzLyݒnG vc3Ez3\䊏 d%$ °pBCa1zP@XXȑ0ka1c12c1c1 dc1c Ȍ1c1.c1c1\ 3c1c@f1c1pc1c1c12c1c1 dc1c Ȍ1c1.c1c1\ 3c1c@f1c1pc1c1c12c1c1 dc1c Ȍ1c1.c1c1\ 3c1c@f1c1pc1c1c12c1c1 dc1c Ȍ1c1.c1c1\ 3c1c@f1c1bz6֭[8}...c1Qegg@bh#-,,0`n1X$DDbzٳgZ*1ڡe°pBGc3x5 T*ŤI8f1X5kJe1=@f6c 费 _cAOO̓zWc޼y| c@\ NO? 6:uj7Fc -B}}}cѢEc= 8Qhhh`ܹc1 ԍ1(.C0aZ|0c}&ϟH+ ̟?"Dc=\ Njc13<3-3<#BD1: dZfPc1&@7c=QFZ c1֗IR\Ĉc dHꐐ#b1z%K@CCXd0k) 1AZju!eeeoLkQWW044c2FFFmNDtby [))cL*++QSS"L&TWW2i_KIIAJJ @WWZZZCSSzzz:::C"4[1أd2d2@&(--\.QRR"$lLjrhhh@ii) "'JaddE!m``mmm@__044add]]]hkk066c B@EEQ]]r!oR)VWW s"Gc``uuu`bbmmmNWWn1X)c .UD.(,,DQQ[ii)Z\D"P6NtFFFֆRS< mm@\\F)ݑFӳ2 DTVVhW,WVVvH+7Elll )/1:NȋEEEGaa#;e2Z\gG:H9DxFjiiAWWWV{gn +--mRwf[ZTTT):w7LoC[{-a|JR]*2z.;999{.rssTEEE͆Vikk &&&JWK7###$0,13mvR4EEE ~ fEs߿?%cOFNNR,,,D~~R1\XXbJR!G[Y.\n-6P,ȓM ~SSS7˕氲Rʓ<1SpSNN233~!++ {GjR"Pe^ӑ3 =߿?lmmaii1p@aH%%%ĝ;w<;w 77cyҙF333^rbQQu(N$ s@~`ee%E[[[p1T)ssstunݺ,aS[LN>Ht6%;;[i"H`ee'xn c*Q^^LܼySȏb> @QIkkk5:NLbdd$D+LI }@.))AFF^tvMa*MMMٵX<κUII :n7TC`ccggg8;;cȐ!>YYYQFbcW\A\\pddd@]]z0QƺEqE9r$F WWWd> 䚚ĉ8y$Xp]FxRRjkkcbɘ8q"LMM>--- GEDDbbbPZZ SSS W>>>;Tm I0i$*c@qN~'/T*E@@0qDxzzrAX/BDHNNqIĠFܹs1|XXX&c@q݋p\z... i0|p2Xrm8qGdd$O#44G&p?!̙`Ì!C/'N…  Cd d\0ܹ񰱱 pB<]'cž}s-Š+`gg'vx*** _5> 6mBCC1e5qȑ#3gbŊ;v1Z~)[r`;v,9b1vuoEnn.n:cݦ{~T=˖-ٳ {~Ν;qixxx;yc2 7nė_~ }}}]/T#cïO?gϞSS"ž={{!++ -š5k%vh&)) 9`oo{< ?XѡS}sNlذn›o1cuuu̟?8z( 5k֠Lr/^ĨQ#00عs'nj=ѣGgѣ*vh1S d2̟?/BBBp5XPWWXJꫯ' B||<7._R ؾ};x bݬ擾z ܹ/Kbj@!""[nqw&"¦Mwww@"@]] fbڵxWիWwh}+͛DDݻw; Hƍ: [o! 
{ #$$-zT/Jlڴ K,;I$,])))x'0zh8qB{$D5kW_;;jL %(P+"RAxکqqS*sNǵNGuXuTuU SV} @HrK¢U1ys罢}ą lD8Oyu:JZ-?a֬YATT`Fbb"y,]˗/S~ +cq*) mbr7n$GGGJKK#"*Zb|5tP6lAy\\ :Ν;G&Mj1+++ f &4bĈ Zĉ[OII ]pkK^'OsM<-jxbdS[nuԉN8aPy8Γ5|k_,--McH$ؾ};JJJkc"66fBDDCiIC'/aoooQF޽{8ikĉA㰰0?j(bȑHKKʇѣGC !!s̙3qi 46664h~gꫯ"** öm YZZŋcɒ%={6ƌ۷o gggcӦMӧP PTHMMEtt46n܈"L<aaaHNNTUUaҥGVV̓NAVVF5wf̘z xWn]YY,XE{gxf-t`̚5 ?3^~ed2?5558}4!0n8TTTmjkkxb,\H$pssCXX233[cJ!/aǒM/" LF6mzsRIgΜ!F}]~vE2WQ׮])Y7n DB~pj ]v e$///""ϓF\.7G}D\{k֬={4yrvvg eSNΝ;SFFPVim6ȑ#l5k*?' /'|BG(226oLuV@qqqf[zKD~G% it@>>>?iҥ6md:tʕ+iРAǒIʕ+E'9Oc2OJ"o4yM$eP{dcǎ%)JSXJ%-Y,,,RNNh4~xQ>lܸ|Mm+ND~z֭iZ4w\~Ns[XX/"7jjj 'zR)d2*,,$"'ZfϞM>>>۷o>Cܹ3i4fջ1!/**2S*^u#7Nx߷o_m9t:дi \<,22~ߙ: F!;;;ڻwCiΓ'ͭ^m'-,,(22 SL1\ٳDZ!m<=z`۶mxw[?mvvvرcfΜ9s )) Æ O?WWWa;T\ӧcvرc  t_~߬Sbԩc7Ӓe˖?Ǝ;uVdggݽgM{xx^×_~ݻwC||s69GkGk>37gSkNaa!u֍-Zdr<0sW[ɀ2d,hԽ{w~ c"]%K`ʔ)8pk?%~hQΝ;`aW n߾mPv"BUUG)))PTmZ +W{Ebb"F #PXXyO>cp=rDwwwὕх;k5k Ve˖aݺuذa֯_8̚5멞=_kkk|''c޽7oޯIΓzV̓aaaFWG׏`Xmsž}~~ c2l޼˗/o*zވ1(wtt>oŋaaa3fF|n݊VT*DرC^~e <qqq۷3zlʕMj#+LlܼyST~=+0deet:%\攖LzVG 0m4m~9ګx 0 gϞ5۩mFxx8:tÆ 3hIΓzV̓sEII _.*|2|||/Xڝ;wÇĉ>|CbCɉ<==O{ZxhSΝE+ www*--%" aW5IRZx1}4c ڲeMJJ d2a-[ЫJE;PmmPV]]MhرM/$t e:|}} ")SSϝ;G=zbaDJM/^RڼyZ}S&L`P'Zh,>RSSChB̭^ݻwZz5Җ-[(!!AhMK0v|Zmt![ܺuƌC-[&,XG/РAښbbbfKΓ'͵^m'z-zWj o 6SP*zj1hLhٳgD"AѱcǞu\ͪ#KKK@N/M< q!Cܹs^!C|`𽱓'OR~ښzE}y)$$Ϗ>,|T*>#@111t}y&-[Lu߾}FST*>&>>BBBW_}%|׿?N'O&deeEk׮GKӧO &ЪUhΜ9ip'NP@@Y[[S`` ={)..N}s,##/_Nɉ QhȐ!SQQrvvcǎQyy9;r9:t˅  Tד 4\)sTUUњ5ku҅+R*b wѣq.]$lLjJiii4sLJLgBIP mڴ eΓpVXA&%z3) ڹshqƘ鴪L'O&DBoY07h'=zvi0دh(&&Fh4t5admܹsEG?E#uTŴvZΝ;Ӛ5k(;;a$k+'hDRIIb5w0`9Wb=z`…練CDصkM ND eRnnn-~߯ƍѧO9%%%ؿ?nݺ3fXGեK!++ VB||<}uE%Γ'J%ۇ &_~%V^,lذAxDc<<]k`` ك[)))1bo#55iDAAGFhhx kRSS={޽{Ç!Wl.((^za݈@JJ {L:ԥK]8x lmm1|t:=j6+_sdmyʓ1}tb8x 233 cNOOX%ԻwoZ|9}RiZ-''GmڴI(9"z7Dݻ4qD'+++k׊ٞt:JIIM6ѨQҒhܹtYFX3iϞ=4vXΝ;ӌ3?k$$$̙3FPP"""0n8 :֦1ւ:\xN©S \ѣG#""QQQ->1f~Ib9sFtၰ0bС T*}a0Z!##HJJ¥Kp bj;$$CEhh(O-fLTTT )) IIIHLLď?(?~^޿_yQQh?Lgggt]vtMxGGG( n1FrG QPP |'^O.c=D/A\ntrr2xc7ۑIz%ʆ͆I}­55-//GS gLF1J{T*I"+ H$ BI8w^1:.c3***VQ]]JjX__/}j:gZ7^iߌANDeܧK---U s}^mPSSC͡J544 ///;ay٫ ٕ J Cqq1*G~~~,dgg~tttFFF011A֭&"""zɓ'ysRRlޜ dffV:^CC/A555CII W_ܿ۠{e z*Ѕ(..FNNNyyyx)Ui DmV7011JuDDȱ@DTKR)8IOBB=zTC[[GĠ UN^.ܿf͚XvRԡCXYYSNU DDDD0!..111-ILLmCCCټYOO--*dB`bbΝ; VVVѣŎODDDD %%W^͛7XܻwPSSl\1o666.Ď/ܿHHH4NJJByy9TUUѩS'ܹsѣ˂uX@ "zO刋CTT"""p%A*B[[[6A%:u}߃ HJJBll#;с z b&""""u˗%edd@AA%:w CCC Cqq1+[tM<| 077 lmmaccsss(((ޑ qBCC . ///amm ---6Ÿ~:p%DDDPPP@.]-B;waaa Í7P^^CCCݻ7v)kѓ'Orrss;;;888;wfc-"$$aaa8}4233a``GGGر#'uǏ/",, ׮]D"A>}'''ٓoDDDDD\r'BCC2t ӧlllкukKA;wp"55ZZZppp ===Q-c5Cʕ+hѢ([nff&vDzGO>ř3gP$$$@OOG _/GDDD$2_8t>48p Zl)vLzGwޕu]9s999ٳ'\]]:j DD/ILLݻqܾ}_.7p8tplΘ2e """O/Gӧٳ'ƌѣG  ,,,0vXL:bG$"5z Ď;pi㣏>˜1cM#`믈11}tN۱sN$&&&LѣM#!.]Ca߾}HOO=0f(** Dhedd`رc0j(aС;Ν;ضm~dggc/ЧOXz5?VZaڴipwwGǎŎF"*//ɓ'GB]]?>ŎGDDՀ"jtzjڵ ;w.N Q8|06mڄcXx1ĎFDDDT+BBBj*_~;w.F D"v4csQc*0OHJJ‚ X>۷oǩSСCl޼RTxDD􎸅5hwĉq-̟?K,ر8pϟ|Ϙ4iؑ˞={駟YfX~=ƌ#v$jb lذطo:t v,""zK@ СC2nܸUVx@fܸq}60e̙3b""""zgO0m4xxx֭[,PQSSh(**G;%Ao0fL:gϞEǎŎD ֯_bǎpppӧOŎEDDDֲ0h ޽Xv-ĎE 9Ξ=ɓ'c'v$""z ˆEaÆ ضmN*vj$n߾ѣGC]]aaa;bРA(((ÇjΝ;퍅 bŊb!"7`;v?FPP.vjd2220`Ð;+ #F 11gϞE۶mŎDÇ1~x.""㸅5Oq/cѐCӦMѷo_ؠ}pttѣG}Lڵ &LɓC? 
nipype-0.9.2/doc/users/images/smoothrealignconnected.png [binary PNG image data omitted]
nipype-0.9.2/doc/users/images/smoothrealignunconnected.png [binary PNG image data omitted]
Development: [`zip `__ `tar.gz `__]

`Prior downloads `_

To check out the latest development version::

    git clone git://github.com/nipy/nipype.git

Install
-------

The installation process is similar to other Python packages.

If you already have a Python environment setup that has the dependencies listed
below, you can do::

    easy_install nipype

or::

    pip install nipype

Debian and Ubuntu
~~~~~~~~~~~~~~~~~

Add the `NeuroDebian `_ repository and install the ``python-nipype`` package
using ``apt-get`` or your favorite package manager.

Mac OS X
~~~~~~~~

The easiest way to get nipype running on Mac OS X is to install Anaconda_ or
Canopy_ and then add nibabel and nipype by executing::

    easy_install nibabel
    easy_install nipype

From source
~~~~~~~~~~~

If you downloaded the source distribution named something like
``nipype-x.y.tar.gz``, then unpack the tarball, change into the ``nipype-x.y``
directory and install nipype using::

    python setup.py install

**Note:** Depending on permissions you may need to use ``sudo``.
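A quick way to confirm that the package itself is importable (this only checks
the Python package, not the interface dependencies listed further down) is::

    python -c "import nipype; print nipype.__version__"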
Testing the install ------------------- The best way to test the install is to run the test suite. If you have nose_ installed, then do the following:: python -c "import nipype; nipype.test()" you can also test with nosetests:: nosetests --with-doctest /software/nipy-repo/masternipype/nipype --exclude=external --exclude=testing All tests should pass (unless you're missing a dependency). If SUBJECTS_DIR variable is not set some FreeSurfer related tests will fail. If any tests fail, please report them on our `bug tracker `_. On Debian systems, set the following environment variable before running tests:: export MATLABCMD=$pathtomatlabdir/bin/$platform/MATLAB where, $pathtomatlabdir is the path to your matlab installation and $platform is the directory referring to x86 or x64 installations (typically glnxa64 on 64-bit installations). Avoiding any MATLAB calls from testing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ On unix systems, set an empty environment variable:: export NIPYPE_NO_MATLAB= This will skip any tests that require matlab. Dependencies ------------ Below is a list of required dependencies, along with additional software recommendations. Must Have ~~~~~~~~~ Python_ 2.6 - 2.7 Nibabel_ 1.0 - 1.4 Neuroimaging file i/o library NetworkX_ 1.0 - 1.8 Python package for working with complex networks. NumPy_ 1.3 - 1.7 SciPy_ 0.7 - 0.12 Numpy and Scipy are high-level, optimized scientific computing libraries. Enthought_ Traits_ 4.0.0 - 4.3.0 Dateutil 1.5 - .. note:: Full distributions such as Anaconda_ or Canopy_ provide the above packages, except Nibabel_. Strong Recommendations ~~~~~~~~~~~~~~~~~~~~~~ IPython_ 0.10.2 - 1.0.0 Interactive python environment. This is necessary for some parallel components of the pipeline engine. Matplotlib_ 1.0 - 1.2 Plotting library `RDFLib `_ 4.1 RDFLibrary required for provenance export as RDF Sphinx_ 1.1 Required for building the documentation `Graphviz `_ Required for building the documentation Interface Dependencies ~~~~~~~~~~~~~~~~~~~~~~ These are the software packages that nipype.interfaces wraps: FSL_ 4.1.0 or later matlab_ 2008a or later SPM_ SPM5/8 FreeSurfer_ FreeSurfer version 4 and higher AFNI_ 2009_12_31_1431 or later Slicer_ 3.6 or later Nipy_ 0.1.2+20110404 or later Nitime_ (optional) Camino_ Camino2Trackvis_ ConnectomeViewer_ .. include:: ../links_names.txt nipype-0.9.2/doc/users/interface_tutorial.rst000066400000000000000000000142771227300005300213540ustar00rootroot00000000000000.. _interface_tutorial: ======================= Tutorial : Interfaces ======================= Specifying options ------------------ The nipype interface modules provide a Python interface to external packages like FSL_ and SPM_. Within the module are a series of Python classes which wrap specific package functionality. For example, in the fsl module, the class :class:`nipype.interfaces.fsl.Bet` wraps the ``bet`` command-line tool. Using the command-line tool, one would specify options using flags like ``-o``, ``-m``, ``-f ``, etc... However, in nipype, options are assigned to Python attributes and can be specified in the following ways: Options can be assigned when you first create an interface object: .. testcode:: import nipype.interfaces.fsl as fsl mybet = fsl.BET(in_file='foo.nii', out_file='bar.nii') result = mybet.run() Options can be assigned through the ``inputs`` attribute: .. 
testcode:: import nipype.interfaces.fsl as fsl mybet = fsl.BET() mybet.inputs.in_file = 'foo.nii' mybet.inputs.out_file = 'bar.nii' result = mybet.run() Options can be assigned when calling the ``run`` method: .. testcode:: import nipype.interfaces.fsl as fsl mybet = fsl.BET() result = mybet.run(in_file='foo.nii', out_file='bar.nii', frac=0.5) Getting Help ------------ In IPython_ you can view the docstrings which provide some basic documentation and examples. .. sourcecode:: ipython In [2]: fsl.FAST? Type: type Base Class: String Form: Namespace: Interactive File: /Users/satra/sp/nipype/interfaces/fsl/preprocess.py Docstring: Use FSL FAST for segmenting and bias correction. For complete details, see the `FAST Documentation. `_ Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import anatfile Assign options through the ``inputs`` attribute: >>> fastr = fsl.FAST() >>> fastr.inputs.in_files = anatfile >>> out = fastr.run() #doctest: +SKIP Constructor information: Definition: fsl.FAST(self, **inputs) .. sourcecode:: ipython In [5]: spm.Realign? Type: type Base Class: String Form: Namespace: Interactive File: /Users/satra/sp/nipype/interfaces/spm/preprocess.py Docstring: Use spm_realign for estimating within modality rigid body alignment http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=25 Examples -------- >>> import nipype.interfaces.spm as spm >>> realign = spm.Realign() >>> realign.inputs.in_files = 'functional.nii' >>> realign.inputs.register_to_mean = True >>> realign.run() # doctest: +SKIP Constructor information: Definition: spm.Realign(self, **inputs) All of the nipype.interfaces classes have an ``help`` method which provides information on each of the options one can assign. .. sourcecode:: ipython In [6]: fsl.BET.help() Inputs ------ Mandatory: in_file: input file to skull strip Optional: args: Additional parameters to the command center: center of gravity in voxels environ: Environment variables (default={}) frac: fractional intensity threshold functional: apply to 4D fMRI data mutually exclusive: functional, reduce_bias mask: create binary mask image mesh: generate a vtk mesh brain surface no_output: Don't generate segmented output out_file: name of output skull stripped image outline: create surface outline image output_type: FSL output type radius: head radius reduce_bias: bias field and neck cleanup mutually exclusive: functional, reduce_bias skull: create skull image threshold: apply thresholding to segmented brain image and mask vertical_gradient: vertical gradient in fractional intensity threshold (-1, 1) Outputs ------- mask_file: path/name of binary brain mask (if generated) meshfile: path/name of vtk mesh file (if generated) out_file: path/name of skullstripped file outline_file: path/name of outline file (if generated) .. 
sourcecode:: ipython In [7]: spm.Realign.help() Inputs ------ Mandatory: in_files: list of filenames to realign Optional: fwhm: gaussian smoothing kernel width interp: degree of b-spline used for interpolation jobtype: one of: estimate, write, estwrite (default=estwrite) matlab_cmd: None mfile: Run m-code using m-file (default=True) paths: Paths to add to matlabpath quality: 0.1 = fast, 1.0 = precise register_to_mean: Indicate whether realignment is done to the mean image separation: sampling separation in mm weight_img: filename of weighting image wrap: Check if interpolation should wrap in [x,y,z] write_interp: degree of b-spline used for interpolation write_mask: True/False mask output image write_which: determines which images to reslice write_wrap: Check if interpolation should wrap in [x,y,z] Outputs ------- mean_image: Mean image file from the realignment realigned_files: Realigned files realignment_parameters: Estimated translation and rotation parameters Our :ref:`interface-index` documentation provides html versions of our docstrings and includes links to the specific package documentation. For instance, the :class:`nipype.interfaces.fsl.Bet` docstring has a direct link to the online BET Documentation. FSL interface example --------------------- Using FSL_ to realign a time_series: .. testcode:: import nipype.interfaces.fsl as fsl realigner = fsl.McFlirt() realigner.inputs.in_file='timeseries4D.nii' result = realigner.run() SPM interface example --------------------- Using SPM_ to realign a time-series: .. testcode:: import nipype.interfaces.spm as spm from glob import glob allepi = glob('epi*.nii') # this will return an unsorted list allepi.sort() realigner = spm.Realign() realigner.inputs.in_files = allepi result = realigner.run() .. include:: ../links_names.txt nipype-0.9.2/doc/users/joinnode_and_itersource.rst000066400000000000000000000120441227300005300223520ustar00rootroot00000000000000.. _joinnode_and_itersource: ==================================== JoinNode, synchronize and itersource ==================================== The previous :doc:`mapnode_and_iterables` chapter described how to fork and join nodes using MapNode and iterables. In this chapter, we introduce features which build on these concepts to add workflow flexibility. JoinNode, joinsource and joinfield ================================== A :class:`nipype.pipeline.engine.JoinNode` generalizes MapNode to operate in conjunction with an upstream iterable node to reassemble downstream results, e.g.: .. digraph:: joinnode_ex "A" -> "B1" -> "C1" -> "D"; "A" -> "B2" -> "C2" -> "D"; "A" -> "B3" -> "C3" -> "D"; The code to achieve this is as follows: :: import nipype.pipeline.engine as pe a = pe.Node(interface=A(), name="a") b = pe.Node(interface=B(), name="b") b.iterables = ("in_file", images) c = pe.Node(interface=C(), name="c") d = pe.JoinNode(interface=D(), joinsource="b", joinfield="in_files", name="d") my_workflow = pe.Workflow(name="my_workflow") my_workflow.connect([(a,b,[('subject','subject')]), (b,c,[('out_file','in_file')]) (c,d,[('out_file','in_files')]) ]) This example assumes that interface "A" has one output *subject*, interface "B" has two inputs *subject* and *in_file* and one output *out_file*, interface "C" has one input *in_file* and one output *out_file*, and interface D has one list input *in_files*. The *images* variable is a list of three input image file names. As with *iterables* and the MapNode *iterfield*, the *joinfield* can be a list of fields. 
Thus, the declaration in the previous example is equivalent to the following: :: d = pe.JoinNode(interface=D(), joinsource="b", joinfield=["in_files"], name="d") The *joinfield* defaults to all of the JoinNode input fields, so the declaration is also equivalent to the following: :: d = pe.JoinNode(interface=D(), joinsource="b", name="d") In this example, the node "c" *out_file* outputs are collected into the JoinNode "d" *in_files* input list. The *in_files* order is the same as the upstream "b" node iterables order. The JoinNode input can be filtered for unique values by specifying the *unique* flag, e.g.: :: d = pe.JoinNode(interface=D(), joinsource="b", unique=True, name="d") synchronize =========== The :class:`nipype.pipeline.engine.Node` *iterables* parameter can be be a single field or a list of fields. If it is a list, then execution is performed over all permutations of the list items. For example: :: b.iterables = [("m", [1, 2]), ("n", [3, 4])] results in the execution graph: .. digraph:: multiple_iterables_ex "A" -> "B13" -> "C"; "A" -> "B14" -> "C"; "A" -> "B23" -> "C"; "A" -> "B24" -> "C"; where "B13" has inputs *m* = 1, *n* = 3, "B14" has inputs *m* = 1, *n* = 4, etc. The *synchronize* parameter synchronizes the iterables lists, e.g.: :: b.iterables = [("m", [1, 2]), ("n", [3, 4])] b.synchronize = True results in the execution graph: .. digraph:: synchronize_ex "A" -> "B13" -> "C"; "A" -> "B24" -> "C"; where the iterable inputs are selected in lock-step by index, i.e.: (*m*, *n*) = (1, 3) and (2, 4) for "B13" and "B24", resp. itersource ========== The *itersource* feature allows you to expand a downstream iterable based on a mapping of an upstream iterable. For example: :: a = pe.Node(interface=A(), name="a") b = pe.Node(interface=B(), name="b") b.iterables = ("m", [1, 2]) c = pe.Node(interface=C(), name="c") d = pe.Node(interface=D(), name="d") d.itersource = ("b", "m") d.iterables = [("n", {1:[3,4], 2:[5,6]})] my_workflow = pe.Workflow(name="my_workflow") my_workflow.connect([(a,b,[('out_file','in_file')]), (b,c,[('out_file','in_file')]) (c,d,[('out_file','in_file')]) ]) results in the execution graph: .. digraph:: itersource_ex "A" -> "B1" -> "C1" -> "D13"; "C1" -> "D14"; "A" -> "B2" -> "C2" -> "D25"; "C2" -> "D26"; In this example, all interfaces have input *in_file* and output *out_file*. In addition, interface "B" has input *m* and interface "D" has input *n*. A Python dictionary associates the "b" node input value with the downstream "d" node *n* iterable values. This example can be extended with a summary JoinNode: :: e = pe.JoinNode(interface=E(), joinsource="d", joinfield="in_files", name="e") my_workflow.connect(d, 'out_file', e, 'in_files') resulting in the graph: .. digraph:: itersource_with_join_ex "A" -> "B1" -> "C1" -> "D13" -> "E"; "C1" -> "D14" -> "E"; "A" -> "B2" -> "C2" -> "D25" -> "E"; "C2" -> "D26" -> "E"; The combination of iterables, MapNode, JoinNode, synchronize and itersource enables the creation of arbitrarily complex workflow graphs. The astute workflow builder will recognize that this flexibility is both a blessing and a curse. These advanced features are handy additions to the Nipype toolkit when used judiciously. nipype-0.9.2/doc/users/mapnode_and_iterables.rst000066400000000000000000000123631227300005300217620ustar00rootroot00000000000000.. 
_mapnode_and_iterables:

============================================
MapNode, iterfield, and iterables explained
============================================

In this chapter we will try to explain the concepts behind MapNode, iterfield,
and iterables.

MapNode and iterfield
======================

Imagine that you have a list of items (let's say files) and you want to execute
the same node on them (for example some smoothing or masking). Some nodes
accept multiple files and do exactly the same thing on them, but some don't
(they expect only one file). MapNode can solve this problem. Imagine you have
the following workflow:

.. digraph:: mapnode_before

   "A" -> "B" -> "C";

Node "A" outputs a list of files, but node "B" accepts only one file.
Additionally "C" expects a list of files. What you would like is to run "B" for
every file in the output of "A" and collect the results as a list and feed it
to "C". Something like this:

.. digraph:: mapnode_after

   "A" -> "B1" -> "C";
   "A" -> "B2" -> "C";
   "A" -> "B3" -> "C";
   "A" -> "Bn" -> "C";

The code to achieve this is quite simple

::

  import nipype.pipeline.engine as pe
  a = pe.Node(interface=A(), name="a")
  b = pe.MapNode(interface=B(), name="b", iterfield=['in_file'])
  c = pe.Node(interface=C(), name="c")

  my_workflow = pe.Workflow(name="my_workflow")
  my_workflow.connect([(a,b,[('out_files','in_file')]),
                       (b,c,[('out_file','in_files')])
                       ])

assuming that interfaces "A" and "C" have one input "in_files" and one output
"out_files" (both lists of files). Interface "B" has a single file input
"in_file" and a single file output "out_file".

You probably noticed that you connect the nodes as if "B" could accept and
output lists of files. This is because it is wrapped using MapNode instead of
Node. This special version of a node will (under the bonnet) create an instance
of "B" for every item in the list from the input. The compulsory argument
"iterfield" defines which input it should iterate over (for example, in a
single-file smoothing interface you would like to iterate over input files, not
the smoothing width). At the end, outputs are collected into a list again. In
other words, this is a map and reduce scenario.

You might have also noticed that the iterfield argument expects a list of input
names instead of just one name. This suggests that there can be more than one!
Even though a bit confusing, this is true. You can specify more than one input
to iterate over, but the lists that you provide (for all the inputs specified
in iterfield) have to have the same length. MapNode will then pair the
parameters up and run the first instance with the first set of parameters and
the second with the second set of parameters. For example, this code:

::

  b = pe.MapNode(interface=B(), name="b", iterfield=['in_file', 'n'])
  b.inputs.in_file = ['file', 'another_file', 'different_file']
  b.inputs.n = [1,2,3]
  b.run()

is almost the same as running

::

  b1 = pe.Node(interface=B(), name="b1")
  b1.inputs.in_file = 'file'
  b1.inputs.n = 1

  b2 = pe.Node(interface=B(), name="b2")
  b2.inputs.in_file = 'another_file'
  b2.inputs.n = 2

  b3 = pe.Node(interface=B(), name="b3")
  b3.inputs.in_file = 'different_file'
  b3.inputs.n = 3

It is a rarely used feature, but you can sometimes find it useful.

Iterables
=========

Now imagine a different scenario. You have your workflow as before

.. digraph:: iterables_before

   "A" -> "B" -> "C";

and there are three possible values of one of the inputs of node "B" that you
would like to investigate (for example a smoothing width of 2, 4, and 6
pixels).
You would like to see how different parameters in node "B" would influence
everything that depends on its outputs (node "C" in our example). Therefore the
new graph should look like this:

.. digraph:: foo

   "A" -> "B1" -> "C1";
   "A" -> "B2" -> "C2";
   "A" -> "B3" -> "C3";

Of course you can do it manually by creating copies of all the nodes for each
different parameter set, but this can be very time consuming, especially when
there is more than one node taking inputs from "B". Luckily nipype supports
this scenario! It's called iterables and you use it this way:

::

  import nipype.pipeline.engine as pe
  a = pe.Node(interface=A(), name="a")
  b = pe.Node(interface=B(), name="b")
  b.iterables = ("n", [1, 2, 3])
  c = pe.Node(interface=C(), name="c")

  my_workflow = pe.Workflow(name="my_workflow")
  my_workflow.connect([(a,b,[('out_file','in_file')]),
                       (b,c,[('out_file','in_file')])
                       ])

This assumes that you want to try out values 1, 2, and 3 of input "n" of node
"B". This will also create three different versions of node "C" - each with
inputs from instances of node "B" with different values of "n". Additionally,
you can set multiple iterables for a node with a list of tuples in the above
format.

Iterables are commonly used to execute the same workflow for many subjects.
Usually one parametrises the DataGrabber node with a subject ID. This is
achieved by connecting an IdentityInterface in front of DataGrabber. When you
set the iterables of the IdentityInterface to the list of subject IDs, the same
workflow will be executed for every subject. See :doc:`examples/fmri_spm` to
see this pattern in action.
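A rough sketch of this subject-iteration pattern is shown below (the directory
layout, template string and subject IDs are illustrative assumptions, not part
of the tutorial data)::

  import nipype.pipeline.engine as pe
  import nipype.interfaces.utility as util
  import nipype.interfaces.io as nio

  # one iterable node placed in front of the grabber
  infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
                       name="infosource")
  infosource.iterables = ('subject_id', ['s1', 's2', 's3'])

  # DataGrabber fills its template from the subject_id it receives
  datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                                 outfields=['func']),
                       name="datasource")
  datasource.inputs.base_directory = 'data'
  datasource.inputs.template = '%s/f3.nii'
  datasource.inputs.template_args = dict(func=[['subject_id']])
  datasource.inputs.sort_filelist = True

  per_subject = pe.Workflow(name="per_subject")
  per_subject.connect(infosource, 'subject_id', datasource, 'subject_id')

Everything connected downstream of ``datasource`` will then be expanded once
per subject.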
.. include:: ../links_names.txt
nipype-0.9.2/doc/users/model_specification.rst000066400000000000000000000107641227300005300214660ustar00rootroot00000000000000
.. _model_spec:

===================================================
Model Specification for First Level fMRI Analysis
===================================================

Nipype provides a general purpose model specification mechanism with
specialized subclasses for package specific extensions.

General purpose model specification
===================================

The :class:`SpecifyModel` provides a generic mechanism for model specification.
A mandatory input called subject_info provides paradigm specification for each
run corresponding to a subject. This has to be in the form of a :class:`Bunch`
or a list of Bunch objects (one for each run). Each Bunch object contains the
following attributes.

Required for most designs
-------------------------

- conditions : list of names

- onsets : lists of onsets corresponding to each condition

- durations : lists of durations corresponding to each condition. Should be
  left to a single 0 if all events are being modelled as impulses.

Optional
--------

- regressor_names : list of names corresponding to each column. Should be None
  if automatically assigned.

- regressors : list of lists. values for each regressor - must correspond to
  the number of volumes in the functional run

- amplitudes : lists of amplitudes for each event. This will be ignored by
  SPM's Level1Design.

The following two (tmod, pmod) will be ignored by any Level1Design class other
than SPM:

- tmod : lists of conditions that should be temporally modulated. Should
  default to None if not being used.

- pmod : list of Bunch corresponding to conditions

  - name : name of parametric modulator

  - param : values of the modulator

  - poly : degree of modulation

An example Bunch definition::

  from nipype.interfaces.base import Bunch
  condnames = ['Tapping', 'Speaking', 'Yawning']
  event_onsets = [[0, 10, 50], [20, 60, 80], [30, 40, 70]]
  durations = [[0],[0],[0]]

  subject_info = Bunch(conditions=condnames,
                       onsets = event_onsets,
                       durations = durations)

Alternatively, you can provide condition, onset, duration and amplitude
information through event files. The event files have to be in 1, 2 or 3 column
format with the columns corresponding to Onsets, Durations and Amplitudes and
they have to have the name event_name.run e.g.: Words.run001.txt. The
event_name part will be used to create the condition names. Words.run001.txt
may look like::

  # Word Onsets Durations
  0 10
  20 10
  ...

or with amplitudes::

  # Word Onsets Durations Amplitudes
  0 10 1
  20 10 1
  ...

Together with this information, one needs to specify:

- whether the durations and event onsets are specified in terms of scan volumes
  or secs.

- the high-pass filter cutoff,

- the repetition time per scan

- functional data files corresponding to each run.

Optionally you can specify realignment parameters, outlier indices. Outlier
files should contain a list of numbers, one per row indicating which scans
should not be included in the analysis. The numbers are 0-based.
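For instance, a minimal sketch of setting these inputs on the generic interface
(the file names and parameter values here are placeholders)::

  from nipype.algorithms.modelgen import SpecifyModel

  s = SpecifyModel()
  s.inputs.subject_info = subject_info        # Bunch (or list of Bunches) as above
  s.inputs.input_units = 'secs'               # onsets/durations given in seconds
  s.inputs.functional_runs = ['run1.nii', 'run2.nii']
  s.inputs.time_repetition = 2.5              # repetition time per scan, in seconds
  s.inputs.high_pass_filter_cutoff = 128.     # high-pass cutoff, in seconds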
SPM specific attributes
=======================

In addition to the generic specification options, several SPM specific options
can be provided. In particular, the subject_info function can provide temporal
and parametric modulators in the Bunch attributes tmod and pmod.

The following example adds a linear parametric modulator for speaking rate for
the events specified earlier::

  pmod = [None,
          Bunch(name=['Rate'], param=[[.300, .500, .600]], poly=[1]),
          None]

  subject_info = Bunch(conditions=condnames,
                       onsets = event_onsets,
                       durations = durations,
                       pmod = pmod)

:class:`SpecifySPMModel` also allows specifying additional components. If you
have a study with multiple runs, you can choose to concatenate conditions from
different runs by setting the input option **concatenate_runs** to True. You
can also choose to set the output options for this class to be in terms of
'scans'.

Sparse model specification
==========================

In addition to standard models, :class:`SpecifySparseModel` allows model
generation for sparse and sparse-clustered acquisition experiments. Details of
the model generation and utility are provided in `Ghosh et al. (2009) OHBM
2009. `_

.. include:: ../links_names.txt
nipype-0.9.2/doc/users/pipeline_tutorial.rst000066400000000000000000000036461227300005300212140ustar00rootroot00000000000000
.. _pipeline_tutorial:

=====================
Tutorial : Workflows
=====================

This section presents several tutorials on how to set up and use pipelines.
Make sure that you have the requirements satisfied and go through the steps
required for the analysis tutorials.

Essential reading
=================

.. toctree::
   :maxdepth: 1
   :glob:

   tutorial_101
   tutorial_102
   tutorial_103
   mapnode_and_iterables
   grabbing_and_sinking

Beginner's guide
================

By Michael Notter. `Available here`__

__ http://miykael.github.com/nipype-beginner-s-guide/index.html

Example workflows
=================

.. toctree::
   :maxdepth: 1
   :glob:

   examples/*

Requirements
============

All tutorials

- Release 0.4 of nipype and its dependencies have been installed

Analysis tutorials

- FSL_, FreeSurfer_, Camino_, ConnectomeViewer and MATLAB_ are available and
  callable from the command line

- SPM_ 5/8 is installed and callable in matlab

- Space: 3-10 GB

Checklist for analysis tutorials
================================

For the analysis tutorials, we will be using a slightly modified version of the
FBIRN Phase I travelling data set.

Step 0
~~~~~~

Download and extract the `Pipeline tutorial data (429MB). `_
(checksum: 56ed4b7e0aac5627d1724e9c10cd26a7)

Step 1.
~~~~~~~

Ensure that all programs are available by calling ``bet``, ``matlab`` and then
``which spm`` within matlab to ensure you have spm5/8 in your matlab path.

Step 2.
~~~~~~~

You can now run the tutorial by typing ``python tutorial_script.py`` within the
nipype-tutorial directory. This will run a full first level analysis on two
subjects followed by a 1-sample t-test on their first level results. The next
section goes through each section of the tutorial script and describes what it
is doing.

.. include:: ../links_names.txt
nipype-0.9.2/doc/users/plugins.rst000066400000000000000000000232711227300005300171440ustar00rootroot00000000000000
.. _plugins:

====================
Using Nipype Plugins
====================

The workflow engine supports a plugin architecture for workflow execution. The
available plugins allow local and distributed execution of workflows and
debugging. Each available plugin is described below.

Current plugins are available for Linear, Multiprocessing, IPython_ distributed
processing platforms and for direct processing on SGE_, PBS_, HTCondor_, LSF_,
and SLURM_. We anticipate future plugins for the Soma_ workflow.

.. note::

   The current distributed processing plugins rely on the availability of a
   shared filesystem across computational nodes.

A variety of config options can control how execution behaves in this
distributed context. These are listed later on in this page.

All plugins can be executed with::

  workflow.run(plugin=PLUGIN_NAME, plugin_args=ARGS_DICT)

Optional arguments::

  status_callback : a function handle
  max_jobs : maximum number of concurrent jobs
  max_tries : number of times to try submitting a job
  retry_timeout : amount of time to wait between tries

.. note::

   Except for the status_callback, the remaining arguments only apply to the
   distributed plugins: MultiProc/IPython(X)/SGE/PBS/HTCondor/HTCondorDAGMan/LSF

For example:
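A rough illustration of passing these optional arguments is shown below (the
callback signature used here is an assumption for the sketch -- check the
plugin base class for the exact arguments that are passed)::

  def report_status(node, status):
      # called by the plugin as nodes change state
      print node.name, status

  workflow.run(plugin='SGE',
               plugin_args={'status_callback': report_status,
                            'max_jobs': 10,
                            'max_tries': 2,
                            'retry_timeout': 5})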
Plugins
=======

Debug
-----

This plugin provides a simple mechanism to debug certain components of a
workflow without executing any node.

Mandatory arguments::

  callable : A function handle that receives as arguments a node and a graph

The function callable will be called for every node from a topological sort of
the execution graph.

Linear
------

This plugin runs the workflow one node at a time in a single process locally.
The order of the nodes is determined by a topological sort of the workflow::

  workflow.run(plugin='Linear')

MultiProc
---------

Uses the Python_ multiprocessing library to distribute jobs as new processes on
a local system.

Optional arguments::

  n_procs : Number of processes to launch in parallel, if not set number of
            processors/threads will be automatically detected

To distribute processing on a multicore machine, simply call::

  workflow.run(plugin='MultiProc')

This will use all available CPUs. If on the other hand you would like to
restrict the number of used resources (to say 2 CPUs), you can call::

  workflow.run(plugin='MultiProc', plugin_args={'n_procs' : 2})

IPython
-------

This plugin provides access to distributed computing using IPython_ parallel
machinery.

.. note::

   We provide backward compatibility with IPython_ versions earlier than 0.10.1
   using the IPythonX plugin.

Please read the IPython_ documentation to determine how to set up your cluster
for distributed processing. This typically involves calling ipcluster. Once the
clients have been started, any pipeline can be executed with::

  workflow.run(plugin='IPython')

SGE/PBS
-------

In order to use nipype with SGE_ or PBS_ you simply need to call::

  workflow.run(plugin='SGE')
  workflow.run(plugin='PBS')

Optional arguments::

  template: custom template file to use
  qsub_args: any other command line args to be passed to qsub.
  max_jobname_len: (PBS only) maximum length of the job name. Default 15.

For example, the following snippet executes the workflow on myqueue with a
custom template::

  workflow.run(plugin='SGE',
               plugin_args=dict(template='mytemplate.sh', qsub_args='-q myqueue'))

In addition to overall workflow configuration, you can use node level
configuration for PBS/SGE::

  node.plugin_args = {'qsub_args': '-l nodes=1:ppn=3'}

This would apply only to the node and is useful in situations where a
particular node might use more resources than other nodes in a workflow.

.. note::

   Setting the keyword `overwrite` would overwrite any global configuration
   with this local configuration::

     node.plugin_args = {'qsub_args': '-l nodes=1:ppn=3', 'overwrite': True}

LSF
---

Submitting via LSF is almost identical to SGE above::

  workflow.run(plugin='LSF')

Optional arguments::

  template: custom template file to use
  bsub_args: any other command line args to be passed to bsub.

HTCondor
--------

DAGMan
~~~~~~

With its DAGMan_ component HTCondor_ (previously Condor) allows for submitting
entire graphs of dependent jobs at once. With the ``CondorDAGMan`` plug-in
Nipype can utilize this functionality to submit complete workflows directly and
in a single step. Consequently, and in contrast to other plug-ins, workflow
execution returns almost instantaneously -- Nipype is only used to generate the
workflow graph, while job scheduling and dependency resolution are entirely
managed by HTCondor_.

Please note that although DAGMan_ supports specification of data dependencies
as well as data provisioning on compute nodes this functionality is currently
not supported by this plug-in. As with all other batch systems supported by
Nipype, only HTCondor pools with a shared file system can be used to process
Nipype workflows.

Workflow execution with HTCondor DAGMan is done by calling::

  workflow.run(plugin='CondorDAGMan')

Job execution behavior can be tweaked with the following optional plug-in
arguments. The value of most arguments can be a literal string or a filename,
where in the latter case the content of the file will be used as the argument
value::

  submit_template : submit spec template for individual jobs in a DAG (see
                    CondorDAGManPlugin.default_submit_template for the default)
  initial_specs : additional submit specs that are prepended to any job's
                  submit file
  override_specs : additional submit specs that are appended to any job's
                   submit file
  wrapper_cmd : path to an executable that will be started instead of a node
                script. This is useful for a wrapper script that executes
                certain functionality before or after a node runs.
                If this option is given the wrapper command is called with the
                respective Python executable and the path to the node script as
                final arguments
  wrapper_args : optional additional arguments to a wrapper command
  dagman_args : arguments to be prepended to the job execution script in the
                dagman call
  block : if True the plugin call will block until Condor has finished
          processing the entire workflow (default: False)

Please see the `HTCondor documentation`_ for details on possible configuration
options and command line arguments.

Using the ``wrapper_cmd`` argument it is possible to combine Nipype workflow
execution with checkpoint/migration functionality offered by, for example,
DMTCP_. This is especially useful in the case of workflows with long running
nodes, such as Freesurfer's recon-all pipeline, where Condor's job
prioritization algorithm could lead to jobs being evicted from compute nodes in
order to maximize overall throughput. With checkpoint/migration enabled such a
job would be checkpointed prior to eviction and resume work from the
checkpointed state after being rescheduled -- instead of restarting from
scratch.

On a Debian system, executing a workflow with support for checkpoint/migration
for all nodes could look like this::

  # define common parameters
  dmtcp_hdr = """
  should_transfer_files = YES
  when_to_transfer_output = ON_EXIT_OR_EVICT
  kill_sig = 2
  environment = DMTCP_TMPDIR=./;JALIB_STDERR_PATH=/dev/null;DMTCP_PREFIX_ID=$(CLUSTER)_$(PROCESS)
  """
  shim_args = "--log %(basename)s.shimlog --stdout %(basename)s.shimout --stderr %(basename)s.shimerr"
  # run workflow
  workflow.run(
      plugin='CondorDAGMan',
      plugin_args=dict(initial_specs=dmtcp_hdr,
                       wrapper_cmd='/usr/lib/condor/shim_dmtcp',
                       wrapper_args=shim_args)
      )

``qsub`` emulation
~~~~~~~~~~~~~~~~~~

.. note::

   This plug-in is deprecated and users should migrate to the more robust and
   more versatile ``CondorDAGMan`` plug-in.

Despite the differences between HTCondor and SGE-like batch systems the plugin
usage (incl. supported arguments) is almost identical. The HTCondor plugin
relies on a ``qsub`` emulation script for HTCondor, called ``condor_qsub``,
that can be obtained from a `Git repository on git.debian.org`_. This script is
currently not shipped with a standard HTCondor distribution, but is included in
the HTCondor package from http://neuro.debian.net. It is sufficient to download
this script and install it in any location on a system that is included in the
``PATH`` configuration.

.. _Git repository on git.debian.org: http://anonscm.debian.org/gitweb/?p=pkg-exppsy/condor.git;a=blob_plain;f=debian/condor_qsub;hb=HEAD

Running a workflow in an HTCondor pool is done by calling::

  workflow.run(plugin='Condor')

The plugin supports a limited set of qsub arguments (``qsub_args``) that cover
the most common use cases. The ``condor_qsub`` emulation script translates qsub
arguments into the corresponding HTCondor terminology and handles the actual
job submission. For details on supported options see the manpage of
``condor_qsub``.

Optional arguments::

  qsub_args: any other command line args to be passed to condor_qsub.

.. include:: ../links_names.txt

.. _SGE: http://www.oracle.com/us/products/tools/oracle-grid-engine-075549.html
.. _OGE: http://www.oracle.com/us/products/tools/oracle-grid-engine-075549.html
.. _Soma: http://brainvisa.info/soma/soma-workflow/
.. _PBS: http://www.clusterresources.com/products/torque-resource-manager.php
.. _LSF: http://www.platform.com/Products/platform-lsf
.. _HTCondor: http://www.cs.wisc.edu/htcondor/
..
_DAGMan: http://research.cs.wisc.edu/htcondor/dagman/dagman.html .. _HTCondor documentation: http://research.cs.wisc.edu/htcondor/manual .. _DMTCP: http://dmtcp.sourceforge.net .. _SLURM: http://slurm.schedmd.com/ nipype-0.9.2/doc/users/saving_workflows.rst000066400000000000000000000070361227300005300210700ustar00rootroot00000000000000.. _saving_workflows: =================================================== Saving Workflows and Nodes to a file (experimental) =================================================== On top of the standard way of saving (i.e. serializing) objects in Python (see `pickle `_) Nipype provides methods to turn Workflows and nodes into human readable code. This is useful if you want to save a Workflow that you have generated on the fly for future use. To generate Python code for a Workflow use the export method: .. testcode:: from nipype.interfaces.fsl import BET, ImageMaths from nipype.pipeline.engine import Workflow, Node, MapNode, format_node from nipype.interfaces.utility import Function, IdentityInterface bet = Node(BET(), name='bet') bet.iterables = ('frac', [0.3, 0.4]) bet2 = MapNode(BET(), name='bet2', iterfield=['infile']) bet2.iterables = ('frac', [0.4, 0.5]) maths = Node(ImageMaths(), name='maths') def testfunc(in1): """dummy func """ out = in1 + 'foo' + "out1" return out funcnode = Node(Function(input_names=['a'], output_names=['output'], function=testfunc), name='testfunc') funcnode.inputs.in1 = '-sub' func = lambda x: x inode = Node(IdentityInterface(fields=['a']), name='inode') wf = Workflow('testsave') wf.add_nodes([bet2]) wf.connect(bet, 'mask_file', maths, 'in_file') wf.connect(bet2, ('mask_file', func), maths, 'in_file2') wf.connect(inode, 'a', funcnode, 'in1') wf.connect(funcnode, 'output', maths, 'op_string') wf.export() This will create a file "outputtestsave.py" with the following content: .. 
testcode:: from nipype.pipeline.engine import Workflow, Node, MapNode from nipype.interfaces.utility import IdentityInterface from nipype.interfaces.utility import Function from nipype.utils.misc import getsource from nipype.interfaces.fsl.preprocess import BET from nipype.interfaces.fsl.utils import ImageMaths # Functions func = lambda x: x # Workflow testsave = Workflow("testsave") # Node: testsave.inode inode = Node(IdentityInterface(fields=['a'], mandatory_inputs=True), name="inode") # Node: testsave.testfunc testfunc = Node(Function(input_names=['a'], output_names=['output']), name="testfunc") def testfunc_1(in1): """dummy func """ out = in1 + 'foo' + "out1" return out testfunc.inputs.function_str = getsource(testfunc_1) testfunc.inputs.ignore_exception = False testfunc.inputs.in1 = '-sub' testsave.connect(inode, "a", testfunc, "in1") # Node: testsave.bet2 bet2 = MapNode(BET(), iterfield=['infile'], name="bet2") bet2.iterables = ('frac', [0.4, 0.5]) bet2.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'} bet2.inputs.ignore_exception = False bet2.inputs.output_type = 'NIFTI_GZ' bet2.inputs.terminal_output = 'stream' # Node: testsave.bet bet = Node(BET(), name="bet") bet.iterables = ('frac', [0.3, 0.4]) bet.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'} bet.inputs.ignore_exception = False bet.inputs.output_type = 'NIFTI_GZ' bet.inputs.terminal_output = 'stream' # Node: testsave.maths maths = Node(ImageMaths(), name="maths") maths.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'} maths.inputs.ignore_exception = False maths.inputs.output_type = 'NIFTI_GZ' maths.inputs.terminal_output = 'stream' testsave.connect(bet2, ('mask_file', func), maths, "in_file2") testsave.connect(bet, "mask_file", maths, "in_file") testsave.connect(testfunc, "output", maths, "op_string") The file is ready to use and includes all the necessary imports. .. include:: ../links_names.txt nipype-0.9.2/doc/users/select_files.rst000066400000000000000000000073261227300005300201270ustar00rootroot00000000000000.. _select_files: ========================== The SelectFiles Interfaces ========================== Nipype 0.9 introduces a new interface for intelligently finding files on the disk and feeding them into your workflows: :ref:`SelectFiles `. SelectFiles is intended as a simpler alternative to the :ref:`DataGrabber ` interface that was discussed previously in :doc:`grabbing_and_sinking`. SelectFiles is built on Python `format strings `_, which are similar to the Python string interpolation feature you are likely already familiar with, but advantageous in several respects. Format strings allow you to replace named sections of template strings set off by curly braces (`{}`), possibly filtered through a set of functions that control how the values are rendered into the string. As a very basic example, we could write :: msg = "This workflow uses {package}" and then format it with keyword arguments:: print msg.format(package="FSL") SelectFiles only requires that you provide templates that can be used to find your data; the actual formatting happens behind the scenes. Consider a basic example in which you want to select a T1 image and multple functional images for a number of subjects. Invoking SelectFiles in this case is quite straightforward:: from nipype import SelectFiles templates = dict(T1="data/{subject_id}/struct/T1.nii", epi="data/{subject_id}/func/epi_run*.nii") sf = SelectFiles(templates) SelectFiles will take the `templates` dictionary and parse it to determine its own inputs and oututs. 
Specifically, each name used in the format spec (here just `subject_id`) will become an interface input, and each key in the dictionary (here `T1` and `epi`) will become interface outputs. The `templates` dictionary thus succinctly links the node inputs to the appropriate outputs. You'll also note that, as was the case with DataGrabber, you can use basic `glob `_ syntax to match multiple files for a given output field. Additionally, any of the conversions outlined in the Python documentation for format strings can be used in the templates. There are a few other options that help make SelectFiles flexible enough to deal with any situation where you need to collect data. Like DataGrabber, SelectFiles has a `base_directory` parameter that allows you to specify a path that is common to all of the values in the `templates` dictionary. Additionally, as `glob` does not return a sorted list, there is also a `sort_filelist` option, taking a boolean, to control whether sorting should be applied (it is True by default). The final input is `force_lists`, which controls how SelectFiles behaves in cases where only a single file matches the template. The default behavior is that when a template matches multiple files they are returned as a list, while a single file is returned as a string. There may be situations where you want to force the outputs to always be returned as a list (for example, you are writing a workflow that expects to operate on several runs of data, but some of your subjects only have a single run). In this case, `force_lists` can be used to tune the outputs of the interface. You can either use a boolean value, which will be applied to every output the interface has, or you can provide a list of the output fields that should be coerced to a list. Returning to our basic example, you may want to ensure that the `epi` files are returned as a list, but you only ever will have a single `T1` file. In this case, you would do :: sf = SelectFiles(templates, force_lists=["epi"]) .. include:: ../links_names.txt nipype-0.9.2/doc/users/spmmcr.rst000066400000000000000000000021431227300005300167570ustar00rootroot00000000000000.. _spmmcr: ==================================== Using SPM with MATLAB Common Runtime ==================================== In order to use the standalone MCR version of spm, you need to ensure that the following commands are executed at the beginning of your script: .. testcode:: from nipype import spm matlab_cmd = '/path/to/run_spm8.sh /path/to/Compiler_Runtime/v713/ script' spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True) you can test by calling: .. testcode:: spm.SPMCommand().version If you want to enforce the standalone MCR version of spm for nipype globally, you can do so by setting the following environment variables: *SPMMCRCMD* Specifies the command to use to run the spm standalone MCR version. You may still override the command as described above. *FORCE_SPMMCR* Set this to any value in order to enforce the use of spm standalone MCR version in nipype globally. Technically, this sets the `use_mcr` flag of the spm interface to True. Information about the MCR version of SPM8 can be found at: http://en.wikibooks.org/wiki/SPM/Standalone nipype-0.9.2/doc/users/tutorial_101.rst000066400000000000000000000142731227300005300177110ustar00rootroot00000000000000.. _tutorial_101: ============ Pipeline 101 ============ A workflow or pipeline is built by connecting processes or nodes to each other. 
In the context of nipype, every interface can be treated as a pipeline node having defined inputs and outputs. Creating a workflow then is a matter of connecting appropriate outputs to inputs. Currently, workflows are limited to being directional and cannot have any loops, thereby creating an ordering to data flow. The following nipype component architecture might help understanding some of the tutorials presented here. .. image:: images/componentarchitecture.png :width: 600 px My first pipeline ================= Although the most trivial workflow consists of a single node, we will create a workflow with two nodes: a realign node that will send the realigned functional data to a smoothing node. It is important to note that setting up a workflow is separate from executing it. **1. Import appropriate modules** .. testcode:: import nipype.interfaces.spm as spm # the spm interfaces import nipype.pipeline.engine as pe # the workflow and node wrappers **2. Define nodes** Here we take instances of interfaces and make them pipeline compatible by wrapping them with pipeline specific elements. To determine the inputs and outputs of a given interface, please see :ref:`interface_tutorial`. Let's start with defining a realign node using the interface :class:`nipype.interfaces.spm.Realign` .. testcode:: realigner = pe.Node(interface=spm.Realign(), name='realign') realigner.inputs.in_files = 'somefuncrun.nii' realigner.inputs.register_to_mean = True This would be equivalent to: .. testcode:: realigner = pe.Node(interface=spm.Realign(infile='somefuncrun.nii', register_to_mean = True), name='realign') In Pythonic terms, this is saying that interface option in Node accepts an *instance* of an interface. The inputs to this interface can be set either later or while initializing the interface. .. note:: In the above example, 'somefuncrun.nii' has to exist, otherwise the commands won't work. A node will check if appropriate inputs are being supplied. Similar to the realigner node, we now set up a smoothing node. .. testcode:: smoother = pe.Node(interface=spm.Smooth(fwhm=6), name='smooth') Now we have two nodes with their inputs defined. Note that we have not defined an input file for the smoothing node. This will be done by connecting the realigner to the smoother in step 5. **3. Creating and configuring a workflow** Here we create an instance of a workflow and indicate that it should operate in the current directory. .. testcode:: workflow = pe.Workflow(name='preproc') workflow.base_dir = '.' **4. Adding nodes to workflows (optional)** If nodes are going to be connected (see step 5), this step is not necessary. However, if you would like to run a node by itself without connecting it to any other node, then you need to add it to the workflow. For adding nodes, order of nodes is not important. .. testcode:: workflow.add_nodes([smoother, realigner]) This results in a workflow containing two isolated nodes: .. image:: images/smoothrealignunconnected.png **5. Connecting nodes to each other** We want to connect the output produced by realignment to the input of smoothing. This is done as follows. .. testcode:: workflow.connect(realigner, 'realigned_files', smoother, 'in_files') or alternatively, a more flexible notation can be used. Although not shown here, the following notation can be used to connect multiple outputs from one node to multiple inputs (see step 7 below). .. 
testcode::

   workflow.connect([(realigner, smoother, [('realigned_files', 'in_files')])])

This results in a workflow containing two connected nodes:

.. image:: images/smoothrealignconnected.png

**6. Visualizing the workflow**

The workflow is represented as a directed acyclic graph (DAG) and one can visualize this using the following command. In fact, the pictures above were generated using this.

.. testcode::

   workflow.write_graph()

This creates two files, graph.dot and graph_detailed.dot, and if graphviz_ is installed on your system it automatically converts them to png files. If graphviz is not installed you can take the dot files and load them in a graphviz visualizer elsewhere. You can specify how detailed the graph is going to be by using the "graph2use" argument, which takes the following options:

* hierarchical - creates a graph showing all embedded workflows (default)
* orig - creates a top level graph without expanding internal workflow nodes
* flat - expands workflow nodes recursively
* exec - expands workflows to depict iterables (be careful - can generate really large graphs)

**7. Extend it**

Now that you have seen a basic pipeline let's add another node to the above pipeline.

.. testcode::

   import nipype.algorithms.rapidart as ra
   artdetect = pe.Node(interface=ra.ArtifactDetect(), name='artdetect')
   artdetect.inputs.use_differences = [True, False]
   artdetect.inputs.use_norm = True
   artdetect.inputs.norm_threshold = 0.5
   artdetect.inputs.zintensity_threshold = 3
   workflow.connect([(realigner, artdetect,
                      [('realigned_files', 'realigned_files'),
                       ('realignment_parameters', 'realignment_parameters')]
                      )])

.. note::

   a) An alternative form of connect was used to connect multiple output fields from the realign node to corresponding input fields of the artifact detection node.

   b) The current visualization only shows connected input and output ports. It does not show all the parameters that you have set for a node.

This results in

.. image:: images/threecomponentpipe.png
   :width: 650 px

**8. Execute the workflow**

Assuming that **somefuncrun.nii** is actually a file or you've replaced it with an appropriate one, you can run the pipeline with:

.. testcode::

   workflow.run()

This should create a folder called preproc in your current directory, inside which are three folders: realign, smooth and artdetect (the names of the nodes). The outputs of these routines are in these folders.

.. include:: ../links_names.txt
nipype-0.9.2/doc/users/tutorial_102.rst000066400000000000000000000117611227300005300177110ustar00rootroot00000000000000.. _tutorial_102:

============
Pipeline 102
============

Now that you know how to construct a workflow and execute it, we will go into more advanced concepts. This tutorial focuses on :class:`nipype.pipeline.engine.Workflow`, :class:`nipype.pipeline.engine.Node` and :class:`nipype.pipeline.engine.MapNode`.

A workflow is a **directed acyclic graph (DAG)** consisting of nodes which can be of type `Workflow`, `Node` or `MapNode`. Workflows can be re-used and hierarchical workflows can be easily constructed.

'name' : the mandatory keyword arg
==================================

When instantiating a Workflow, Node or MapNode, a `name` has to be provided. For any given level of a workflow, no two nodes can have the same name. The engine will let you know if this is the case when you add nodes to a workflow either directly using `add_nodes` or using the `connect` function. Names have many internal uses. They determine the name of the directory in which the workflow/node is run and the outputs are stored.

..
testcode:: realigner = pe.Node(interface=spm.Realign(), name='RealignSPM') Now this output will be stored in a directory called *RealignSPM*. Proper naming of your nodes can be advantageous from the perspective that it provides a semantic descriptor aligned with your thought process. This name parameter is also used to refer to nodes in embedded workflows. iterables --------- This can only be set for Node and MapNode. This is syntactic sugar for running a subgraph with the Node/MapNode at its root in a ``for`` loop. For example, consider an fMRI preprocessing pipeline that you would like to run for all your subjects. You can define a workflow and then execute it for every single subject inside a ``for`` loop. Consider the simplistic example below, where startnode is a node belonging to workflow 'mywork.' .. testcode:: for s in subjects: startnode.inputs.subject_id = s mywork.run() The pipeline engine provides a convenience function that simplifies this: .. testcode:: startnode.iterables = ('subject_id', subjects) mywork.run() This will achieve the same exact behavior as the for loop above. The workflow graph is: .. image:: images/proc2subj.png :width: 650 px Now consider the situation in which you want the last node (typically smoothing) of your preprocessing pipeline to smooth using two different kernels (0 mm and 6 mm FWHM). Again the common approach would be: .. testcode:: for s in subjects: startnode.inputs.subject_id = s uptosmoothingworkflow.run() smoothnode.inputs.infile = lastnode.output.outfile for fwhm in [0, 6]: smoothnode.inputs.fwhm = fwhm remainingworkflow.run() Instead of having multiple ``for`` loops at various stages, you can set up another set of iterables for the smoothnode. .. testcode:: startnode.iterables = ('subject_id', subjects) smoothnode.iterables = ('fwhm', [0, 6]) mywork.run() This will run the preprocessing workflow for two different smoothing kernels over all subjects. .. image:: images/proc2subj2fwhm.png :width: 650 px Thus setting iterables has a multiplicative effect. In the above examples there is a separate, distinct specifymodel node that's executed for each combination of subject and smoothing. iterfield --------- This is a mandatory keyword arg for MapNode. This enables running the underlying interface over a set of inputs and is particularly useful when the interface can only operate on a single input. For example, the :class:`nipype.interfaces.fsl.BET` will operate on only one (3d or 4d) NIfTI file. But wrapping BET in a MapNode can execute it over a list of files: .. testcode:: better = pe.MapNode(interface=fsl.Bet(), name='stripper', iterfield=['in_file']) better.inputs.in_file = ['file1.nii','file2.nii'] better.run() This will create a directory called ``stripper`` and inside it two subdirectories called ``in_file_0`` and ``in_file_1``. The output of running bet separately on each of those files will be stored in those two subdirectories. This can be extended to run it on pairwise inputs. For example, .. testcode:: transform = pe.MapNode(interface=fs.ApplyVolTransform(), name='warpvol', iterfield=['source_file', 'reg_file']) transform.inputs.source_file = ['file1.nii','file2.nii'] transform.inputs.reg_file = ['file1.reg','file2.reg'] transform.run() The above will be equivalent to running transform by taking corresponding items from each of the two fields in iterfield. The subdirectories get always named with respect to the first iterfield. overwrite --------- The overwrite keyword arg forces a node to be rerun. 
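For example, a minimal sketch reusing the ``better`` node from the iterfield example above (the surrounding setup is assumed unchanged): passing ``overwrite=True`` as a keyword argument when the node is constructed should force its interface to be re-executed even when cached results from a previous run exist.

.. testcode::

   import nipype.interfaces.fsl as fsl
   import nipype.pipeline.engine as pe

   # hypothetical variant of the earlier MapNode example: ask the engine to
   # re-run BET regardless of any previously cached results
   better = pe.MapNode(interface=fsl.BET(), name='stripper',
                       iterfield=['in_file'], overwrite=True)
   better.inputs.in_file = ['file1.nii', 'file2.nii']
   better.run()
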
The `clone` function
--------------------

The `clone` function can be used to create a copy of a workflow. No references to the original workflow are retained. As such the clone function requires a name keyword arg that specifies a new name for the duplicate workflow.

.. include:: ../links_names.txt
nipype-0.9.2/doc/users/tutorial_103.rst000066400000000000000000000072711227300005300177130ustar00rootroot00000000000000.. _tutorial_103:

============
Pipeline 103
============

Modifying inputs to pipeline nodes
==================================

Two nodes can be connected as shown below.

.. testcode::

   workflow.connect(realigner, 'realigned_files', smoother, 'infile')

The connection mechanism allows for a function to be evaluated on the output field ('realigned_files') of the source node (realigner) and have its result be sent to the input field ('infile') of the destination node (smoother).

.. testcode::

   def reverse_order(inlist):
       inlist.reverse()
       return inlist

   workflow.connect(realigner, ('realigned_files', reverse_order),
                    smoother, 'infile')

This can be extended to provide additional arguments to the function. For example:

.. testcode::

   def reorder(inlist, order):
       return [inlist[item] for item in order]

   workflow.connect(realigner, ('realigned_files', reorder, [2, 3, 0, 1]),
                    smoother, 'infile')

In this example, we assume that realigned_files produces a list of 4 files. We can reorder these files using the modifier function. Since such modifications are not tracked, they should be used with extreme care and only in cases where absolutely necessary. Often, one may find that it is better to insert a node rather than a function.

Distributed computation
=======================

The pipeline engine has built-in support for distributed computation on clusters. This can be achieved via plugin-modules for Python_ multiprocessing, the IPython_ distributed computing interface, or SGE/PBS/Condor, provided the user sets up a workflow on a shared filesystem. These modules can take arguments that specify additional distribution engine parameters. For IPython_ the environment needs to be configured for distributed operation. Details are available at :ref:`plugins`.

The default behavior is to run in series using the Linear plugin.

.. testcode::

   workflow.run()

In some cases it may be advantageous to run the workflow in series locally (e.g., debugging, small-short pipelines, large memory only interfaces, relocating working directory/updating hashes).

Debugging
=========

When a crash happens while running a pipeline, a crashdump is stored in the pipeline's working directory unless the config option 'crashdumpdir' has been set (see :ref:`config_options`). The crashdump is a compressed numpy file that stores a dictionary containing three fields:

1. node - the node that failed
2. execgraph - the graph that the node came from
3. traceback - from local or remote session for the failure.

We keep extending the information contained in the file and making it easier to troubleshoot the failures. However, in the meantime the following can help to recover information related to the failure.

In IPython_ do (``%pdb`` in IPython_ is similar to ``dbstop if error`` in Matlab):

..
testcode:: from nipype.utils.filemanip import loadflat crashinfo = loadflat('crashdump....npz') %pdb crashinfo['node'].run() # re-creates the crash pdb> up #typically, but not necessarily the crash is one stack frame up pdb> inspect variables pdb>quit Relocation of workdir ===================== In some circumstances, one might decide to move their entire working directory to a new location. It would be convenient to rerun only necessary components of the pipeline, instead of running all the nodes all over again. It is possible to do that with the :func:`~nipype.pipeline.engine.Pipeline.updatehash` function. .. testcode:: workflow.run(updatehash=True) This will execute the workflow and update all the hash values that were stored without actually running any of the interfaces. .. include:: ../links_names.txt nipype-0.9.2/doc/users/vagrant.rst000066400000000000000000000031321227300005300171170ustar00rootroot00000000000000.. _debug: ====================== Running Nipype in a VM ====================== .. tip:: Creating the Vagrant VM as described below requires an active internet connection. Container technologies (Vagrant_, Docker_) allow creating and manipulating lightweight virtual environments. The Nipype_ source now contains a Vagrantfile to launch a Vagrant_ VM. Requirements: * Vagrant_ * Virtualbox_ After you have installed Vagrant and Virtualbox, you simply need to download the latest Nipype source and unzip/tar/compress it. Go into your terminal and switch to the nipype source directory. Make sure the Vagrantfile is in the directory. Now you can execute:: vagrant up This will launch and provision the virtual machine. The default virtual machine is built using Ubuntu Precise 64, linked to the NeuroDebian_ source repo and contains a 2 node Grid Engine for cluster execution. The machine has a default IP address of `192.168.100.20` . From the vagrant startup directory you can log into the machine using:: vagrant ssh Now you can install your favorite software using:: sudo apt-get install fsl afni Also note that the directory in which you call `vagrant up` will be mounted under `/vagrant` inside the virtual machine. You can also copy the Vagrantfile or modify it in order to mount a different directory/directories. Please read through Vagrant_ documentation on other features. The python environment is built using a `miniconda `_ distribution. Hence `conda` can be used to do your python package management inside the VM. .. include:: ../links_names.txt nipype-0.9.2/doc/version.rst000066400000000000000000000001061227300005300157770ustar00rootroot00000000000000.. _version: .. htmlonly:: :Release: |version| :Date: |today| nipype-0.9.2/examples/000077500000000000000000000000001227300005300146345ustar00rootroot00000000000000nipype-0.9.2/examples/README000066400000000000000000000002621227300005300155140ustar00rootroot00000000000000A dataset for use with these scripts can be downloaded from the nipype website. At the time of writing, it's at: http://nipy.sourceforge.net/nipype/users/pipeline_tutorial.html nipype-0.9.2/examples/dmri_camino_dti.py000077500000000000000000000263411227300005300203400ustar00rootroot00000000000000#!/usr/bin/env python """ ================= dMRI: Camino, DTI ================= Introduction ============ This script, camino_dti_tutorial.py, demonstrates the ability to perform basic diffusion analysis in a Nipype pipeline. 
python dmri_camino_dti.py We perform this analysis using the FSL course data, which can be acquired from here: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz Import necessary modules from nipype. """ import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.camino as camino import nipype.interfaces.fsl as fsl import nipype.interfaces.camino2trackvis as cam2trk import nipype.algorithms.misc as misc import os # system functions """ We use the following functions to scrape the voxel and data dimensions of the input images. This allows the pipeline to be flexible enough to accept and process images of varying size. The SPM Face tutorial (fmri_spm_face.py) also implements this inferral of voxel size from the data. """ def get_vox_dims(volume): import nibabel as nb if isinstance(volume, list): volume = volume[0] nii = nb.load(volume) hdr = nii.get_header() voxdims = hdr.get_zooms() return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] def get_data_dims(volume): import nibabel as nb if isinstance(volume, list): volume = volume[0] nii = nb.load(volume) hdr = nii.get_header() datadims = hdr.get_data_shape() return [int(datadims[0]), int(datadims[1]), int(datadims[2])] def get_affine(volume): import nibabel as nb nii = nb.load(volume) return nii.get_affine() subject_list = ['subj1'] fsl.FSLCommand.set_default_output_type('NIFTI') """ Map field names to individual subject runs """ info = dict(dwi=[['subject_id', 'data']], bvecs=[['subject_id','bvecs']], bvals=[['subject_id','bvals']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataGrabber` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.engine.Node` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=info.keys()), name = 'datasource') datasource.inputs.template = "%s/%s" # This needs to point to the fdt folder you can find after extracting # http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/') datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz') datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ An inputnode is used to pass the data obtained by the data grabber to the actual processing functions """ inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode") """ Setup for Diffusion Tensor Computation -------------------------------------- In this section we create the nodes necessary for diffusion analysis. First, the diffusion image is converted to voxel order. 
""" image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel") fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme") fsl2scheme.inputs.usegradmod = True """ Second, diffusion tensors are fit to the voxel-order data. """ dtifit = pe.Node(interface=camino.DTIFit(),name='dtifit') """ Next, a lookup table is generated from the schemefile and the signal-to-noise ratio (SNR) of the unweighted (q=0) data. """ dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen") dtlutgen.inputs.snr = 16.0 dtlutgen.inputs.inversion = 1 """ In this tutorial we implement probabilistic tractography using the PICo algorithm. PICo tractography requires an estimate of the fibre direction and a model of its uncertainty in each voxel; this is produced using the following node. """ picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs") picopdfs.inputs.inputmodel = 'dt' """ An FSL BET node creates a brain mask is generated from the diffusion image for seeding the PICo tractography. """ bet = pe.Node(interface=fsl.BET(), name="bet") bet.inputs.mask = True """ Finally, tractography is performed. First DT streamline tractography. """ trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt") """ Now camino's Probablistic Index of connectivity algorithm. In this tutorial, we will use only 1 iteration for time-saving purposes. """ trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico") trackpico.inputs.iterations = 1 """ Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse. """ cam2trk_dt = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt") cam2trk_dt.inputs.min_length = 30 cam2trk_dt.inputs.voxel_order = 'LAS' cam2trk_pico = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico") cam2trk_pico.inputs.min_length = 30 cam2trk_pico.inputs.voxel_order = 'LAS' trk2camino = pe.Node(interface=cam2trk.Trackvis2Camino(), name="trk2camino") """ Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, using the following two nodes. For VTK use VtkStreamlines. """ procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines") procstreamlines.inputs.outputtracts = 'oogl' """ We can also produce a variety of scalar values from our fitted tensors. The following nodes generate the fractional anisotropy and diffusivity trace maps and their associated headers. """ fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(),name='fa') trace = pe.Node(interface=camino.ComputeTensorTrace(),name='trace') dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig') analyzeheader_fa = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_fa") analyzeheader_fa.inputs.datatype = "double" analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace') fa2nii = pe.Node(interface=misc.CreateNifti(),name='fa2nii') trace2nii = fa2nii.clone("trace2nii") """ Since we have now created all our nodes, we can now define our workflow and start making connections. 
""" tractography = pe.Workflow(name='tractography') tractography.connect([(inputnode, bet,[("dwi","in_file")])]) """ File format conversion """ tractography.connect([(inputnode, image2voxel, [("dwi", "in_file")]), (inputnode, fsl2scheme, [("bvecs", "bvec_file"), ("bvals", "bval_file")]) ]) """ Tensor fitting """ tractography.connect([(image2voxel, dtifit,[['voxel_order','in_file']]), (fsl2scheme, dtifit,[['scheme','scheme_file']]) ]) """ Workflow for applying DT streamline tractogpahy """ tractography.connect([(bet, trackdt,[("mask_file","seed_file")])]) tractography.connect([(dtifit, trackdt,[("tensor_fitted","in_file")])]) """ Workflow for applying PICo """ tractography.connect([(bet, trackpico,[("mask_file","seed_file")])]) tractography.connect([(fsl2scheme, dtlutgen,[("scheme","scheme_file")])]) tractography.connect([(dtlutgen, picopdfs,[("dtLUT","luts")])]) tractography.connect([(dtifit, picopdfs,[("tensor_fitted","in_file")])]) tractography.connect([(picopdfs, trackpico,[("pdfs","in_file")])]) # ProcStreamlines might throw memory errors - comment this line out in such case tractography.connect([(trackdt, procstreamlines,[("tracked","in_file")])]) """ Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the tensor fitting. This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with the original DWI image from the input node, to the header-generating nodes. This ensures that the files will be correct and readable. """ tractography.connect([(dtifit, fa,[("tensor_fitted","in_file")])]) tractography.connect([(fa, analyzeheader_fa,[("fa","in_file")])]) tractography.connect([(inputnode, analyzeheader_fa,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) tractography.connect([(fa, fa2nii,[('fa','data_file')])]) tractography.connect([(inputnode, fa2nii,[(('dwi', get_affine), 'affine')])]) tractography.connect([(analyzeheader_fa, fa2nii,[('header', 'header_file')])]) tractography.connect([(dtifit, trace,[("tensor_fitted","in_file")])]) tractography.connect([(trace, analyzeheader_trace,[("trace","in_file")])]) tractography.connect([(inputnode, analyzeheader_trace,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) tractography.connect([(trace, trace2nii,[('trace','data_file')])]) tractography.connect([(inputnode, trace2nii,[(('dwi', get_affine), 'affine')])]) tractography.connect([(analyzeheader_trace, trace2nii,[('header', 'header_file')])]) tractography.connect([(dtifit, dteig,[("tensor_fitted","in_file")])]) tractography.connect([(trackpico, cam2trk_pico, [('tracked','in_file')])]) tractography.connect([(trackdt, cam2trk_dt, [('tracked','in_file')])]) tractography.connect([(inputnode, cam2trk_pico,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) tractography.connect([(inputnode, cam2trk_dt,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) """ Finally, we create another higher-level workflow to connect our tractography workflow with the info and datagrabbing nodes declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding their names to the subject list and their data to the proper folders. 
""" workflow = pe.Workflow(name="workflow") workflow.base_dir = os.path.abspath('camino_dti_tutorial') workflow.connect([(infosource,datasource,[('subject_id', 'subject_id')]), (datasource,tractography,[('dwi','inputnode.dwi'), ('bvals','inputnode.bvals'), ('bvecs','inputnode.bvecs') ]) ]) """ The following functions run the whole workflow and produce a .dot and .png graph of the processing pipeline. """ if __name__ == '__main__': workflow.run() workflow.write_graph() """ You can choose the format of the experted graph with the ``format`` option. For example ``workflow.write_graph(format='eps')`` """ nipype-0.9.2/examples/dmri_connectivity.py000077500000000000000000000646471227300005300207630ustar00rootroot00000000000000#!/usr/bin/env python """ ============================================= dMRI: Connectivity - Camino, CMTK, FreeSurfer ============================================= Introduction ============ This script, connectivity_tutorial.py, demonstrates the ability to perform connectivity mapping using Nipype for pipelining, Freesurfer for Reconstruction / Parcellation, Camino for tensor-fitting and tractography, and the Connectome Mapping Toolkit (CMTK) for connectivity analysis. python connectivity_tutorial.py We perform this analysis using the FSL course data, which can be acquired from here: * http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz This pipeline also requires the Freesurfer directory for 'subj1' from the FSL course data. To save time, this data can be downloaded from here: * http://dl.dropbox.com/u/315714/subj1.zip?dl=1 A data package containing the outputs of this pipeline can be obtained from here: * http://db.tt/1vx4vLeP Along with Camino (http://web4.cs.ucl.ac.uk/research/medic/camino/pmwiki/pmwiki.php?n=Main.HomePage), Camino-Trackvis (http://www.nitrc.org/projects/camino-trackvis/), FSL (http://www.fmrib.ox.ac.uk/fsl/), and Freesurfer (http://surfer.nmr.mgh.harvard.edu/), you must also have the Connectome File Format library installed as well as the Connectome Mapper. These are written by Stephan Gerhard and can be obtained from: http://www.cmtk.org/ Or on github at: CFFlib: https://github.com/LTS5/cfflib CMP: https://github.com/LTS5/cmp Output data can be visualized in the ConnectomeViewer ConnectomeViewer: https://github.com/LTS5/connectomeviewer First, we import the necessary modules from nipype. """ import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.camino as camino import nipype.interfaces.fsl as fsl import nipype.interfaces.camino2trackvis as cam2trk import nipype.interfaces.freesurfer as fs # freesurfer import nipype.interfaces.cmtk as cmtk import nipype.algorithms.misc as misc import inspect import os.path as op # system functions import cmp # connectome mapper """ We define the following functions to scrape the voxel and data dimensions of the input images. This allows the pipeline to be flexible enough to accept and process images of varying size. The SPM Face tutorial (fmri_spm_face.py) also implements this inferral of voxel size from the data. We also define functions to select the proper parcellation/segregation file from Freesurfer's output for each subject. For the mapping in this tutorial, we use the aparc+seg.mgz file. While it is possible to change this to use the regions defined in aparc.a2009s+aseg.mgz, one would also have to write/obtain a network resolution map defining the nodes based on those regions. 
""" def get_vox_dims(volume): import nibabel as nb if isinstance(volume, list): volume = volume[0] nii = nb.load(volume) hdr = nii.get_header() voxdims = hdr.get_zooms() return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] def get_data_dims(volume): import nibabel as nb if isinstance(volume, list): volume = volume[0] nii = nb.load(volume) hdr = nii.get_header() datadims = hdr.get_data_shape() return [int(datadims[0]), int(datadims[1]), int(datadims[2])] def get_affine(volume): import nibabel as nb nii = nb.load(volume) return nii.get_affine() def select_aparc(list_of_files): for in_file in list_of_files: if 'aparc+aseg.mgz' in in_file: idx = list_of_files.index(in_file) return list_of_files[idx] def select_aparc_annot(list_of_files): for in_file in list_of_files: if '.aparc.annot' in in_file: idx = list_of_files.index(in_file) return list_of_files[idx] """ These need to point to the main Freesurfer directory as well as the freesurfer subjects directory. No assumptions are made about where the directory of subjects is placed. Recon-all must have been run on subj1 from the FSL course data. """ fs_dir = op.abspath('/usr/local/freesurfer') subjects_dir = op.abspath(op.join(op.curdir,'./subjects')) fsl.FSLCommand.set_default_output_type('NIFTI') """ This needs to point to the fdt folder you can find after extracting http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz """ data_dir = op.abspath('fsl_course_data/fdt/') fs.FSCommand.set_default_subjects_dir(subjects_dir) subject_list = ['subj1'] """ An infosource node is used to loop through the subject list and define the input files. For our purposes, these are the diffusion-weighted MR image, b vectors, and b values. The info dictionary is used to provide a template of the naming of these files. For instance, the 4D nifti diffusion image is stored in the FSL course data as data.nii.gz. """ infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") infosource.iterables = ('subject_id', subject_list) info = dict(dwi=[['subject_id', 'data']], bvecs=[['subject_id','bvecs']], bvals=[['subject_id','bvals']]) """ A datasource node is used to perform the actual data grabbing. Templates for the associated images are used to obtain the correct images. The data are assumed to lie in data_dir/subject_id/. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=info.keys()), name = 'datasource') datasource.inputs.template = "%s/%s" datasource.inputs.base_directory = data_dir datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz') datasource.inputs.template_args = info datasource.inputs.base_directory = data_dir datasource.inputs.sort_filelist = True """ FreeSurferSource nodes are used to retrieve a number of image files that were automatically generated by the recon-all process. Here we use three of these nodes, two of which are defined to return files for solely the left and right hemispheres. 
""" FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource') FreeSurferSource.inputs.subjects_dir = subjects_dir FreeSurferSourceLH = pe.Node(interface=nio.FreeSurferSource(), name='fssourceLH') FreeSurferSourceLH.inputs.subjects_dir = subjects_dir FreeSurferSourceLH.inputs.hemi = 'lh' FreeSurferSourceRH = pe.Node(interface=nio.FreeSurferSource(), name='fssourceRH') FreeSurferSourceRH.inputs.subjects_dir = subjects_dir FreeSurferSourceRH.inputs.hemi = 'rh' """ Since the b values and b vectors come from the FSL course, we must convert it to a scheme file for use in Camino. """ fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme") fsl2scheme.inputs.usegradmod = True """ FSL's Brain Extraction tool is used to create a mask from the b0 image """ b0Strip = pe.Node(interface=fsl.BET(mask = True), name = 'bet_b0') """ FSL's FLIRT function is used to coregister the b0 mask and the structural image. A convert_xfm node is then used to obtain the inverse of the transformation matrix. FLIRT is used once again to apply the inverse transformation to the parcellated brain image. """ coregister = pe.Node(interface=fsl.FLIRT(dof=6), name = 'coregister') coregister.inputs.cost = ('corratio') convertxfm = pe.Node(interface=fsl.ConvertXFM(), name = 'convertxfm') convertxfm.inputs.invert_xfm = True inverse = pe.Node(interface=fsl.FLIRT(), name = 'inverse') inverse.inputs.interp = ('nearestneighbour') inverse_AparcAseg = pe.Node(interface=fsl.FLIRT(), name = 'inverse_AparcAseg') inverse_AparcAseg.inputs.interp = ('nearestneighbour') """ A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject. Nodes are used to convert the following: * Original structural image to NIFTI * Parcellated white matter image to NIFTI * Parcellated whole-brain image to NIFTI * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres are converted to GIFTI for visualization in ConnectomeViewer * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI """ mri_convert_Brain = pe.Node(interface=fs.MRIConvert(), name='mri_convert_Brain') mri_convert_Brain.inputs.out_type = 'nii' mri_convert_WMParc = mri_convert_Brain.clone('mri_convert_WMParc') mri_convert_AparcAseg = mri_convert_Brain.clone('mri_convert_AparcAseg') mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH') mris_convertLH.inputs.out_datatype = 'gii' mris_convertRH = mris_convertLH.clone('mris_convertRH') mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite') mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite') mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated') mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated') mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere') mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere') mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels') mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels') """ An inputnode is used to pass the data obtained by the data grabber to the actual processing functions """ inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals", "subject_id"]), name="inputnode") """ In this section we create the nodes necessary for diffusion analysis. First, the diffusion image is converted to voxel order, since this is the format in which Camino does its processing. 
""" image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel") """ Second, diffusion tensors are fit to the voxel-order data. If desired, these tensors can be converted to a Nifti tensor image using the DT2NIfTI interface. """ dtifit = pe.Node(interface=camino.DTIFit(),name='dtifit') """ Next, a lookup table is generated from the schemefile and the signal-to-noise ratio (SNR) of the unweighted (q=0) data. """ dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen") dtlutgen.inputs.snr = 16.0 dtlutgen.inputs.inversion = 1 """ In this tutorial we implement probabilistic tractography using the PICo algorithm. PICo tractography requires an estimate of the fibre direction and a model of its uncertainty in each voxel; this probabilitiy distribution map is produced using the following node. """ picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs") picopdfs.inputs.inputmodel = 'dt' """ Finally, tractography is performed. In this tutorial, we will use only one iteration for time-saving purposes. It is important to note that we use the TrackPICo interface here. This interface now expects the files required for PICo tracking (i.e. the output from picopdfs). Similar interfaces exist for alternative types of tracking, such as Bayesian tracking with Dirac priors (TrackBayesDirac). """ track = pe.Node(interface=camino.TrackPICo(), name="track") track.inputs.iterations = 1 """ Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse. """ camino2trackvis = pe.Node(interface=cam2trk.Camino2Trackvis(), name="camino2trk") camino2trackvis.inputs.min_length = 30 camino2trackvis.inputs.voxel_order = 'LAS' trk2camino = pe.Node(interface=cam2trk.Trackvis2Camino(), name="trk2camino") """ Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, using the following two nodes. """ vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines") procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines") procstreamlines.inputs.outputtracts = 'oogl' """ We can easily produce a variety of scalar values from our fitted tensors. The following nodes generate the fractional anisotropy and diffusivity trace maps and their associated headers, and then merge them back into a single .nii file. """ fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(),name='fa') trace = pe.Node(interface=camino.ComputeTensorTrace(),name='trace') dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig') analyzeheader_fa = pe.Node(interface=camino.AnalyzeHeader(),name='analyzeheader_fa') analyzeheader_fa.inputs.datatype = 'double' analyzeheader_trace = pe.Node(interface=camino.AnalyzeHeader(),name='analyzeheader_trace') analyzeheader_trace.inputs.datatype = 'double' fa2nii = pe.Node(interface=misc.CreateNifti(),name='fa2nii') trace2nii = fa2nii.clone("trace2nii") """ This section adds the Connectome Mapping Toolkit (CMTK) nodes. These interfaces are fairly experimental and may not function properly. In order to perform connectivity mapping using CMTK, the parcellated structural data is rewritten using the indices and parcellation scheme from the connectome mapper (CMP). This process has been written into the ROIGen interface, which will output a remapped aparc+aseg image as well as a dictionary of label information (i.e. 
name, display colours) pertaining to the original and remapped regions. These label values are input from a user-input lookup table, if specified, and otherwise the default Freesurfer LUT (/freesurfer/FreeSurferColorLUT.txt). """ roigen = pe.Node(interface=cmtk.ROIGen(), name="ROIGen") cmp_config = cmp.configuration.PipelineConfiguration(parcellation_scheme = "NativeFreesurfer") cmp_config.parcellation_scheme = "NativeFreesurfer" roigen.inputs.LUT_file = cmp_config.get_freeview_lut("NativeFreesurfer")['freesurferaparc'] roigen_structspace = roigen.clone('ROIGen_structspace') """ The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts and outputs a number of different files. The most important of which is the connectivity network itself, which is stored as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). Also outputted are various NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the specific tracts that connect between user-selected regions. """ creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix") creatematrix.inputs.count_region_intersections = True createnodes = pe.Node(interface=cmtk.CreateNodes(), name="CreateNodes") createnodes.inputs.resolution_network_file = cmp_config.parcellation['freesurferaparc']['node_information_graphml'] """ Here we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file. """ CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter") giftiSurfaces = pe.Node(interface=util.Merge(8), name="GiftiSurfaces") giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels") niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes") fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays") gpickledNetworks = pe.Node(interface=util.Merge(1), name="NetworkFiles") """ Since we have now created all our nodes, we can define our workflow and start making connections. """ mapping = pe.Workflow(name='mapping') """ First, we connect the input node to the early conversion functions. 
FreeSurfer input nodes: """ mapping.connect([(inputnode, FreeSurferSource,[("subject_id","subject_id")])]) mapping.connect([(inputnode, FreeSurferSourceLH,[("subject_id","subject_id")])]) mapping.connect([(inputnode, FreeSurferSourceRH,[("subject_id","subject_id")])]) """ Required conversions for processing in Camino: """ mapping.connect([(inputnode, image2voxel, [("dwi", "in_file")]), (inputnode, fsl2scheme, [("bvecs", "bvec_file"), ("bvals", "bval_file")]), (image2voxel, dtifit,[['voxel_order','in_file']]), (fsl2scheme, dtifit,[['scheme','scheme_file']]) ]) """ Nifti conversions for the parcellated white matter image (used in Camino's conmap), and the subject's stripped brain image from Freesurfer: """ mapping.connect([(FreeSurferSource, mri_convert_WMParc,[('wmparc','in_file')])]) mapping.connect([(FreeSurferSource, mri_convert_Brain,[('brain','in_file')])]) """ Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres) """ mapping.connect([(FreeSurferSourceLH, mris_convertLH,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRH,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite,[('white','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite,[('white','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated,[('inflated','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated,[('inflated','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere,[('sphere','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere,[('sphere','in_file')])]) """ The annotation files are converted using the pial surface as a map via the MRIsConvert interface. One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files specifically (rather than i.e. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource. """ mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, [(('annot', select_aparc_annot), 'annot_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, [(('annot', select_aparc_annot), 'annot_file')])]) """ This section coregisters the diffusion-weighted and parcellated white-matter / whole brain images. At present the conmap node connection is left commented, as there have been recent changes in Camino code that have presented some users with errors. """ mapping.connect([(inputnode, b0Strip,[('dwi','in_file')])]) mapping.connect([(b0Strip, coregister,[('out_file','in_file')])]) mapping.connect([(mri_convert_Brain, coregister,[('out_file','reference')])]) mapping.connect([(coregister, convertxfm,[('out_matrix_file','in_file')])]) mapping.connect([(b0Strip, inverse,[('out_file','reference')])]) mapping.connect([(convertxfm, inverse,[('out_file','in_matrix_file')])]) mapping.connect([(mri_convert_WMParc, inverse,[('out_file','in_file')])]) """ The tractography pipeline consists of the following nodes. Further information about the tractography can be found in nipype/examples/dmri_camino_dti.py. 
""" mapping.connect([(b0Strip, track,[("mask_file","seed_file")])]) mapping.connect([(fsl2scheme, dtlutgen,[("scheme","scheme_file")])]) mapping.connect([(dtlutgen, picopdfs,[("dtLUT","luts")])]) mapping.connect([(dtifit, picopdfs,[("tensor_fitted","in_file")])]) mapping.connect([(picopdfs, track,[("pdfs","in_file")])]) """ Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the tensor fitting. This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with the original DWI image from the input node, to the header-generating nodes. This ensures that the files will be correct and readable. """ mapping.connect([(dtifit, fa,[("tensor_fitted","in_file")])]) mapping.connect([(fa, analyzeheader_fa,[("fa","in_file")])]) mapping.connect([(inputnode, analyzeheader_fa,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) mapping.connect([(fa, fa2nii,[('fa','data_file')])]) mapping.connect([(inputnode, fa2nii,[(('dwi', get_affine), 'affine')])]) mapping.connect([(analyzeheader_fa, fa2nii,[('header', 'header_file')])]) mapping.connect([(dtifit, trace,[("tensor_fitted","in_file")])]) mapping.connect([(trace, analyzeheader_trace,[("trace","in_file")])]) mapping.connect([(inputnode, analyzeheader_trace,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) mapping.connect([(trace, trace2nii,[('trace','data_file')])]) mapping.connect([(inputnode, trace2nii,[(('dwi', get_affine), 'affine')])]) mapping.connect([(analyzeheader_trace, trace2nii,[('header', 'header_file')])]) mapping.connect([(dtifit, dteig,[("tensor_fitted","in_file")])]) """ The output tracts are converted to Trackvis format (and back). Here we also use the voxel- and data-grabbing functions defined at the beginning of the pipeline. """ mapping.connect([(track, camino2trackvis, [('tracked','in_file')]), (track, vtkstreamlines,[['tracked','in_file']]), (camino2trackvis, trk2camino,[['trackvis','in_file']]) ]) mapping.connect([(inputnode, camino2trackvis,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) """ Here the CMTK connectivity mapping nodes are connected. The original aparc+aseg image is converted to NIFTI, then registered to the diffusion image and delivered to the ROIGen node. The remapped parcellation, original tracts, and label file are then given to CreateMatrix. 
""" mapping.connect(createnodes, 'node_network', creatematrix, 'resolution_network_file') mapping.connect([(FreeSurferSource, mri_convert_AparcAseg, [(('aparc_aseg', select_aparc), 'in_file')])]) mapping.connect([(b0Strip, inverse_AparcAseg,[('out_file','reference')])]) mapping.connect([(convertxfm, inverse_AparcAseg,[('out_file','in_matrix_file')])]) mapping.connect([(mri_convert_AparcAseg, inverse_AparcAseg,[('out_file','in_file')])]) mapping.connect([(mri_convert_AparcAseg, roigen_structspace,[('out_file','aparc_aseg_file')])]) mapping.connect([(roigen_structspace, createnodes,[("roi_file","roi_file")])]) mapping.connect([(inverse_AparcAseg, roigen,[("out_file","aparc_aseg_file")])]) mapping.connect([(roigen, creatematrix,[("roi_file","roi_file")])]) mapping.connect([(camino2trackvis, creatematrix,[("trackvis","tract_file")])]) mapping.connect([(inputnode, creatematrix,[("subject_id","out_matrix_file")])]) mapping.connect([(inputnode, creatematrix,[("subject_id","out_matrix_mat_file")])]) """ The merge nodes defined earlier are used here to create lists of the files which are destined for the CFFConverter. """ mapping.connect([(creatematrix, gpickledNetworks,[("matrix_files","in1")])]) mapping.connect([(mris_convertLH, giftiSurfaces,[("converted","in1")])]) mapping.connect([(mris_convertRH, giftiSurfaces,[("converted","in2")])]) mapping.connect([(mris_convertLHwhite, giftiSurfaces,[("converted","in3")])]) mapping.connect([(mris_convertRHwhite, giftiSurfaces,[("converted","in4")])]) mapping.connect([(mris_convertLHinflated, giftiSurfaces,[("converted","in5")])]) mapping.connect([(mris_convertRHinflated, giftiSurfaces,[("converted","in6")])]) mapping.connect([(mris_convertLHsphere, giftiSurfaces,[("converted","in7")])]) mapping.connect([(mris_convertRHsphere, giftiSurfaces,[("converted","in8")])]) mapping.connect([(mris_convertLHlabels, giftiLabels,[("converted","in1")])]) mapping.connect([(mris_convertRHlabels, giftiLabels,[("converted","in2")])]) mapping.connect([(roigen, niftiVolumes,[("roi_file","in1")])]) mapping.connect([(inputnode, niftiVolumes,[("dwi","in2")])]) mapping.connect([(mri_convert_Brain, niftiVolumes,[("out_file","in3")])]) mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file","in1")])]) mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file_mm","in2")])]) mapping.connect([(creatematrix, fiberDataArrays,[("fiber_length_file","in3")])]) mapping.connect([(creatematrix, fiberDataArrays,[("fiber_label_file","in4")])]) """ This block actually connects the merged lists to the CFF converter. We pass the surfaces and volumes that are to be included, as well as the tracts and the network itself. The currently running pipeline (dmri_connectivity.py) is also scraped and included in the CFF file. This makes it easy for the user to examine the entire processing pathway used to generate the end product. 
""" CFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe())) mapping.connect([(giftiSurfaces, CFFConverter,[("out","gifti_surfaces")])]) mapping.connect([(giftiLabels, CFFConverter,[("out","gifti_labels")])]) mapping.connect([(gpickledNetworks, CFFConverter,[("out","gpickled_networks")])]) mapping.connect([(niftiVolumes, CFFConverter,[("out","nifti_volumes")])]) mapping.connect([(fiberDataArrays, CFFConverter,[("out","data_files")])]) mapping.connect([(creatematrix, CFFConverter,[("filtered_tractographies","tract_files")])]) mapping.connect([(inputnode, CFFConverter,[("subject_id","title")])]) """ Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding their names to the subject list and their data to the proper folders. """ connectivity = pe.Workflow(name="connectivity") connectivity.base_dir = op.abspath('dmri_connectivity') connectivity.connect([ (infosource,datasource,[('subject_id', 'subject_id')]), (datasource,mapping,[('dwi','inputnode.dwi'), ('bvals','inputnode.bvals'), ('bvecs','inputnode.bvecs') ]), (infosource,mapping,[('subject_id','inputnode.subject_id')]) ]) """ The following functions run the whole workflow and produce graphs describing the processing pipeline. By default, write_graph outputs a .dot file and a .png image, but here we set it to output the image as a vector graphic, by passing the format='eps' argument. """ if __name__ == '__main__': connectivity.run() connectivity.write_graph(format='eps') """ The output CFF file of this pipeline can be loaded in the Connectome Viewer (http://www.cmtk.org) After loading the network into memory it can be examined in 3D or as a connectivity matrix using the default scripts produced by the Code Oracle. To compare networks, one must use the MergeCNetworks interface to merge two networks into a single CFF file. Statistics can then be run using the Network Brain Statistics (NBS) plugin Surfaces can also be loaded along with their labels from the aparc+aseg file. The tractography is included in the file so that region-to-region fibers can be individually plotted using the Code Oracle. """ nipype-0.9.2/examples/dmri_connectivity_advanced.py000077500000000000000000000636131227300005300226000ustar00rootroot00000000000000#!/usr/bin/env python """ ============================================= dMRI: Connectivity - MRtrix, CMTK, FreeSurfer ============================================= Introduction ============ This script, connectivity_tutorial_advanced.py, demonstrates the ability to perform connectivity mapping using Nipype for pipelining, Freesurfer for Reconstruction / Segmentation, MRtrix for spherical deconvolution and tractography, and the Connectome Mapping Toolkit (CMTK) for further parcellation and connectivity analysis. python connectivity_tutorial_advanced.py We perform this analysis using the FSL course data, which can be acquired from here: * http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz This pipeline also requires the Freesurfer directory for 'subj1' from the FSL course data. To save time, this data can be downloaded from here: * http://dl.dropbox.com/u/315714/subj1.zip?dl=1 The result of this processing will be the connectome for subj1 as a Connectome File Format (CFF) File, using the Lausanne2008 parcellation scheme. 
A data package containing the outputs of this pipeline can be obtained from here: * http://db.tt/909Q3AC1 .. seealso:: connectivity_tutorial.py Original tutorial using Camino and the NativeFreesurfer Parcellation Scheme www.cmtk.org For more info about the parcellation scheme .. warning:: The ConnectomeMapper (https://github.com/LTS5/cmp or www.cmtk.org) must be installed for this tutorial to function! Packages and Data Setup ======================= Import necessary modules from nipype. """ import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.fsl as fsl import nipype.interfaces.freesurfer as fs # freesurfer import nipype.interfaces.mrtrix as mrtrix import nipype.algorithms.misc as misc import nipype.interfaces.cmtk as cmtk import nipype.interfaces.dipy as dipy import inspect import os, os.path as op # system functions from nipype.workflows.dmri.fsl.dti import create_eddy_correct_pipeline from nipype.workflows.dmri.camino.connectivity_mapping import select_aparc_annot from nipype.utils.misc import package_check import warnings from nipype.workflows.dmri.connectivity.nx import create_networkx_pipeline, create_cmats_to_csv_pipeline from nipype.workflows.smri.freesurfer import create_tessellation_flow try: package_check('cmp') except Exception, e: warnings.warn('cmp not installed') else: import cmp """ This needs to point to the freesurfer subjects directory (Recon-all must have been run on subj1 from the FSL course data) Alternatively, the reconstructed subject data can be downloaded from: * http://dl.dropbox.com/u/315714/subj1.zip """ subjects_dir = op.abspath(op.join(op.curdir,'./subjects')) fs.FSCommand.set_default_subjects_dir(subjects_dir) fsl.FSLCommand.set_default_output_type('NIFTI') fs_dir = os.environ['FREESURFER_HOME'] lookup_file = op.join(fs_dir,'FreeSurferColorLUT.txt') """ This needs to point to the fdt folder you can find after extracting * http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz """ data_dir = op.abspath(op.join(op.curdir,'exdata/')) subject_list = ['subj1'] """ Use infosource node to loop through the subject list and define the input files. For our purposes, these are the diffusion-weighted MR image, b vectors, and b values. """ infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") infosource.iterables = ('subject_id', subject_list) info = dict(dwi=[['subject_id', 'data']], bvecs=[['subject_id','bvecs']], bvals=[['subject_id','bvals']]) """ Use datasource node to perform the actual data grabbing. Templates for the associated images are used to obtain the correct images. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=info.keys()), name = 'datasource') datasource.inputs.template = "%s/%s" datasource.inputs.base_directory = data_dir datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz') datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ The input node and Freesurfer sources declared here will be the main conduits for the raw data to the rest of the processing pipeline. 
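Before moving on, it may help to see what the ``datasource`` templates above actually expand to. For the ``dwi`` field, the field template ``'%s/%s.nii.gz'`` is filled with the entries listed in ``template_args`` (the iterated ``subject_id`` and the literal string ``'data'``) and joined to ``base_directory``. A plain-Python sketch of that substitution, for illustration only (this is not the DataGrabber implementation)::

    import os.path as op

    base_directory = op.abspath(op.join(op.curdir, 'exdata/'))
    field_template = '%s/%s.nii.gz'       # the 'dwi' field template above
    args = ('subj1', 'data')              # subject_id value, literal file stem
    print op.join(base_directory, field_template % args)
    # e.g. .../exdata/subj1/data.nii.gz
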
""" inputnode = pe.Node(interface=util.IdentityInterface(fields=["subject_id","dwi", "bvecs", "bvals", "subjects_dir"]), name="inputnode") inputnode.inputs.subjects_dir = subjects_dir FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource') FreeSurferSourceLH = FreeSurferSource.clone('fssourceLH') FreeSurferSourceLH.inputs.hemi = 'lh' FreeSurferSourceRH = FreeSurferSource.clone('fssourceRH') FreeSurferSourceRH.inputs.hemi = 'rh' """ Creating the workflow's nodes ============================= Conversion nodes ---------------- A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject. Nodes are used to convert the following: * Original structural image to NIFTI * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres are converted to GIFTI for visualization in ConnectomeViewer * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI """ mri_convert_Brain = pe.Node(interface=fs.MRIConvert(), name='mri_convert_Brain') mri_convert_Brain.inputs.out_type = 'nii' mri_convert_ROI_scale500 = mri_convert_Brain.clone('mri_convert_ROI_scale500') mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH') mris_convertLH.inputs.out_datatype = 'gii' mris_convertRH = mris_convertLH.clone('mris_convertRH') mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite') mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite') mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated') mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated') mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere') mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere') mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels') mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels') """ Diffusion processing nodes -------------------------- .. seealso:: dmri_mrtrix_dti.py Tutorial that focuses solely on the MRtrix diffusion processing http://www.brain.org.au/software/mrtrix/index.html MRtrix's online documentation b-values and b-vectors stored in FSL's format are converted into a single encoding file for MRTrix. """ fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(),name='fsl2mrtrix') """ Distortions induced by eddy currents are corrected prior to fitting the tensors. The first image is used as a reference for which to warp the others. """ eddycorrect = create_eddy_correct_pipeline(name='eddycorrect') eddycorrect.inputs.inputnode.ref_num = 1 """ Tensors are fitted to each voxel in the diffusion-weighted image and from these three maps are created: * Major eigenvector in each voxel * Apparent diffusion coefficient * Fractional anisotropy """ dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(),name='dwi2tensor') tensor2vector = pe.Node(interface=mrtrix.Tensor2Vector(),name='tensor2vector') tensor2adc = pe.Node(interface=mrtrix.Tensor2ApparentDiffusion(),name='tensor2adc') tensor2fa = pe.Node(interface=mrtrix.Tensor2FractionalAnisotropy(),name='tensor2fa') MRconvert_fa = pe.Node(interface=mrtrix.MRConvert(),name='MRconvert_fa') MRconvert_fa.inputs.extension = 'nii' """ These nodes are used to create a rough brain mask from the b0 image. The b0 image is extracted from the original diffusion-weighted image, put through a simple thresholding routine, and smoothed using a 3x3 median filter. 
""" MRconvert = pe.Node(interface=mrtrix.MRConvert(),name='MRconvert') MRconvert.inputs.extract_at_axis = 3 MRconvert.inputs.extract_at_coordinate = [0] threshold_b0 = pe.Node(interface=mrtrix.Threshold(),name='threshold_b0') median3d = pe.Node(interface=mrtrix.MedianFilter3D(),name='median3d') """ The brain mask is also used to help identify single-fiber voxels. This is done by passing the brain mask through two erosion steps, multiplying the remaining mask with the fractional anisotropy map, and thresholding the result to obtain some highly anisotropic within-brain voxels. """ erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(),name='erode_mask_firstpass') erode_mask_secondpass = pe.Node(interface=mrtrix.Erode(),name='erode_mask_secondpass') MRmultiply = pe.Node(interface=mrtrix.MRMultiply(),name='MRmultiply') MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge') threshold_FA = pe.Node(interface=mrtrix.Threshold(),name='threshold_FA') threshold_FA.inputs.absolute_threshold_value = 0.7 """ For whole-brain tracking we also require a broad white-matter seed mask. This is created by generating a white matter mask, given a brainmask, and thresholding it at a reasonably high level. """ bet = pe.Node(interface=fsl.BET(mask = True), name = 'bet_b0') gen_WM_mask = pe.Node(interface=mrtrix.GenerateWhiteMatterMask(),name='gen_WM_mask') threshold_wmmask = pe.Node(interface=mrtrix.Threshold(),name='threshold_wmmask') threshold_wmmask.inputs.absolute_threshold_value = 0.4 """ The spherical deconvolution step depends on the estimate of the response function in the highly anisotropic voxels we obtained above. .. warning:: For damaged or pathological brains one should take care to lower the maximum harmonic order of these steps. """ estimateresponse = pe.Node(interface=mrtrix.EstimateResponseForSH(),name='estimateresponse') estimateresponse.inputs.maximum_harmonic_order = 6 csdeconv = pe.Node(interface=mrtrix.ConstrainedSphericalDeconvolution(),name='csdeconv') csdeconv.inputs.maximum_harmonic_order = 6 """ Finally, we track probabilistically using the orientation distribution functions obtained earlier. The tracts are then used to generate a tract-density image, and they are also converted to TrackVis format. """ probCSDstreamtrack = pe.Node(interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(),name='probCSDstreamtrack') probCSDstreamtrack.inputs.inputmodel = 'SD_PROB' probCSDstreamtrack.inputs.desired_number_of_tracks = 150000 tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(),name='tracks2prob') tracks2prob.inputs.colour = True MRconvert_tracks2prob = MRconvert_fa.clone(name='MRconvert_tracks2prob') tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(),name='tck2trk') trk2tdi = pe.Node(interface=dipy.TrackDensityMap(),name='trk2tdi') """ Structural segmentation nodes ----------------------------- The following node identifies the transformation between the diffusion-weighted image and the structural image. This transformation is then applied to the tracts so that they are in the same space as the regions of interest. """ coregister = pe.Node(interface=fsl.FLIRT(dof=6), name = 'coregister') coregister.inputs.cost = ('normmi') """ Parcellation is performed given the aparc+aseg image from Freesurfer. The CMTK Parcellation step subdivides these regions to return a higher-resolution parcellation scheme. The parcellation used here is entitled "scale500" and returns 1015 regions. 
""" parcellation_name = 'scale500' parcellate = pe.Node(interface=cmtk.Parcellate(), name="Parcellate") parcellate.inputs.parcellation_name = parcellation_name """ The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts and outputs a number of different files. The most important of which is the connectivity network itself, which is stored as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). Also outputted are various NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the specific tracts that connect between user-selected regions. Here we choose the Lausanne2008 parcellation scheme, since we are incorporating the CMTK parcellation step. """ parcellation_name = 'scale500' cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = "Lausanne2008" createnodes = pe.Node(interface=cmtk.CreateNodes(), name="CreateNodes") createnodes.inputs.resolution_network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml'] creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix") creatematrix.inputs.count_region_intersections = True """ Next we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file. The inspect.getfile command is used to package this script into the resulting CFF file, so that it is easy to look back at the processing parameters that were used. """ CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter") CFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe())) giftiSurfaces = pe.Node(interface=util.Merge(9), name="GiftiSurfaces") giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels") niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes") fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays") gpickledNetworks = pe.Node(interface=util.Merge(2), name="NetworkFiles") """ We also create a workflow to calculate several network metrics on our resulting file, and another CFF converter which will be used to package these networks into a single file. """ networkx = create_networkx_pipeline(name='networkx') cmats_to_csv = create_cmats_to_csv_pipeline(name='cmats_to_csv') NxStatsCFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="NxStatsCFFConverter") NxStatsCFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe())) tessflow = create_tessellation_flow(name='tessflow', out_format='gii') tessflow.inputs.inputspec.lookup_file = lookup_file """ Connecting the workflow ======================= Here we connect our processing pipeline. Connecting the inputs, FreeSurfer nodes, and conversions -------------------------------------------------------- """ mapping = pe.Workflow(name='mapping') """ First, we connect the input node to the FreeSurfer input nodes. 
""" mapping.connect([(inputnode, FreeSurferSource,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode, FreeSurferSource,[("subject_id","subject_id")])]) mapping.connect([(inputnode, FreeSurferSourceLH,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode, FreeSurferSourceLH,[("subject_id","subject_id")])]) mapping.connect([(inputnode, FreeSurferSourceRH,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode, FreeSurferSourceRH,[("subject_id","subject_id")])]) mapping.connect([(inputnode, tessflow,[("subjects_dir","inputspec.subjects_dir")])]) mapping.connect([(inputnode, tessflow,[("subject_id","inputspec.subject_id")])]) mapping.connect([(inputnode, parcellate,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode, parcellate,[("subject_id","subject_id")])]) mapping.connect([(parcellate, mri_convert_ROI_scale500,[('roi_file','in_file')])]) """ Nifti conversion for subject's stripped brain image from Freesurfer: """ mapping.connect([(FreeSurferSource, mri_convert_Brain,[('brain','in_file')])]) """ Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres) """ mapping.connect([(FreeSurferSourceLH, mris_convertLH,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRH,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite,[('white','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite,[('white','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated,[('inflated','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated,[('inflated','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere,[('sphere','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere,[('sphere','in_file')])]) """ The annotation files are converted using the pial surface as a map via the MRIsConvert interface. One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files specifically (rather than e.g. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource. """ mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, [(('annot', select_aparc_annot), 'annot_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, [(('annot', select_aparc_annot), 'annot_file')])]) """ Diffusion Processing -------------------- Now we connect the tensor computations: """ mapping.connect([(inputnode, fsl2mrtrix, [("bvecs", "bvec_file"), ("bvals", "bval_file")])]) mapping.connect([(inputnode, eddycorrect,[("dwi","inputnode.in_file")])]) mapping.connect([(eddycorrect, dwi2tensor,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(fsl2mrtrix, dwi2tensor,[("encoding_file","encoding_file")])]) mapping.connect([(dwi2tensor, tensor2vector,[['tensor','in_file']]), (dwi2tensor, tensor2adc,[['tensor','in_file']]), (dwi2tensor, tensor2fa,[['tensor','in_file']]), ]) mapping.connect([(tensor2fa, MRmult_merge,[("FA","in1")])]) mapping.connect([(tensor2fa, MRconvert_fa,[("FA","in_file")])]) """ This block creates the rough brain mask to be multiplied, mulitplies it with the fractional anisotropy image, and thresholds it to get the single-fiber voxels. 
""" mapping.connect([(eddycorrect, MRconvert,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(MRconvert, threshold_b0,[("converted","in_file")])]) mapping.connect([(threshold_b0, median3d,[("out_file","in_file")])]) mapping.connect([(median3d, erode_mask_firstpass,[("out_file","in_file")])]) mapping.connect([(erode_mask_firstpass, erode_mask_secondpass,[("out_file","in_file")])]) mapping.connect([(erode_mask_secondpass, MRmult_merge,[("out_file","in2")])]) mapping.connect([(MRmult_merge, MRmultiply,[("out","in_files")])]) mapping.connect([(MRmultiply, threshold_FA,[("out_file","in_file")])]) """ Here the thresholded white matter mask is created for seeding the tractography. """ mapping.connect([(eddycorrect, bet,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(eddycorrect, gen_WM_mask,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(bet, gen_WM_mask,[("mask_file","binary_mask")])]) mapping.connect([(fsl2mrtrix, gen_WM_mask,[("encoding_file","encoding_file")])]) mapping.connect([(gen_WM_mask, threshold_wmmask,[("WMprobabilitymap","in_file")])]) """ Next we estimate the fiber response distribution. """ mapping.connect([(eddycorrect, estimateresponse,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(fsl2mrtrix, estimateresponse,[("encoding_file","encoding_file")])]) mapping.connect([(threshold_FA, estimateresponse,[("out_file","mask_image")])]) """ Run constrained spherical deconvolution. """ mapping.connect([(eddycorrect, csdeconv,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(gen_WM_mask, csdeconv,[("WMprobabilitymap","mask_image")])]) mapping.connect([(estimateresponse, csdeconv,[("response","response_file")])]) mapping.connect([(fsl2mrtrix, csdeconv,[("encoding_file","encoding_file")])]) """ Connect the tractography and compute the tract density image. """ mapping.connect([(threshold_wmmask, probCSDstreamtrack,[("out_file","seed_file")])]) mapping.connect([(csdeconv, probCSDstreamtrack,[("spherical_harmonics_image","in_file")])]) mapping.connect([(probCSDstreamtrack, tracks2prob,[("tracked","in_file")])]) mapping.connect([(eddycorrect, tracks2prob,[("outputnode.eddy_corrected","template_file")])]) mapping.connect([(tracks2prob, MRconvert_tracks2prob,[("tract_image","in_file")])]) """ Structural Processing --------------------- First, we coregister the diffusion image to the structural image """ mapping.connect([(eddycorrect, coregister,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(mri_convert_Brain, coregister,[('out_file','reference')])]) """ The MRtrix-tracked fibers are converted to TrackVis format (with voxel and data dimensions grabbed from the DWI). The connectivity matrix is created with the transformed .trk fibers and the parcellation file. 
""" mapping.connect([(eddycorrect, tck2trk,[("outputnode.eddy_corrected","image_file")])]) mapping.connect([(mri_convert_Brain, tck2trk,[("out_file","registration_image_file")])]) mapping.connect([(coregister, tck2trk,[("out_matrix_file","matrix_file")])]) mapping.connect([(probCSDstreamtrack, tck2trk,[("tracked","in_file")])]) mapping.connect([(tck2trk, creatematrix,[("out_file","tract_file")])]) mapping.connect([(tck2trk, trk2tdi,[("out_file","in_file")])]) mapping.connect([(inputnode, creatematrix,[("subject_id","out_matrix_file")])]) mapping.connect([(inputnode, creatematrix,[("subject_id","out_matrix_mat_file")])]) mapping.connect([(parcellate, creatematrix,[("roi_file","roi_file")])]) mapping.connect([(parcellate, createnodes,[("roi_file","roi_file")])]) mapping.connect([(createnodes, creatematrix,[("node_network","resolution_network_file")])]) """ The merge nodes defined earlier are used here to create lists of the files which are destined for the CFFConverter. """ mapping.connect([(mris_convertLH, giftiSurfaces,[("converted","in1")])]) mapping.connect([(mris_convertRH, giftiSurfaces,[("converted","in2")])]) mapping.connect([(mris_convertLHwhite, giftiSurfaces,[("converted","in3")])]) mapping.connect([(mris_convertRHwhite, giftiSurfaces,[("converted","in4")])]) mapping.connect([(mris_convertLHinflated, giftiSurfaces,[("converted","in5")])]) mapping.connect([(mris_convertRHinflated, giftiSurfaces,[("converted","in6")])]) mapping.connect([(mris_convertLHsphere, giftiSurfaces,[("converted","in7")])]) mapping.connect([(mris_convertRHsphere, giftiSurfaces,[("converted","in8")])]) mapping.connect([(tessflow, giftiSurfaces,[("outputspec.meshes","in9")])]) mapping.connect([(mris_convertLHlabels, giftiLabels,[("converted","in1")])]) mapping.connect([(mris_convertRHlabels, giftiLabels,[("converted","in2")])]) mapping.connect([(parcellate, niftiVolumes,[("roi_file","in1")])]) mapping.connect([(eddycorrect, niftiVolumes,[("outputnode.eddy_corrected","in2")])]) mapping.connect([(mri_convert_Brain, niftiVolumes,[("out_file","in3")])]) mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file","in1")])]) mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file_mm","in2")])]) mapping.connect([(creatematrix, fiberDataArrays,[("fiber_length_file","in3")])]) mapping.connect([(creatematrix, fiberDataArrays,[("fiber_label_file","in4")])]) """ This block actually connects the merged lists to the CFF converter. We pass the surfaces and volumes that are to be included, as well as the tracts and the network itself. The currently running pipeline (dmri_connectivity_advanced.py) is also scraped and included in the CFF file. This makes it easy for the user to examine the entire processing pathway used to generate the end product. 
""" mapping.connect([(giftiSurfaces, CFFConverter,[("out","gifti_surfaces")])]) mapping.connect([(giftiLabels, CFFConverter,[("out","gifti_labels")])]) mapping.connect([(creatematrix, CFFConverter,[("matrix_files","gpickled_networks")])]) mapping.connect([(niftiVolumes, CFFConverter,[("out","nifti_volumes")])]) mapping.connect([(fiberDataArrays, CFFConverter,[("out","data_files")])]) mapping.connect([(creatematrix, CFFConverter,[("filtered_tractographies","tract_files")])]) mapping.connect([(inputnode, CFFConverter,[("subject_id","title")])]) """ The graph theoretical metrics are computed using the networkx workflow and placed in another CFF file """ mapping.connect([(inputnode, networkx,[("subject_id","inputnode.extra_field")])]) mapping.connect([(creatematrix, networkx,[("intersection_matrix_file","inputnode.network_file")])]) mapping.connect([(networkx, NxStatsCFFConverter,[("outputnode.network_files","gpickled_networks")])]) mapping.connect([(giftiSurfaces, NxStatsCFFConverter,[("out","gifti_surfaces")])]) mapping.connect([(giftiLabels, NxStatsCFFConverter,[("out","gifti_labels")])]) mapping.connect([(niftiVolumes, NxStatsCFFConverter,[("out","nifti_volumes")])]) mapping.connect([(fiberDataArrays, NxStatsCFFConverter,[("out","data_files")])]) mapping.connect([(inputnode, NxStatsCFFConverter,[("subject_id","title")])]) mapping.connect([(inputnode, cmats_to_csv,[("subject_id","inputnode.extra_field")])]) mapping.connect([(creatematrix, cmats_to_csv,[("matlab_matrix_files","inputnode.matlab_matrix_files")])]) """ Create a higher-level workflow ------------------------------ Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes declared at the beginning. Our tutorial is now extensible to any arbitrary number of subjects by simply adding their names to the subject list and their data to the proper folders. """ connectivity = pe.Workflow(name="connectivity") connectivity.base_dir = op.abspath('dmri_connectivity_advanced') connectivity.connect([ (infosource,datasource,[('subject_id', 'subject_id')]), (datasource,mapping,[('dwi','inputnode.dwi'), ('bvals','inputnode.bvals'), ('bvecs','inputnode.bvecs') ]), (infosource,mapping,[('subject_id','inputnode.subject_id')]) ]) """ The following functions run the whole workflow and produce a .dot and .png graph of the processing pipeline. """ if __name__ == '__main__': connectivity.run() connectivity.write_graph() nipype-0.9.2/examples/dmri_dtk_dti.py000077500000000000000000000153511227300005300176530ustar00rootroot00000000000000#!/usr/bin/env python """ ================================== dMRI: DTI - Diffusion Toolkit, FSL ================================== A pipeline example that uses several interfaces to perform analysis on diffusion weighted images using Diffusion Toolkit tools. This tutorial is based on the 2010 FSL course and uses data freely available at the FSL website at: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz More details can be found at http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/fdt/index.htm In order to run this tutorial you need to have Diffusion Toolkit and FSL tools installed and accessible from matlab/command line. Check by calling fslinfo and dtk from the command line. Tell python where to find the appropriate functions. 
""" import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.diffusion_toolkit as dtk import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import os # system functions from nipype.workflows.dmri.fsl.dti import create_eddy_correct_pipeline """ Confirm package dependencies are installed. (This is only for the tutorial, rarely would you put this in your own code.) """ from nipype.utils.misc import package_check package_check('numpy', '1.3', 'tutorial1') package_check('scipy', '0.7', 'tutorial1') package_check('networkx', '1.0', 'tutorial1') package_check('IPython', '0.10', 'tutorial1') """ Setting up workflows -------------------- This is a generic workflow for DTI data analysis using the FSL Data specific components ------------------------ The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``dwis1`` and ``dwis2``. Each subject directory contains each of the following files: bvec, bval, diffusion weighted data, a set of target masks, a seed file, and a transformation matrix. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``dwi`` or ``bvals``). These fields become the output fields of the ``datasource`` node in the pipeline. Specify the subject directories """ subject_list = ['subj1'] """ Map field names to individual subject runs """ info = dict(dwi=[['subject_id', 'data']], bvecs=[['subject_id','bvecs']], bvals=[['subject_id','bvals']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataGrabber` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.engine.Node` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. 
""" datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=info.keys()), name = 'datasource') datasource.inputs.template = "%s/%s" # This needs to point to the fdt folder you can find after extracting # http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/') datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz') datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Setup for Diffusion Tensor Computation -------------------------------------- Here we will create a generic workflow for DTI computation """ computeTensor = pe.Workflow(name='computeTensor') """ extract the volume with b=0 (nodif_brain) """ fslroi = pe.Node(interface=fsl.ExtractROI(),name='fslroi') fslroi.inputs.t_min=0 fslroi.inputs.t_size=1 """ create a brain mask from the nodif_brain """ bet = pe.Node(interface=fsl.BET(),name='bet') bet.inputs.mask=True bet.inputs.frac=0.34 """ correct the diffusion weighted images for eddy_currents """ eddycorrect = create_eddy_correct_pipeline('eddycorrect') eddycorrect.inputs.inputnode.ref_num=0 """ compute the diffusion tensor in each voxel """ dtifit = pe.Node(interface=dtk.DTIRecon(),name='dtifit') """ connect all the nodes for this workflow """ computeTensor.connect([ (fslroi,bet,[('roi_file','in_file')]), (eddycorrect,dtifit,[('outputnode.eddy_corrected','DWI')]) ]) """ Setup for Tracktography ----------------------- Here we will create a workflow to enable deterministic tracktography """ tractography = pe.Workflow(name='tractography') dtk_tracker = pe.Node(interface=dtk.DTITracker(), name="dtk_tracker") dtk_tracker.inputs.invert_x = True smooth_trk = pe.Node(interface=dtk.SplineFilter(), name="smooth_trk") smooth_trk.inputs.step_length = 0.5 """ connect all the nodes for this workflow """ tractography.connect([ (dtk_tracker, smooth_trk, [('track_file', 'track_file')]) ]) """ Setup data storage area """ datasink = pe.Node(interface=nio.DataSink(),name='datasink') datasink.inputs.base_directory = os.path.abspath('dtiresults') def getstripdir(subject_id): return os.path.join(os.path.abspath('data/workingdir/dwiproc'),'_subject_id_%s' % subject_id) """ Setup the pipeline that combines the two workflows: tractography and computeTensor ---------------------------------------------------------------------------------- """ dwiproc = pe.Workflow(name="dwiproc") dwiproc.base_dir = os.path.abspath('dtk_dti_tutorial') dwiproc.connect([ (infosource,datasource,[('subject_id', 'subject_id')]), (datasource,computeTensor,[('dwi','fslroi.in_file'), ('bvals','dtifit.bvals'), ('bvecs','dtifit.bvecs'), ('dwi','eddycorrect.inputnode.in_file')]), (computeTensor,tractography,[('bet.mask_file','dtk_tracker.mask1_file'), ('dtifit.tensor','dtk_tracker.tensor_file') ]) ]) if __name__ == '__main__': dwiproc.run() dwiproc.write_graph() nipype-0.9.2/examples/dmri_dtk_odf.py000077500000000000000000000157111227300005300176430ustar00rootroot00000000000000#!/usr/bin/env python """ ==================================== dMRI: HARDI - Diffusion Toolkit, FSL ==================================== A pipeline example that uses several interfaces to perform analysis on diffusion weighted images using Diffusion Toolkit tools. 
This tutorial is based on the 2010 FSL course and uses data freely available at the FSL website at: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz More details can be found at http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/fdt/index.htm In order to run this tutorial you need to have Diffusion Toolkit and FSL tools installed and accessible from matlab/command line. Check by calling fslinfo and dtk from the command line. Tell python where to find the appropriate functions. """ import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.diffusion_toolkit as dtk import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import os # system functions from nipype.workflows.dmri.fsl.dti import create_eddy_correct_pipeline """ Confirm package dependencies are installed. (This is only for the tutorial, rarely would you put this in your own code.) """ from nipype.utils.misc import package_check package_check('numpy', '1.3', 'tutorial1') package_check('scipy', '0.7', 'tutorial1') package_check('networkx', '1.0', 'tutorial1') package_check('IPython', '0.10', 'tutorial1') """ Setting up workflows -------------------- This is a generic workflow for DTI data analysis using the FSL Data specific components ------------------------ The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``dwis1`` and ``dwis2``. Each subject directory contains each of the following files: bvec, bval, diffusion weighted data, a set of target masks, a seed file, and a transformation matrix. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``dwi`` or ``bvals``). These fields become the output fields of the ``datasource`` node in the pipeline. Specify the subject directories """ subject_list = ['siemens_hardi_test'] """ Map field names to individual subject runs """ info = dict(dwi=[['subject_id', 'siemens_hardi_test_data']], bvecs=[['subject_id','siemens_hardi_test_data.bvec']], bvals=[['subject_id','siemens_hardi_test_data.bval']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataGrabber` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.engine.Node` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. 
""" datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=info.keys()), name = 'datasource') datasource.inputs.template = "%s/%s" # This needs to point to the fdt folder you can find after extracting # http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz datasource.inputs.base_directory = os.path.abspath('data') datasource.inputs.field_template = dict(dwi='%s/%s.nii') datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Setup for ODF Computation -------------------------------------- Here we will create a generic workflow for ODF computation """ compute_ODF = pe.Workflow(name='compute_ODF') """ extract the volume with b=0 (nodif_brain) """ fslroi = pe.Node(interface=fsl.ExtractROI(),name='fslroi') fslroi.inputs.t_min=0 fslroi.inputs.t_size=1 """ create a brain mask from the nodif_brain """ bet = pe.Node(interface=fsl.BET(),name='bet') bet.inputs.mask=True bet.inputs.frac=0.34 """ correct the diffusion weighted images for eddy_currents """ eddycorrect = create_eddy_correct_pipeline('eddycorrect') eddycorrect.inputs.inputnode.ref_num=0 hardi_mat = pe.Node(interface=dtk.HARDIMat(),name='hardi_mat') odf_recon = pe.Node(interface=dtk.ODFRecon(),name='odf_recon') """ connect all the nodes for this workflow """ compute_ODF.connect([ (fslroi,bet,[('roi_file','in_file')]), (eddycorrect, odf_recon,[('outputnode.eddy_corrected','DWI')]), (eddycorrect, hardi_mat,[('outputnode.eddy_corrected','reference_file')]), (hardi_mat, odf_recon, [('out_file', 'matrix')]) ]) """ Setup for Tracktography ----------------------- Here we will create a workflow to enable deterministic tracktography """ tractography = pe.Workflow(name='tractography') odf_tracker = pe.Node(interface=dtk.ODFTracker(), name="odf_tracker") smooth_trk = pe.Node(interface=dtk.SplineFilter(), name="smooth_trk") smooth_trk.inputs.step_length = 1 """ connect all the nodes for this workflow """ tractography.connect([ (odf_tracker, smooth_trk, [('track_file', 'track_file')]) ]) """ Setup the pipeline that combines the two workflows: tractography and compute_ODF ---------------------------------------------------------------------------------- """ dwiproc = pe.Workflow(name="dwiproc") dwiproc.base_dir = os.path.abspath('dtk_odf_tutorial') dwiproc.connect([ (infosource,datasource,[('subject_id', 'subject_id')]), (datasource,compute_ODF,[('dwi','fslroi.in_file'), ('bvals','hardi_mat.bvals'), ('bvecs','hardi_mat.bvecs'), ('dwi','eddycorrect.inputnode.in_file')]), (compute_ODF,tractography,[('bet.mask_file','odf_tracker.mask1_file'), ('odf_recon.ODF','odf_tracker.ODF'), ('odf_recon.max','odf_tracker.max') ]) ]) dwiproc.inputs.compute_ODF.hardi_mat.oblique_correction = True dwiproc.inputs.compute_ODF.odf_recon.n_directions = 31 dwiproc.inputs.compute_ODF.odf_recon.n_b0 = 5 dwiproc.inputs.compute_ODF.odf_recon.n_output_directions = 181 if __name__ == '__main__': dwiproc.run() dwiproc.write_graph() nipype-0.9.2/examples/dmri_fsl_dti.py000077500000000000000000000225001227300005300176470ustar00rootroot00000000000000#!/usr/bin/env python """ =============== dMRI [DTI, FSL] =============== A pipeline example that uses several interfaces to perform analysis on diffusion weighted images using FSL FDT tools. 
This tutorial is based on the 2010 FSL course and uses data freely available at the FSL website at: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz More details can be found at http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/fdt/index.htm In order to run this tutorial you need to have fsl tools installed and accessible from matlab/command line. Check by calling fslinfo from the command line. Tell python where to find the appropriate functions. """ import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import os # system functions from nipype.workflows.dmri.fsl.dti import create_eddy_correct_pipeline,\ create_bedpostx_pipeline """ Confirm package dependencies are installed. (This is only for the tutorial, rarely would you put this in your own code.) """ from nipype.utils.misc import package_check package_check('numpy', '1.3', 'tutorial1') package_check('scipy', '0.7', 'tutorial1') package_check('networkx', '1.0', 'tutorial1') package_check('IPython', '0.10', 'tutorial1') """ Setting up workflows -------------------- This is a generic workflow for DTI data analysis using the FSL Data specific components ------------------------ The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``dwis1`` and ``dwis2``. Each subject directory contains each of the following files: bvec, bval, diffusion weighted data, a set of target masks, a seed file, and a transformation matrix. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``dwi`` or ``bvals``). These fields become the output fields of the ``datasource`` node in the pipeline. Specify the subject directories """ subject_list = ['subj1'] """ Map field names to individual subject runs """ info = dict(dwi=[['subject_id', 'data']], bvecs=[['subject_id','bvecs']], bvals=[['subject_id','bvals']], seed_file = [['subject_id','MASK_average_thal_right']], target_masks = [['subject_id',['MASK_average_M1_right', 'MASK_average_S1_right', 'MASK_average_occipital_right', 'MASK_average_pfc_right', 'MASK_average_pmc_right', 'MASK_average_ppc_right', 'MASK_average_temporal_right']]]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """ Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataGrabber` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.engine.Node` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. 
""" datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=info.keys()), name = 'datasource') datasource.inputs.template = "%s/%s" # This needs to point to the fdt folder you can find after extracting # http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/') datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz', seed_file="%s.bedpostX/%s.nii.gz", target_masks="%s.bedpostX/%s.nii.gz") datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Setup for Diffusion Tensor Computation -------------------------------------- Here we will create a generic workflow for DTI computation """ computeTensor = pe.Workflow(name='computeTensor') """ extract the volume with b=0 (nodif_brain) """ fslroi = pe.Node(interface=fsl.ExtractROI(),name='fslroi') fslroi.inputs.t_min=0 fslroi.inputs.t_size=1 """ create a brain mask from the nodif_brain """ bet = pe.Node(interface=fsl.BET(),name='bet') bet.inputs.mask=True bet.inputs.frac=0.34 """ correct the diffusion weighted images for eddy_currents """ eddycorrect = create_eddy_correct_pipeline('eddycorrect') eddycorrect.inputs.inputnode.ref_num=0 """ compute the diffusion tensor in each voxel """ dtifit = pe.Node(interface=fsl.DTIFit(),name='dtifit') """ connect all the nodes for this workflow """ computeTensor.connect([ (fslroi,bet,[('roi_file','in_file')]), (eddycorrect, dtifit,[('outputnode.eddy_corrected','dwi')]), (infosource, dtifit,[['subject_id','base_name']]), (bet,dtifit,[('mask_file','mask')]) ]) """ Setup for Tracktography ----------------------- Here we will create a workflow to enable probabilistic tracktography and hard segmentation of the seed region """ tractography = pe.Workflow(name='tractography') tractography.base_dir = os.path.abspath('fsl_dti_tutorial') """ estimate the diffusion parameters: phi, theta, and so on """ bedpostx = create_bedpostx_pipeline() bedpostx.get_node("xfibres").iterables = ("n_fibres",[1,2]) flirt = pe.Node(interface=fsl.FLIRT(), name='flirt') flirt.inputs.in_file = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz') flirt.inputs.dof = 12 """ perform probabilistic tracktography """ probtrackx = pe.Node(interface=fsl.ProbTrackX(),name='probtrackx') probtrackx.inputs.mode='seedmask' probtrackx.inputs.c_thresh = 0.2 probtrackx.inputs.n_steps=2000 probtrackx.inputs.step_length=0.5 probtrackx.inputs.n_samples=5000 probtrackx.inputs.opd=True probtrackx.inputs.os2t=True probtrackx.inputs.loop_check=True """ perform hard segmentation on the output of probtrackx """ findthebiggest = pe.Node(interface=fsl.FindTheBiggest(),name='findthebiggest') """ connect all the nodes for this workflow """ tractography.add_nodes([bedpostx, flirt]) tractography.connect([(bedpostx,probtrackx,[('outputnode.thsamples','thsamples'), ('outputnode.phsamples','phsamples'), ('outputnode.fsamples','fsamples') ]), (probtrackx,findthebiggest,[('targets','in_files')]), (flirt, probtrackx, [('out_matrix_file','xfm')]) ]) """ Setup data storage area """ datasink = pe.Node(interface=nio.DataSink(),name='datasink') datasink.inputs.base_directory = os.path.abspath('dtiresults') def getstripdir(subject_id): import os return os.path.join(os.path.abspath('data/workingdir/dwiproc'),'_subject_id_%s' % subject_id) """ Setup the pipeline that combines the two workflows: tractography and computeTensor ---------------------------------------------------------------------------------- """ dwiproc = pe.Workflow(name="dwiproc") 
dwiproc.base_dir = os.path.abspath('fsl_dti_tutorial') dwiproc.connect([ (infosource,datasource,[('subject_id', 'subject_id')]), (datasource,computeTensor,[('dwi','fslroi.in_file'), ('bvals','dtifit.bvals'), ('bvecs','dtifit.bvecs'), ('dwi','eddycorrect.inputnode.in_file')]), (datasource,tractography,[('bvals','bedpostx.inputnode.bvals'), ('bvecs','bedpostx.inputnode.bvecs'), ('seed_file','probtrackx.seed'), ('target_masks','probtrackx.target_masks') ]), (computeTensor,tractography,[('eddycorrect.outputnode.eddy_corrected','bedpostx.inputnode.dwi'), ('bet.mask_file','bedpostx.inputnode.mask'), ('bet.mask_file','probtrackx.mask'), ('fslroi.roi_file','flirt.reference')]), (infosource, datasink,[('subject_id','container'), (('subject_id', getstripdir),'strip_dir')]), (tractography,datasink,[('findthebiggest.out_file','fbiggest.@biggestsegmentation')]) ]) if __name__ == '__main__': dwiproc.run() dwiproc.write_graph() nipype-0.9.2/examples/dmri_group_connectivity_camino.py000066400000000000000000000144301227300005300235030ustar00rootroot00000000000000""" ================================================== dMRI: Group connectivity - Camino, FSL, FreeSurfer ================================================== Introduction ============ This script, dmri_group_connectivity_camino.py, runs group-based connectivity analysis using the dmri.camino.connectivity_mapping Nipype workflow. Further detail on the processing can be found in :doc:`dmri_connectivity`. This tutorial can be run using: python dmri_group_connectivity_camino.py We perform this analysis using one healthy subject and two subjects who suffer from Parkinson's disease. The whole package (960 mb as .tar.gz / 1.3 gb uncompressed) including the Freesurfer directories for these subjects, can be acquired from here: * http://db.tt/b6F1t0QV A data package containing the outputs of this pipeline can be obtained from here: * http://db.tt/kNvAI751 Along with Camino, Camino-Trackvis, FSL, and Freesurfer, you must also have the Connectome File Format library installed as well as the Connectome Mapper. * Camino: http://web4.cs.ucl.ac.uk/research/medic/camino/pmwiki/pmwiki.php?n=Main.HomePage * Camino-Trackvis: http://www.nitrc.org/projects/camino-trackvis/ * FSL: http://www.fmrib.ox.ac.uk/fsl/ * Freesurfer: http://surfer.nmr.mgh.harvard.edu/ * CTMK: http://www.cmtk.org/ * CFF: sudo apt-get install python-cfflib Or on github at: * CFFlib: https://github.com/LTS5/cfflib * CMP: https://github.com/LTS5/cmp Output data can be visualized in ConnectomeViewer, TrackVis, and anything that can view Nifti files. * ConnectomeViewer: https://github.com/LTS5/connectomeviewer * TrackVis: http://trackvis.org/ The fiber data is available in Numpy arrays, and the connectivity matrix is also produced as a MATLAB matrix. Import the workflows -------------------- First, we import the necessary modules from nipype. """ import nipype.interfaces.fsl as fsl import nipype.interfaces.freesurfer as fs # freesurfer import os.path as op # system functions import cmp from nipype.workflows.dmri.camino.group_connectivity import create_group_connectivity_pipeline from nipype.workflows.dmri.connectivity.group_connectivity import (create_merge_networks_by_group_workflow, create_merge_group_networks_workflow, create_average_networks_by_group_workflow) """ Set the proper directories -------------------------- First, we import the necessary modules from nipype. 
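The FreeSurfer home directory is hard-coded just below; if the standard ``FREESURFER_HOME`` environment variable is set on your system, a slightly more portable variant is the following sketch (which assumes that variable exists and falls back to the hard-coded path otherwise)::

    import os
    import os.path as op

    fs_dir = os.environ.get('FREESURFER_HOME', '/usr/local/freesurfer')
    subjects_dir = op.abspath('groupcondatapackage/subjects/')
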
""" fs_dir = op.abspath('/usr/local/freesurfer') subjects_dir = op.abspath('groupcondatapackage/subjects/') data_dir = op.abspath('groupcondatapackage/data/') fs.FSCommand.set_default_subjects_dir(subjects_dir) fsl.FSLCommand.set_default_output_type('NIFTI') """ Define the groups ----------------- Here we define the groups for this study. We would like to search for differences between the healthy subject and the two vegetative patients. The group list is defined as a Python dictionary (see http://docs.python.org/tutorial/datastructures.html), with group IDs ('controls', 'parkinsons') as keys, and subject/patient names as values. We set the main output directory as 'groupcon'. """ group_list = {} group_list['controls'] = ['cont17'] group_list['parkinsons'] = ['pat10', 'pat20'] """ The output directory must be named as well. """ global output_dir output_dir = op.abspath('dmri_group_connectivity_camino') """ Main processing loop ==================== The title for the final grouped-network connectome file is dependent on the group names. The resulting file for this example is 'parkinsons-controls.cff'. The following code implements the format a-b-c-...x.cff for an arbitary number of groups. .. warning:: The 'info' dictionary below is used to define the input files. In this case, the diffusion weighted image contains the string 'dwi'. The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme. This line creates the processing workflow given the information input about the groups and subjects. .. seealso:: * nipype/workflows/dmri/mrtrix/group_connectivity.py * nipype/workflows/dmri/camino/connectivity_mapping.py * :doc:`dmri_connectivity` The purpose of the second-level workflow is simple: It is used to merge each subject's CFF file into one, so that there is a single file containing all of the networks for each group. This can be useful for performing Network Brain Statistics using the NBS plugin in ConnectomeViewer. .. seealso:: http://www.connectomeviewer.org/documentation/users/tutorials/tut_nbs.html """ title = '' for idx, group_id in enumerate(group_list.keys()): title += group_id if not idx == len(group_list.keys()) - 1: title += '-' info = dict(dwi=[['subject_id', 'dti']], bvecs=[['subject_id', 'bvecs']], bvals=[['subject_id', 'bvals']]) l1pipeline = create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, info) # Here we define the parcellation scheme and the number of tracks to produce parcellation_scheme = 'NativeFreesurfer' cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = parcellation_scheme l1pipeline.inputs.connectivity.inputnode.resolution_network_file = cmp_config._get_lausanne_parcellation(parcellation_scheme)['freesurferaparc']['node_information_graphml'] l1pipeline.run() l1pipeline.write_graph(format='eps', graph2use='flat') # The second-level pipeline is created here l2pipeline = create_merge_networks_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir) l2pipeline.run() l2pipeline.write_graph(format='eps', graph2use='flat') """ Now that the for loop is complete there are two grouped CFF files each containing the appropriate subjects. It is also convenient to have every subject in a single CFF file, so that is what the third-level pipeline does. 
""" l3pipeline = create_merge_group_networks_workflow(group_list, data_dir, subjects_dir, output_dir, title) l3pipeline.run() l3pipeline.write_graph(format='eps', graph2use='flat') """ The fourth and final workflow averages the networks and saves them in another CFF file """ l4pipeline = create_average_networks_by_group_workflow(group_list, data_dir, subjects_dir, output_dir, title) l4pipeline.run() l4pipeline.write_graph(format='eps', graph2use='flat') nipype-0.9.2/examples/dmri_group_connectivity_mrtrix.py000066400000000000000000000165101227300005300235630ustar00rootroot00000000000000""" ================================================== dMRI: Group connectivity - MRtrix, FSL, FreeSurfer ================================================== Introduction ============ This script, dmri_group_connectivity_mrtrix.py, runs group-based connectivity analysis using the dmri.mrtrix.connectivity_mapping Nipype workflow. Further detail on the processing can be found in :doc:`dmri_connectivity_advanced`. This tutorial can be run using: python dmri_group_connectivity_mrtrix.py We perform this analysis using one healthy subject and two subjects who suffer from Parkinson's disease. The whole package (960 mb as .tar.gz / 1.3 gb uncompressed) including the Freesurfer directories for these subjects, can be acquired from here: * http://db.tt/b6F1t0QV A data package containing the outputs of this pipeline can be obtained from here: * http://db.tt/elmMnIt1 Along with MRtrix, FSL, and Freesurfer, you must also have the Connectome File Format library installed as well as the Connectome Mapper (cmp). * MRtrix: http://www.brain.org.au/software/mrtrix/ * FSL: http://www.fmrib.ox.ac.uk/fsl/ * Freesurfer: http://surfer.nmr.mgh.harvard.edu/ * CTMK: http://www.cmtk.org/ * CFF: sudo apt-get install python-cfflib Or on github at: * CFFlib: https://github.com/LTS5/cfflib * CMP: https://github.com/LTS5/cmp Output data can be visualized in ConnectomeViewer, TrackVis, Gephi, the MRtrix Viewer (mrview), and anything that can view Nifti files. * ConnectomeViewer: https://github.com/LTS5/connectomeviewer * TrackVis: http://trackvis.org/ * Gephi: http://gephi.org/ The fiber data is available in Numpy arrays, and the connectivity matrix is also produced as a MATLAB matrix. Import the workflows -------------------- First, we import the necessary modules from nipype. """ import nipype.interfaces.fsl as fsl import nipype.interfaces.freesurfer as fs # freesurfer import os.path as op # system functions import cmp from nipype.workflows.dmri.mrtrix.group_connectivity import create_group_connectivity_pipeline from nipype.workflows.dmri.connectivity.group_connectivity import (create_merge_network_results_by_group_workflow, create_merge_group_network_results_workflow, create_average_networks_by_group_workflow) """ Set the proper directories -------------------------- First, we import the necessary modules from nipype. """ subjects_dir = op.abspath('groupcondatapackage/subjects/') data_dir = op.abspath('groupcondatapackage/data/') fs.FSCommand.set_default_subjects_dir(subjects_dir) fsl.FSLCommand.set_default_output_type('NIFTI') """ Define the groups ----------------- Here we define the groups for this study. We would like to search for differences between the healthy subject and the two vegetative patients. The group list is defined as a Python dictionary (see http://docs.python.org/tutorial/datastructures.html), with group IDs ('controls', 'parkinsons') as keys, and subject/patient names as values. 
We set the main output directory as 'groupcon'. """ group_list = {} group_list['controls'] = ['cont17'] group_list['parkinsons'] = ['pat10', 'pat20'] """ The output directory must be named as well. """ global output_dir output_dir = op.abspath('dmri_group_connectivity_mrtrix') """ Main processing loop ==================== The title for the final grouped-network connectome file is dependent on the group names. The resulting file for this example is 'parkinsons-controls.cff'. The following code implements the format a-b-c-...x.cff for an arbitary number of groups. .. warning:: The 'info' dictionary below is used to define the input files. In this case, the diffusion weighted image contains the string 'dti'. The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme. The workflow is created given the information input about the groups and subjects. .. seealso:: * nipype/workflows/dmri/mrtrix/group_connectivity.py * nipype/workflows/dmri/mrtrix/connectivity_mapping.py * :doc:`dmri_connectivity_advanced` We set values for absolute threshold used on the fractional anisotropy map. This is done in order to identify single-fiber voxels. In brains with more damage, however, it may be necessary to reduce the threshold, since their brains are have lower average fractional anisotropy values. We invert the b-vectors in the encoding file, and set the maximum harmonic order of the pre-tractography spherical deconvolution step. This is done to show how to set inputs that will affect both groups. Next we create and run the second-level pipeline. The purpose of this workflow is simple: It is used to merge each subject's CFF file into one, so that there is a single file containing all of the networks for each group. This can be useful for performing Network Brain Statistics using the NBS plugin in ConnectomeViewer. .. 
seealso:: http://www.connectomeviewer.org/documentation/users/tutorials/tut_nbs.html """ title = '' for idx, group_id in enumerate(group_list.keys()): title += group_id if not idx == len(group_list.keys()) - 1: title += '-' info = dict(dwi=[['subject_id', 'dti']], bvecs=[['subject_id', 'bvecs']], bvals=[['subject_id', 'bvals']]) l1pipeline = create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, info) # Here with invert the b-vectors in the Y direction and set the maximum harmonic order of the # spherical deconvolution step l1pipeline.inputs.connectivity.mapping.fsl2mrtrix.invert_y = True l1pipeline.inputs.connectivity.mapping.csdeconv.maximum_harmonic_order = 6 # Here we define the parcellation scheme and the number of tracks to produce parcellation_name = 'scale500' l1pipeline.inputs.connectivity.mapping.Parcellate.parcellation_name = parcellation_name cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = "Lausanne2008" l1pipeline.inputs.connectivity.mapping.inputnode_within.resolution_network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml'] l1pipeline.inputs.connectivity.mapping.probCSDstreamtrack.desired_number_of_tracks = 100000 l1pipeline.run() l1pipeline.write_graph(format='eps', graph2use='flat') # The second-level pipeline is created here l2pipeline = create_merge_network_results_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir) l2pipeline.inputs.l2inputnode.network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml'] l2pipeline.run() l2pipeline.write_graph(format='eps', graph2use='flat') """ Now that the for loop is complete there are two grouped CFF files each containing the appropriate subjects. It is also convenient to have every subject in a single CFF file, so that is what the third-level pipeline does. """ l3pipeline = create_merge_group_network_results_workflow(group_list, data_dir, subjects_dir, output_dir, title) l3pipeline.run() l3pipeline.write_graph(format='eps', graph2use='flat') """ The fourth and final workflow averages the networks and saves them in another CFF file """ l4pipeline = create_average_networks_by_group_workflow(group_list, data_dir, subjects_dir, output_dir, title) l4pipeline.run() l4pipeline.write_graph(format='eps', graph2use='flat') nipype-0.9.2/examples/dmri_mrtrix_dti.py000077500000000000000000000243401227300005300204140ustar00rootroot00000000000000#!/usr/bin/env python """ ======================= dMRI: DTI - MRtrix, FSL ======================= Introduction ============ This script, dmri_mrtrix_dti.py, demonstrates the ability to perform advanced diffusion analysis in a Nipype pipeline. python dmri_mrtrix_dti.py We perform this analysis using the FSL course data, which can be acquired from here: * http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz Import necessary modules from nipype. """ import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.mrtrix as mrtrix #<---- The important new part! 
import nipype.interfaces.fsl as fsl import nipype.algorithms.misc as misc import os, os.path as op # system functions fsl.FSLCommand.set_default_output_type('NIFTI') """ This needs to point to the fdt folder you can find after extracting * http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz """ data_dir = op.abspath(op.join(op.curdir,'exdata/')) subject_list = ['subj1'] """ Use infosource node to loop through the subject list and define the input files. For our purposes, these are the diffusion-weighted MR image, b vectors, and b values. """ infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") infosource.iterables = ('subject_id', subject_list) info = dict(dwi=[['subject_id', 'data']], bvecs=[['subject_id','bvecs']], bvals=[['subject_id','bvals']]) """ Use datasource node to perform the actual data grabbing. Templates for the associated images are used to obtain the correct images. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=info.keys()), name = 'datasource') datasource.inputs.template = "%s/%s" datasource.inputs.base_directory = data_dir datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz') datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ An inputnode is used to pass the data obtained by the data grabber to the actual processing functions """ inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode") """ Diffusion processing nodes -------------------------- .. seealso:: dmri_connectivity_advanced.py Tutorial with further detail on using MRtrix tractography for connectivity analysis http://www.brain.org.au/software/mrtrix/index.html MRtrix's online documentation b-values and b-vectors stored in FSL's format are converted into a single encoding file for MRTrix. """ fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(),name='fsl2mrtrix') """ Tensors are fitted to each voxel in the diffusion-weighted image and from these three maps are created: * Major eigenvector in each voxel * Apparent diffusion coefficient * Fractional anisotropy """ gunzip = pe.Node(interface=misc.Gunzip(), name='gunzip') dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(),name='dwi2tensor') tensor2vector = pe.Node(interface=mrtrix.Tensor2Vector(),name='tensor2vector') tensor2adc = pe.Node(interface=mrtrix.Tensor2ApparentDiffusion(),name='tensor2adc') tensor2fa = pe.Node(interface=mrtrix.Tensor2FractionalAnisotropy(),name='tensor2fa') """ These nodes are used to create a rough brain mask from the b0 image. The b0 image is extracted from the original diffusion-weighted image, put through a simple thresholding routine, and smoothed using a 3x3 median filter. """ MRconvert = pe.Node(interface=mrtrix.MRConvert(),name='MRconvert') MRconvert.inputs.extract_at_axis = 3 MRconvert.inputs.extract_at_coordinate = [0] threshold_b0 = pe.Node(interface=mrtrix.Threshold(),name='threshold_b0') median3d = pe.Node(interface=mrtrix.MedianFilter3D(),name='median3d') """ The brain mask is also used to help identify single-fiber voxels. This is done by passing the brain mask through two erosion steps, multiplying the remaining mask with the fractional anisotropy map, and thresholding the result to obtain some highly anisotropic within-brain voxels. 
""" erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(),name='erode_mask_firstpass') erode_mask_secondpass = pe.Node(interface=mrtrix.Erode(),name='erode_mask_secondpass') MRmultiply = pe.Node(interface=mrtrix.MRMultiply(),name='MRmultiply') MRmult_merge = pe.Node(interface=util.Merge(2), name="MRmultiply_merge") threshold_FA = pe.Node(interface=mrtrix.Threshold(),name='threshold_FA') threshold_FA.inputs.absolute_threshold_value = 0.7 """ For whole-brain tracking we also require a broad white-matter seed mask. This is created by generating a white matter mask, given a brainmask, and thresholding it at a reasonably high level. """ bet = pe.Node(interface=fsl.BET(mask = True), name = 'bet_b0') gen_WM_mask = pe.Node(interface=mrtrix.GenerateWhiteMatterMask(),name='gen_WM_mask') threshold_wmmask = pe.Node(interface=mrtrix.Threshold(),name='threshold_wmmask') threshold_wmmask.inputs.absolute_threshold_value = 0.4 """ The spherical deconvolution step depends on the estimate of the response function in the highly anisotropic voxels we obtained above. .. warning:: For damaged or pathological brains one should take care to lower the maximum harmonic order of these steps. """ estimateresponse = pe.Node(interface=mrtrix.EstimateResponseForSH(),name='estimateresponse') estimateresponse.inputs.maximum_harmonic_order = 6 csdeconv = pe.Node(interface=mrtrix.ConstrainedSphericalDeconvolution(),name='csdeconv') csdeconv.inputs.maximum_harmonic_order = 6 """ Finally, we track probabilistically using the orientation distribution functions obtained earlier. The tracts are then used to generate a tract-density image, and they are also converted to TrackVis format. """ probCSDstreamtrack = pe.Node(interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(),name='probCSDstreamtrack') probCSDstreamtrack.inputs.inputmodel = 'SD_PROB' probCSDstreamtrack.inputs.maximum_number_of_tracks = 150000 tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(),name='tracks2prob') tracks2prob.inputs.colour = True tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(),name='tck2trk') """ Creating the workflow --------------------- In this section we connect the nodes for the diffusion processing. """ tractography = pe.Workflow(name='tractography') tractography.connect([(inputnode, fsl2mrtrix, [("bvecs", "bvec_file"), ("bvals", "bval_file")])]) tractography.connect([(inputnode, gunzip,[("dwi","in_file")])]) tractography.connect([(gunzip, dwi2tensor,[("out_file","in_file")])]) tractography.connect([(fsl2mrtrix, dwi2tensor,[("encoding_file","encoding_file")])]) tractography.connect([(dwi2tensor, tensor2vector,[['tensor','in_file']]), (dwi2tensor, tensor2adc,[['tensor','in_file']]), (dwi2tensor, tensor2fa,[['tensor','in_file']]), ]) tractography.connect([(tensor2fa, MRmult_merge,[("FA","in1")])]) """ This block creates the rough brain mask to be multiplied, mulitplies it with the fractional anisotropy image, and thresholds it to get the single-fiber voxels. 
""" tractography.connect([(gunzip, MRconvert,[("out_file","in_file")])]) tractography.connect([(MRconvert, threshold_b0,[("converted","in_file")])]) tractography.connect([(threshold_b0, median3d,[("out_file","in_file")])]) tractography.connect([(median3d, erode_mask_firstpass,[("out_file","in_file")])]) tractography.connect([(erode_mask_firstpass, erode_mask_secondpass,[("out_file","in_file")])]) tractography.connect([(erode_mask_secondpass, MRmult_merge,[("out_file","in2")])]) tractography.connect([(MRmult_merge, MRmultiply,[("out","in_files")])]) tractography.connect([(MRmultiply, threshold_FA,[("out_file","in_file")])]) """ Here the thresholded white matter mask is created for seeding the tractography. """ tractography.connect([(gunzip, bet,[("out_file","in_file")])]) tractography.connect([(gunzip, gen_WM_mask,[("out_file","in_file")])]) tractography.connect([(bet, gen_WM_mask,[("mask_file","binary_mask")])]) tractography.connect([(fsl2mrtrix, gen_WM_mask,[("encoding_file","encoding_file")])]) tractography.connect([(gen_WM_mask, threshold_wmmask,[("WMprobabilitymap","in_file")])]) """ Next we estimate the fiber response distribution. """ tractography.connect([(gunzip, estimateresponse,[("out_file","in_file")])]) tractography.connect([(fsl2mrtrix, estimateresponse,[("encoding_file","encoding_file")])]) tractography.connect([(threshold_FA, estimateresponse,[("out_file","mask_image")])]) """ Run constrained spherical deconvolution. """ tractography.connect([(gunzip, csdeconv,[("out_file","in_file")])]) tractography.connect([(gen_WM_mask, csdeconv,[("WMprobabilitymap","mask_image")])]) tractography.connect([(estimateresponse, csdeconv,[("response","response_file")])]) tractography.connect([(fsl2mrtrix, csdeconv,[("encoding_file","encoding_file")])]) """ Connect the tractography and compute the tract density image. """ tractography.connect([(threshold_wmmask, probCSDstreamtrack,[("out_file","seed_file")])]) tractography.connect([(csdeconv, probCSDstreamtrack,[("spherical_harmonics_image","in_file")])]) tractography.connect([(probCSDstreamtrack, tracks2prob,[("tracked","in_file")])]) tractography.connect([(gunzip, tracks2prob,[("out_file","template_file")])]) tractography.connect([(gunzip, tck2trk,[("out_file","image_file")])]) tractography.connect([(probCSDstreamtrack, tck2trk,[("tracked","in_file")])]) """ Finally, we create another higher-level workflow to connect our tractography workflow with the info and datagrabbing nodes declared at the beginning. Our tutorial is now extensible to any arbitrary number of subjects by simply adding their names to the subject list and their data to the proper folders. 
""" dwiproc = pe.Workflow(name="dwiproc") dwiproc.base_dir = os.path.abspath('dmri_mrtrix_dti') dwiproc.connect([ (infosource,datasource,[('subject_id', 'subject_id')]), (datasource,tractography,[('dwi','inputnode.dwi'), ('bvals','inputnode.bvals'), ('bvecs','inputnode.bvecs') ]) ]) if __name__ == '__main__': dwiproc.run() dwiproc.write_graph() nipype-0.9.2/examples/dmri_tbss_nki.py000077500000000000000000000115041227300005300200410ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ========================= dMRI: TBSS on NKI RS data ========================= A pipeline to do a TBSS analysis on the NKI rockland sample data """ from nipype.workflows.dmri.fsl.dti import create_eddy_correct_pipeline from nipype.workflows.dmri.fsl.tbss import create_tbss_non_FA, create_tbss_all """ Tell python where to find the appropriate functions. """ import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import os # system functions fsl.FSLCommand.set_default_output_type('NIFTI') """ You can get the data from: http://fcon_1000.projects.nitrc.org/indi/pro/eNKI_RS_TRT/FrontPage.html """ dataDir = os.path.abspath('nki_rs_data') workingdir = './tbss_example' subjects_list = ['2475376', '3313349', '3808535', '3893245', '8735778', '9630905'] gen_fa = pe.Workflow(name="gen_fa") gen_fa.base_dir = os.path.join(os.path.abspath(workingdir), 'l1') subject_id_infosource = pe.Node(util.IdentityInterface(fields=['subject_id']), name='subject_id_infosource') subject_id_infosource.iterables = ('subject_id', subjects_list) datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['dwi', 'bvec', 'bval']), name='datasource') datasource.inputs.base_directory = os.path.abspath(dataDir) datasource.inputs.template = '%s/session2/DTI_mx_137/dti.%s' datasource.inputs.template_args = dict(dwi=[['subject_id', 'nii.gz']], bvec=[['subject_id', 'bvec']], bval=[['subject_id', 'bval']]) datasource.inputs.sort_filelist = True gen_fa.connect(subject_id_infosource, 'subject_id', datasource, 'subject_id') eddy_correct = create_eddy_correct_pipeline() eddy_correct.inputs.inputnode.ref_num = 0 gen_fa.connect(datasource, 'dwi', eddy_correct, 'inputnode.in_file') bet = pe.Node(interface=fsl.BET(), name='bet') bet.inputs.mask = True bet.inputs.frac = 0.34 gen_fa.connect(eddy_correct, 'pick_ref.out', bet, 'in_file') dtifit = pe.Node(interface=fsl.DTIFit(), name='dtifit') gen_fa.connect(eddy_correct, 'outputnode.eddy_corrected', dtifit, 'dwi') gen_fa.connect(subject_id_infosource, 'subject_id', dtifit, 'base_name') gen_fa.connect(bet, 'mask_file', dtifit, 'mask') gen_fa.connect(datasource, 'bvec', dtifit, 'bvecs') gen_fa.connect(datasource, 'bval', dtifit, 'bvals') datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.join(os.path.abspath(workingdir), 'l1_results') datasink.inputs.parameterization = False gen_fa.connect(dtifit, 'FA', datasink, 'FA') gen_fa.connect(dtifit, 'MD', datasink, 'MD') if __name__ == '__main__': gen_fa.write_graph() gen_fa.run() """ Here we get the FA list including all the subjects. 
""" tbss_source = pe.Node(interface=nio.DataGrabber(outfiles=['fa_list', 'md_list']), name='tbss_source') tbss_source.inputs.base_directory = datasink.inputs.base_directory tbss_source.inputs.template = '%s/%s_%s.nii' tbss_source.inputs.template_args = dict(fa_list=[['FA', subjects_list, 'FA']], md_list=[['MD', subjects_list, 'MD']]) tbss_source.inputs.sort_filelist = True """ TBSS analysis """ tbss_all = create_tbss_all() tbss_all.inputs.inputnode.skeleton_thresh = 0.2 tbssproc = pe.Workflow(name="tbssproc") tbssproc.base_dir = os.path.join(os.path.abspath(workingdir), 'l2') tbssproc.connect(tbss_source, 'fa_list', tbss_all, 'inputnode.fa_list') tbss_MD = create_tbss_non_FA(name='tbss_MD') tbss_MD.inputs.inputnode.skeleton_thresh = tbss_all.inputs.inputnode.skeleton_thresh tbssproc.connect([(tbss_all, tbss_MD, [('tbss2.outputnode.field_list', 'inputnode.field_list'), ('tbss3.outputnode.groupmask', 'inputnode.groupmask'), ('tbss3.outputnode.meanfa_file', 'inputnode.meanfa_file'), ('tbss4.outputnode.distance_map', 'inputnode.distance_map')]), (tbss_source, tbss_MD, [('md_list', 'inputnode.file_list')]), ]) if __name__ == '__main__': tbssproc.write_graph() tbssproc.run() nipype-0.9.2/examples/fmri_freesurfer_smooth.py000077500000000000000000000544441227300005300220020ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ====================================== fMRI: surface smooth - FreeSurfer, SPM ====================================== This tutorial illustrates how to perform surface-based smoothing of cortical data using FreeSurfer_ and then perform firstlevel model and contrast estimation using SPM_. A surface-based second level glm illustrates the use of spherical registration and freesurfer's glm functions. Preparing environment ===================== Step 0 ------ In order to run this tutorial you need to have SPM_ and FreeSurfer_ tools installed and accessible from matlab/command line. Check by calling mri_info from the command line. Step 1 ------ Link the *fsaverage* directory for your freesurfer distribution. To do this type: :: cd nipype-tutorial/fsdata ln -s $FREESURFER_HOME/subjects/fsaverage cd .. Defining the workflow ===================== """ import os # system functions import nipype.algorithms.modelgen as model # model generation import nipype.algorithms.rapidart as ra # artifact detection import nipype.interfaces.freesurfer as fs # freesurfer import nipype.interfaces.io as nio # i/o routines import nipype.interfaces.matlab as mlab # how to run matlab import nipype.interfaces.spm as spm # spm import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine """ Preliminaries ------------- Set any package specific configuration. Setting the subjects directory and the appropriate matlab command to use. if you want to use a different spm version/path, it should also be entered here. These are currently being set at the class level, so every node will inherit these settings. However, these can also be changed or set for an individual node. 
""" # Tell freesurfer what subjects directory to use subjects_dir = os.path.abspath('fsdata') fs.FSCommand.set_default_subjects_dir(subjects_dir) # Set the way matlab should be called mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") # If SPM is not in your MATLAB path you should add it here mlab.MatlabCommand.set_default_paths('/software/spm8') """ Setup preprocessing workflow ---------------------------- """ preproc = pe.Workflow(name='preproc') """ Use :class:`nipype.interfaces.spm.Realign` for motion correction and register all images to the mean image. """ realign = pe.Node(interface=spm.Realign(), name="realign") realign.inputs.register_to_mean = True """ Use :class:`nipype.algorithms.rapidart` to determine which of the images in the functional series are outliers based on deviations in intensity or movement. """ art = pe.Node(interface=ra.ArtifactDetect(), name="art") art.inputs.use_differences = [True, False] art.inputs.use_norm = True art.inputs.norm_threshold = 1 art.inputs.zintensity_threshold = 3 art.inputs.mask_type = 'file' art.inputs.parameter_source = 'SPM' """ Use :class:`nipype.interfaces.freesurfer.BBRegister` to coregister the mean functional image generated by realign to the subjects' surfaces. """ surfregister = pe.Node(interface=fs.BBRegister(),name='surfregister') surfregister.inputs.init = 'fsl' surfregister.inputs.contrast_type = 't2' """ Use :class:`nipype.interfaces.io.FreeSurferSource` to retrieve various image files that are automatically generated by the recon-all process. """ FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource') """ Use :class:`nipype.interfaces.freesurfer.ApplyVolTransform` to convert the brainmask generated by freesurfer into the realigned functional space. """ ApplyVolTransform = pe.Node(interface=fs.ApplyVolTransform(), name='applyreg') ApplyVolTransform.inputs.inverse = True """ Use :class:`nipype.interfaces.freesurfer.Binarize` to extract a binary brain mask. """ Threshold = pe.Node(interface=fs.Binarize(),name='threshold') Threshold.inputs.min = 10 Threshold.inputs.out_type = 'nii' """ Two different types of functional data smoothing are performed in this workflow. The volume smoothing option performs a standard SPM smoothin. using :class:`nipype.interfaces.spm.Smooth`. In addition, we use a smoothing routine from freesurfer (:class:`nipype.interfaces.freesurfer.Binarize`) to project the functional data from the volume to the subjects' surface, smooth it on the surface and fit it back into the volume forming the cortical ribbon. The projection uses the average value along a "cortical column". In addition to the surface smoothing, the rest of the volume is smoothed with a 3d gaussian kernel. .. note:: It is very important to note that the projection to the surface takes a 3d manifold to a 2d manifold. Hence the reverse projection, simply fills the thickness of cortex with the smoothed data. The smoothing is not performed in a depth specific manner. The output of this branch should only be used for surface-based analysis and visualization. """ volsmooth = pe.Node(interface=spm.Smooth(), name = "volsmooth") surfsmooth = pe.MapNode(interface=fs.Smooth(proj_frac_avg=(0,1,0.1)), name = "surfsmooth", iterfield=['in_file']) """ We connect up the different nodes to implement the preprocessing workflow. 
""" preproc.connect([(realign, surfregister,[('mean_image', 'source_file')]), (FreeSurferSource, ApplyVolTransform,[('brainmask','target_file')]), (surfregister, ApplyVolTransform,[('out_reg_file','reg_file')]), (realign, ApplyVolTransform,[('mean_image', 'source_file')]), (ApplyVolTransform, Threshold,[('transformed_file','in_file')]), (realign, art,[('realignment_parameters','realignment_parameters'), ('realigned_files','realigned_files')]), (Threshold, art, [('binary_file', 'mask_file')]), (realign, volsmooth, [('realigned_files', 'in_files')]), (realign, surfsmooth, [('realigned_files', 'in_file')]), (surfregister, surfsmooth, [('out_reg_file','reg_file')]), ]) """ Set up volume analysis workflow ------------------------------- """ volanalysis = pe.Workflow(name='volanalysis') """ Generate SPM-specific design information using :class:`nipype.interfaces.spm.SpecifyModel`. """ modelspec = pe.Node(interface=model.SpecifySPMModel(), name= "modelspec") modelspec.inputs.concatenate_runs = True """ Generate a first level SPM.mat file for analysis :class:`nipype.interfaces.spm.Level1Design`. """ level1design = pe.Node(interface=spm.Level1Design(), name= "level1design") level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} """ Use :class:`nipype.interfaces.spm.EstimateModel` to determine the parameters of the model. """ level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") level1estimate.inputs.estimation_method = {'Classical' : 1} """ Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the first level contrasts specified in a few steps above. """ contrastestimate = pe.Node(interface = spm.EstimateContrast(), name="contrastestimate") volanalysis.connect([(modelspec,level1design,[('session_info','session_info')]), (level1design,level1estimate,[('spm_mat_file','spm_mat_file')]), (level1estimate,contrastestimate,[('spm_mat_file','spm_mat_file'), ('beta_images','beta_images'), ('residual_image','residual_image')]), ]) """ Set up surface analysis workflow -------------------------------- We simply clone the volume analysis workflow. """ surfanalysis = volanalysis.clone(name='surfanalysis') """ Set up volume normalization workflow ------------------------------------ The volume analysis is performed in individual space. Therefore, post analysis we normalize the contrast images to MNI space. """ volnorm = pe.Workflow(name='volnormconimages') """ Use :class:`nipype.interfaces.freesurfer.MRIConvert` to convert the brainmask, an mgz file and the contrast images (nifti-1 img/hdr pairs), to single volume nifti images. """ convert = pe.Node(interface=fs.MRIConvert(out_type='nii'),name='convert2nii') convert2 = pe.MapNode(interface=fs.MRIConvert(out_type='nii'), iterfield=['in_file'], name='convertimg2nii') """ Use :class:`nipype.interfaces.spm.Segment` to segment the structural image and generate the transformation file to MNI space. .. note:: Segment takes longer than usual because the nose is wrapped behind the head in the structural image. """ segment = pe.Node(interface=spm.Segment(), name='segment') """ Use :class:`nipype.interfaces.freesurfer.ApplyVolTransform` to convert contrast images into freesurfer space. 
""" normwreg = pe.MapNode(interface=fs.ApplyVolTransform(), iterfield=['source_file'], name='applyreg2con') """ Use :class:`nipype.interfaces.spm.Normalize` to normalize the contrast images to MNI space """ normalize = pe.Node(interface=spm.Normalize(jobtype='write'), name='norm2mni') """ Connect up the volume normalization components """ volnorm.connect([(convert, segment, [('out_file','data')]), (convert2, normwreg, [('out_file', 'source_file')]), (segment, normalize, [('transformation_mat', 'parameter_file')]), (normwreg, normalize, [('transformed_file','apply_to_files')]), ]) """ Preproc + Analysis + VolumeNormalization workflow ------------------------------------------------- Connect up the lower level workflows into an integrated analysis. In addition, we add an input node that specifies all the inputs needed for this workflow. Thus, one can import this workflow and connect it to their own data sources. An example with the nifti-tutorial data is provided below. For this workflow the only necessary inputs are the functional images, a freesurfer subject id corresponding to recon-all processed data, the session information for the functional runs and the contrasts to be evaluated. """ inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', 'subject_id', 'session_info', 'contrasts']), name='inputnode') """ Connect the components into an integrated workflow. """ l1pipeline = pe.Workflow(name='firstlevel') l1pipeline.connect([(inputnode,preproc,[('func','realign.in_files'), ('subject_id','surfregister.subject_id'), ('subject_id','fssource.subject_id'), ]), (inputnode, volanalysis,[('session_info','modelspec.subject_info'), ('contrasts','contrastestimate.contrasts')]), (inputnode, surfanalysis,[('session_info','modelspec.subject_info'), ('contrasts','contrastestimate.contrasts')]), ]) # attach volume and surface model specification and estimation components l1pipeline.connect([(preproc, volanalysis, [('realign.realignment_parameters', 'modelspec.realignment_parameters'), ('volsmooth.smoothed_files', 'modelspec.functional_runs'), ('art.outlier_files', 'modelspec.outlier_files'), ('threshold.binary_file', 'level1design.mask_image')]), (preproc, surfanalysis, [('realign.realignment_parameters', 'modelspec.realignment_parameters'), ('surfsmooth.smoothed_file', 'modelspec.functional_runs'), ('art.outlier_files', 'modelspec.outlier_files'), ('threshold.binary_file', 'level1design.mask_image')]) ]) # attach volume contrast normalization components l1pipeline.connect([(preproc, volnorm, [('fssource.orig','convert2nii.in_file'), ('surfregister.out_reg_file','applyreg2con.reg_file'), ('fssource.orig','applyreg2con.target_file')]), (volanalysis, volnorm, [('contrastestimate.con_images', 'convertimg2nii.in_file'), ]) ]) """ Data specific components ------------------------ The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``s1`` and ``s2``. Each subject directory contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And one anatomical volume named struct.nii. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). These fields become the output fields of the ``datasource`` node in the pipeline. In the example below, run 'f3' is of type 'func' and gets mapped to a nifti filename through a template '%s.nii'. So 'f3' would become 'f3.nii'. 
""" # Specify the location of the data. data_dir = os.path.abspath('data') # Specify the subject directories subject_list = ['s1', 's3'] # Map field names to individual subject runs. info = dict(func=[['subject_id', ['f3','f5','f7','f10']]], struct=[['subject_id','struct']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataGrabber` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Set preprocessing parameters ---------------------------- """ l1pipeline.inputs.preproc.fssource.subjects_dir = subjects_dir l1pipeline.inputs.preproc.volsmooth.fwhm = 4 l1pipeline.inputs.preproc.surfsmooth.surface_fwhm = 5 l1pipeline.inputs.preproc.surfsmooth.vol_fwhm = 4 """ Experimental paradigm specific components ----------------------------------------- Here we create a function that returns subject-specific information about the experimental paradigm. This is used by the :class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary to generate an SPM design matrix. In this tutorial, the same paradigm was used for every participant. """ def subjectinfo(subject_id): from nipype.interfaces.base import Bunch from copy import deepcopy print "Subject ID: %s\n"%str(subject_id) output = [] names = ['Task-Odd','Task-Even'] for r in range(4): onsets = [range(15,240,60),range(45,240,60)] output.insert(r, Bunch(conditions=names, onsets=deepcopy(onsets), durations=[[15] for s in names], )) return output """Setup the contrast structure that needs to be evaluated. This is a list of lists. The inner list specifies the contrasts and has the following format - [Name,Stat,[list of condition names],[weights on those conditions]. The condition names must match the `names` listed in the `subjectinfo` function described above. """ cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]) cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]) contrasts = [cont1,cont2] """ Set up node specific inputs --------------------------- We replicate the modelspec parameters separately for the surface- and volume-based analysis. """ modelspecref = l1pipeline.inputs.volanalysis.modelspec modelspecref.input_units = 'secs' modelspecref.time_repetition = 3. modelspecref.high_pass_filter_cutoff = 120 modelspecref = l1pipeline.inputs.surfanalysis.modelspec modelspecref.input_units = 'secs' modelspecref.time_repetition = 3. 
modelspecref.high_pass_filter_cutoff = 120 l1designref = l1pipeline.inputs.volanalysis.level1design l1designref.timing_units = modelspecref.output_units l1designref.interscan_interval = modelspecref.time_repetition l1designref = l1pipeline.inputs.surfanalysis.level1design l1designref.timing_units = modelspecref.output_units l1designref.interscan_interval = modelspecref.time_repetition l1pipeline.inputs.inputnode.contrasts = contrasts """ Setup the pipeline ------------------ The nodes created above do not describe the flow of data. They merely describe the parameters used for each function. In this section we setup the connections between the nodes such that appropriate outputs from nodes are piped into appropriate inputs of other nodes. Use the :class:`nipype.pipeline.engine.Workfow` to create a graph-based execution pipeline for first level analysis. """ level1 = pe.Workflow(name="level1") level1.base_dir = os.path.abspath('volsurf_tutorial/workingdir') level1.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (datasource,l1pipeline,[('func','inputnode.func')]), (infosource,l1pipeline,[('subject_id','inputnode.subject_id'), (('subject_id', subjectinfo), 'inputnode.session_info')]), ]) """ Store the output ---------------- Create a datasink node to store the contrast images and registration info """ datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.abspath('volsurf_tutorial/l1out') datasink.inputs.substitutions = [] def getsubs(subject_id): subs = [('_subject_id_%s/'%subject_id,'')] return subs # store relevant outputs from various stages of the 1st level analysis level1.connect([(infosource, datasink,[('subject_id','container'), (('subject_id', getsubs), 'substitutions') ]), (l1pipeline, datasink,[('surfanalysis.contrastestimate.con_images','contrasts'), ('preproc.surfregister.out_reg_file','registrations'), ]) ]) """ Run the analysis pipeline and also create a dot+png (if graphviz is available) that visually represents the workflow. 
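If several cores are available, the same call can also be handed one of nipype's parallel execution plugins, e.g.::

    level1.run(plugin='MultiProc', plugin_args={'n_procs': 2})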
""" if __name__ == '__main__': level1.run() level1.write_graph(graph2use='flat') """ Level2 surface-based pipeline ----------------------------- Create a level2 workflow """ l2flow = pe.Workflow(name='l2out') l2flow.base_dir = os.path.abspath('volsurf_tutorial') """ Setup a dummy node to iterate over contrasts and hemispheres """ l2inputnode = pe.Node(interface=util.IdentityInterface(fields=['contrasts', 'hemi']), name='inputnode') l2inputnode.iterables = [('contrasts', range(1,len(contrasts)+1)), ('hemi', ['lh','rh'])] """ Use a datagrabber node to collect contrast images and registration files """ l2source = pe.Node(interface=nio.DataGrabber(infields=['con_id'], outfields=['con','reg']), name='l2source') l2source.inputs.base_directory = os.path.abspath('volsurf_tutorial/l1out') l2source.inputs.template = '*' l2source.inputs.field_template = dict(con='*/contrasts/con_%04d.img', reg='*/registrations/*.dat') l2source.inputs.template_args = dict(con=[['con_id']],reg=[[]]) l2source.inputs.sort_filelist = True l2flow.connect(l2inputnode, 'contrasts', l2source, 'con_id') """ Merge contrast images and registration files """ mergenode = pe.Node(interface=util.Merge(2, axis='hstack'), name='merge') def ordersubjects(files, subj_list): outlist = [] for s in subj_list: for f in files: if '/%s/'%s in f: outlist.append(f) continue print outlist return outlist l2flow.connect(l2source,('con', ordersubjects, subject_list), mergenode, 'in1') l2flow.connect(l2source,('reg', ordersubjects, subject_list), mergenode, 'in2') """ Concatenate contrast images projected to fsaverage """ l2concat = pe.Node(interface=fs.MRISPreproc(), name='concat') l2concat.inputs.target = 'fsaverage' l2concat.inputs.fwhm = 5 def list2tuple(listoflist): return [tuple(x) for x in listoflist] l2flow.connect(l2inputnode, 'hemi', l2concat, 'hemi') l2flow.connect(mergenode, ('out', list2tuple), l2concat, 'vol_measure_file') """ Perform a one sample t-test """ l2ttest = pe.Node(interface=fs.OneSampleTTest(), name='onesample') l2flow.connect(l2concat, 'out_file', l2ttest, 'in_file') """ Run the analysis pipeline and also create a dot+png (if graphviz is available) that visually represents the workflow. """ if __name__ == '__main__': l2flow.run() l2flow.write_graph(graph2use='flat') nipype-0.9.2/examples/fmri_fsl.py000077500000000000000000000525221227300005300170200ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ========= fMRI: FSL ========= A workflow that uses fsl to perform a first level analysis on the nipype tutorial data set:: python fmri_fsl.py First tell python where to find the appropriate functions. """ import os # system functions import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.modelgen as model # model generation import nipype.algorithms.rapidart as ra # artifact detection """ Preliminaries ------------- Setup any package specific configuration. The output file format for FSL routines is being set to compressed NIFTI. """ fsl.FSLCommand.set_default_output_type('NIFTI_GZ') """ Setting up workflows -------------------- In this tutorial we will be setting up a hierarchical workflow for fsl analysis. This will demonstrate how pre-defined workflows can be setup and shared across users, projects and labs. 
Setup preprocessing workflow ---------------------------- This is a generic fsl feat preprocessing workflow encompassing skull stripping, motion correction and smoothing operations. """ preproc = pe.Workflow(name='preproc') """ Set up a node to define all inputs required for the preprocessing workflow """ inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', 'struct',]), name='inputspec') """ Convert functional images to float representation. Since there can be more than one functional run we use a MapNode to convert each run. """ img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float', op_string = '', suffix='_dtype'), iterfield=['in_file'], name='img2float') preproc.connect(inputnode, 'func', img2float, 'in_file') """ Extract the middle volume of the first run as the reference """ extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1), name = 'extractref') """ Define a function to pick the first file from a list of files """ def pickfirst(files): if isinstance(files, list): return files[0] else: return files preproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file') """ Define a function to return the 1 based index of the middle volume """ def getmiddlevolume(func): from nibabel import load funcfile = func if isinstance(func, list): funcfile = func[0] _,_,_,timepoints = load(funcfile).get_shape() return (timepoints/2)-1 preproc.connect(inputnode, ('func', getmiddlevolume), extract_ref, 't_min') """ Realign the functional runs to the middle volume of the first run """ motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats = True, save_plots = True), name='realign', iterfield = ['in_file']) preproc.connect(img2float, 'out_file', motion_correct, 'in_file') preproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file') """ Plot the estimated motion parameters """ plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'), name='plot_motion', iterfield=['in_file']) plot_motion.iterables = ('plot_type', ['rotations', 'translations']) preproc.connect(motion_correct, 'par_file', plot_motion, 'in_file') """ Extract the mean volume of the first functional run """ meanfunc = pe.Node(interface=fsl.ImageMaths(op_string = '-Tmean', suffix='_mean'), name='meanfunc') preproc.connect(motion_correct, ('out_file', pickfirst), meanfunc, 'in_file') """ Strip the skull from the mean functional to generate a mask """ meanfuncmask = pe.Node(interface=fsl.BET(mask = True, no_output=True, frac = 0.3), name = 'meanfuncmask') preproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file') """ Mask the functional runs with the extracted mask """ maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), iterfield=['in_file'], name = 'maskfunc') preproc.connect(motion_correct, 'out_file', maskfunc, 'in_file') preproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2') """ Determine the 2nd and 98th percentile intensities of each functional run """ getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'), iterfield = ['in_file'], name='getthreshold') preproc.connect(maskfunc, 'out_file', getthresh, 'in_file') """ Threshold the first run of the functional data at 10% of the 98th percentile """ threshold = pe.Node(interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'), name='threshold') preproc.connect(maskfunc, ('out_file', pickfirst), threshold, 'in_file') """ Define a function to get 10% of the intensity """ def getthreshop(thresh): return '-thr %.10f -Tmin -bin'%(0.1*thresh[0][1]) 
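# To make the helper above concrete: ImageStats with op_string='-p 2 -p 98'
# returns the 2nd and 98th percentile for each run, so ``thresh`` arrives as a
# list like [[55.0, 7250.0]] (hypothetical values). getthreshop then takes the
# 98th percentile of the first run and builds the fslmaths operation
# '-thr 725.0000000000 -Tmin -bin', i.e. threshold at 10% of that value,
# take the minimum over time and binarise the result.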
preproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string') """ Determine the median value of the functional runs using the mask """ medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'), iterfield = ['in_file'], name='medianval') preproc.connect(motion_correct, 'out_file', medianval, 'in_file') preproc.connect(threshold, 'out_file', medianval, 'mask_file') """ Dilate the mask """ dilatemask = pe.Node(interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'), name='dilatemask') preproc.connect(threshold, 'out_file', dilatemask, 'in_file') """ Mask the motion corrected functional runs with the dilated mask """ maskfunc2 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file'], name='maskfunc2') preproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file') preproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2') """ Determine the mean image from each functional run """ meanfunc2 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), iterfield=['in_file'], name='meanfunc2') preproc.connect(maskfunc2, 'out_file', meanfunc2, 'in_file') """ Merge the median values with the mean functional images into a coupled list """ mergenode = pe.Node(interface=util.Merge(2, axis='hstack'), name='merge') preproc.connect(meanfunc2,'out_file', mergenode, 'in1') preproc.connect(medianval,'out_stat', mergenode, 'in2') """ Smooth each run using SUSAN with the brightness threshold set to 75% of the median value for each run and a mask constituting the mean functional """ smooth = pe.MapNode(interface=fsl.SUSAN(), iterfield=['in_file', 'brightness_threshold','usans'], name='smooth') """ Define a function to get the brightness threshold for SUSAN """ def getbtthresh(medianvals): return [0.75*val for val in medianvals] def getusans(x): return [[tuple([val[0],0.75*val[1]])] for val in x] preproc.connect(maskfunc2, 'out_file', smooth, 'in_file') preproc.connect(medianval, ('out_stat', getbtthresh), smooth, 'brightness_threshold') preproc.connect(mergenode, ('out', getusans), smooth, 'usans') """ Mask the smoothed data with the dilated mask """ maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file'], name='maskfunc3') preproc.connect(smooth, 'smoothed_file', maskfunc3, 'in_file') preproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2') """ Scale each volume of the run so that the median value of the run is set to 10000 """ intnorm = pe.MapNode(interface=fsl.ImageMaths(suffix='_intnorm'), iterfield=['in_file','op_string'], name='intnorm') preproc.connect(maskfunc3, 'out_file', intnorm, 'in_file') """ Define a function to get the scaling factor for intensity normalization """ def getinormscale(medianvals): return ['-mul %.10f'%(10000./val) for val in medianvals] preproc.connect(medianval, ('out_stat', getinormscale), intnorm, 'op_string') """ Perform temporal highpass filtering on the data """ highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'), iterfield=['in_file'], name='highpass') preproc.connect(intnorm, 'out_file', highpass, 'in_file') """ Generate a mean functional image from the first run """ meanfunc3 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), iterfield=['in_file'], name='meanfunc3') preproc.connect(highpass, ('out_file', pickfirst), meanfunc3, 'in_file') """ Strip the structural image and coregister the mean functional image to the structural image """ nosestrip = pe.Node(interface=fsl.BET(frac=0.3), name = 
'nosestrip') skullstrip = pe.Node(interface=fsl.BET(mask = True), name = 'stripstruct') coregister = pe.Node(interface=fsl.FLIRT(dof=6), name = 'coregister') """ Use :class:`nipype.algorithms.rapidart` to determine which of the images in the functional series are outliers based on deviations in intensity and/or movement. """ art = pe.MapNode(interface=ra.ArtifactDetect(use_differences = [True, False], use_norm = True, norm_threshold = 1, zintensity_threshold = 3, parameter_source = 'FSL', mask_type = 'file'), iterfield=['realigned_files', 'realignment_parameters'], name="art") preproc.connect([(inputnode, nosestrip,[('struct','in_file')]), (nosestrip, skullstrip, [('out_file','in_file')]), (skullstrip, coregister,[('out_file','in_file')]), (meanfunc2, coregister,[(('out_file',pickfirst),'reference')]), (motion_correct, art, [('par_file','realignment_parameters')]), (maskfunc2, art, [('out_file','realigned_files')]), (dilatemask, art, [('out_file', 'mask_file')]), ]) """ Set up model fitting workflow ----------------------------- """ modelfit = pe.Workflow(name='modelfit') """ Use :class:`nipype.algorithms.modelgen.SpecifyModel` to generate design information. """ modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec") """ Use :class:`nipype.interfaces.fsl.Level1Design` to generate a run specific fsf file for analysis """ level1design = pe.Node(interface=fsl.Level1Design(), name="level1design") """ Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat file for use by FILMGLS """ modelgen = pe.MapNode(interface=fsl.FEATModel(), name='modelgen', iterfield = ['fsf_file', 'ev_files']) """ Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a mat file and a functional run """ modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5, threshold=1000), name='modelestimate', iterfield = ['design_file','in_file']) """ Use :class:`nipype.interfaces.fsl.ContrastMgr` to generate contrast estimates """ conestimate = pe.MapNode(interface=fsl.ContrastMgr(), name='conestimate', iterfield = ['tcon_file','param_estimates', 'sigmasquareds', 'corrections', 'dof_file']) modelfit.connect([ (modelspec,level1design,[('session_info','session_info')]), (level1design,modelgen,[('fsf_files', 'fsf_file'), ('ev_files', 'ev_files')]), (modelgen,modelestimate,[('design_file','design_file')]), (modelgen,conestimate,[('con_file','tcon_file')]), (modelestimate,conestimate,[('param_estimates','param_estimates'), ('sigmasquareds', 'sigmasquareds'), ('corrections','corrections'), ('dof_file','dof_file')]), ]) """ Set up fixed-effects workflow ----------------------------- """ fixed_fx = pe.Workflow(name='fixedfx') """ Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and varcopes for each condition """ copemerge = pe.MapNode(interface=fsl.Merge(dimension='t'), iterfield=['in_files'], name="copemerge") varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'), iterfield=['in_files'], name="varcopemerge") """ Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition specific level 2 model design files """ level2model = pe.Node(interface=fsl.L2Model(), name='l2model') """ Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model """ flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe'), name="flameo", iterfield=['cope_file','var_cope_file']) fixed_fx.connect([(copemerge,flameo,[('merged_file','cope_file')]), (varcopemerge,flameo,[('merged_file','var_cope_file')]), (level2model,flameo, 
[('design_mat','design_file'), ('design_con','t_con_file'), ('design_grp','cov_split_file')]), ]) """ Set up first-level workflow --------------------------- """ def sort_copes(files): numelements = len(files[0]) outfiles = [] for i in range(numelements): outfiles.insert(i,[]) for j, elements in enumerate(files): outfiles[i].append(elements[i]) return outfiles def num_copes(files): return len(files) firstlevel = pe.Workflow(name='firstlevel') firstlevel.connect([(preproc, modelfit, [('highpass.out_file', 'modelspec.functional_runs'), ('art.outlier_files', 'modelspec.outlier_files'), ('highpass.out_file','modelestimate.in_file')]), (preproc, fixed_fx, [('coregister.out_file', 'flameo.mask_file')]), (modelfit, fixed_fx,[(('conestimate.copes', sort_copes),'copemerge.in_files'), (('conestimate.varcopes', sort_copes),'varcopemerge.in_files'), (('conestimate.copes', num_copes),'l2model.num_copes'), ]) ]) """ Experiment specific components ------------------------------ The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``s1`` and ``s2``. Each subject directory contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And one anatomical volume named struct.nii. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). These fields become the output fields of the ``datasource`` node in the pipeline. In the example below, run 'f3' is of type 'func' and gets mapped to a nifti filename through a template '%s.nii'. So 'f3' would become 'f3.nii'. """ # Specify the location of the data. data_dir = os.path.abspath('data') # Specify the subject directories subject_list = ['s1'] #, 's3'] # Map field names to individual subject runs. info = dict(func=[['subject_id', ['f3','f5','f7','f10']]], struct=[['subject_id','struct']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataSource` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Use the get_node function to retrieve an internal node by name. Then set the iterables on this node to perform two different extents of smoothing. """ smoothnode = firstlevel.get_node('preproc.smooth') assert(str(smoothnode)=='preproc.smooth') smoothnode.iterables = ('fwhm', [5.,10.]) hpcutoff = 120 TR = 3. 
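# The op_string assigned below hands the temporal filter to fslmaths' -bptf
# option, whose two arguments are expressed in volumes rather than seconds
# (FSL describes them as the highpass and lowpass filter sigmas); -1 turns the
# lowpass part off. With hpcutoff = 120 s and TR = 3 s, hpcutoff/TR evaluates
# to 40, giving '-bptf 40 -1'.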
firstlevel.inputs.preproc.highpass.suffix = '_hpf' firstlevel.inputs.preproc.highpass.op_string = '-bptf %d -1'%(hpcutoff/TR) """ Setup a function that returns subject-specific information about the experimental paradigm. This is used by the :class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary to generate an SPM design matrix. In this tutorial, the same paradigm was used for every participant. Other examples of this function are available in the `doc/examples` folder. Note: Python knowledge required here. """ def subjectinfo(subject_id): from nipype.interfaces.base import Bunch from copy import deepcopy print "Subject ID: %s\n"%str(subject_id) output = [] names = ['Task-Odd','Task-Even'] for r in range(4): onsets = [range(15,240,60),range(45,240,60)] output.insert(r, Bunch(conditions=names, onsets=deepcopy(onsets), durations=[[15] for s in names], amplitudes=None, tmod=None, pmod=None, regressor_names=None, regressors=None)) return output """ Setup the contrast structure that needs to be evaluated. This is a list of lists. The inner list specifies the contrasts and has the following format - [Name,Stat,[list of condition names],[weights on those conditions]. The condition names must match the `names` listed in the `subjectinfo` function described above. """ cont1 = ['Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]] cont2 = ['Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]] cont3 = ['Task','F', [cont1, cont2]] contrasts = [cont1,cont2] firstlevel.inputs.modelfit.modelspec.input_units = 'secs' firstlevel.inputs.modelfit.modelspec.time_repetition = TR firstlevel.inputs.modelfit.modelspec.high_pass_filter_cutoff = hpcutoff firstlevel.inputs.modelfit.level1design.interscan_interval = TR firstlevel.inputs.modelfit.level1design.bases = {'dgamma':{'derivs': False}} firstlevel.inputs.modelfit.level1design.contrasts = contrasts firstlevel.inputs.modelfit.level1design.model_serial_correlations = True """ Set up complete workflow ======================== """ l1pipeline = pe.Workflow(name= "level1") l1pipeline.base_dir = os.path.abspath('./fsl/workingdir') l1pipeline.config = {"execution": {"crashdump_dir":os.path.abspath('./fsl/crashdumps')}} l1pipeline.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (infosource, firstlevel, [(('subject_id', subjectinfo), 'modelfit.modelspec.subject_info')]), (datasource, firstlevel, [('struct','preproc.inputspec.struct'), ('func', 'preproc.inputspec.func'), ]), ]) """ Execute the pipeline -------------------- The code discussed above sets up all the necessary data structures with appropriate parameters and the connectivity between the processes, but does not generate any output. To actually run the analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. """ if __name__ == '__main__': l1pipeline.write_graph() outgraph = l1pipeline.run() #l1pipeline.run(plugin='MultiProc', plugin_args={'n_procs':2}) nipype-0.9.2/examples/fmri_fsl_feeds.py000077500000000000000000000455561227300005300201770ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ================= fMRI: FEEDS - FSL ================= A pipeline example that data from the FSL FEEDS set. Single subject, two stimuli. 
You can find it at http://www.fmrib.ox.ac.uk/fsl/feeds/doc/index.html """ import os # system functions import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.modelgen as model # model generation """ Preliminaries ------------- Setup any package specific configuration. The output file format for FSL routines is being set to compressed NIFTI. """ fsl.FSLCommand.set_default_output_type('NIFTI_GZ') """ Setting up workflows -------------------- In this tutorial we will be setting up a hierarchical workflow for fsl analysis. This will demonstrate how pre-defined workflows can be setup and shared across users, projects and labs. Setup preprocessing workflow ---------------------------- This is a generic fsl feat preprocessing workflow encompassing skull stripping, motion correction and smoothing operations. """ preproc = pe.Workflow(name='preproc') """ Set up a node to define all inputs required for the preprocessing workflow """ inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', 'struct',]), name='inputspec') """ Convert functional images to float representation. Since there can be more than one functional run we use a MapNode to convert each run. """ img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float', op_string = '', suffix='_dtype'), iterfield=['in_file'], name='img2float') preproc.connect(inputnode, 'func', img2float, 'in_file') """ Extract the middle volume of the first run as the reference """ extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1), name = 'extractref') """ Define a function to pick the first file from a list of files """ def pickfirst(files): if isinstance(files, list): return files[0] else: return files preproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file') """ Define a function to return the 1 based index of the middle volume """ def getmiddlevolume(func): from nibabel import load funcfile = func if isinstance(func, list): funcfile = func[0] _,_,_,timepoints = load(funcfile).get_shape() return (timepoints/2)-1 preproc.connect(inputnode, ('func', getmiddlevolume), extract_ref, 't_min') """ Realign the functional runs to the middle volume of the first run """ motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats = True, save_plots = True), name='realign', iterfield = ['in_file']) preproc.connect(img2float, 'out_file', motion_correct, 'in_file') preproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file') """ Extract the mean volume of the first functional run """ meanfunc = pe.Node(interface=fsl.ImageMaths(op_string = '-Tmean', suffix='_mean'), name='meanfunc') preproc.connect(motion_correct, ('out_file', pickfirst), meanfunc, 'in_file') """ Strip the skull from the mean functional to generate a mask """ meanfuncmask = pe.Node(interface=fsl.BET(mask = True, no_output=True, frac = 0.3), name = 'meanfuncmask') preproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file') """ Mask the functional runs with the extracted mask """ maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), iterfield=['in_file'], name = 'maskfunc') preproc.connect(motion_correct, 'out_file', maskfunc, 'in_file') preproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2') """ Determine the 2nd and 98th percentile intensities of each functional run """ getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'), iterfield = 
['in_file'], name='getthreshold') preproc.connect(maskfunc, 'out_file', getthresh, 'in_file') """ Threshold the first run of the functional data at 10% of the 98th percentile """ threshold = pe.Node(interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'), name='threshold') preproc.connect(maskfunc, ('out_file', pickfirst), threshold, 'in_file') """ Define a function to get 10% of the intensity """ def getthreshop(thresh): return '-thr %.10f -Tmin -bin'%(0.1*thresh[0][1]) preproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string') """ Determine the median value of the functional runs using the mask """ medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'), iterfield = ['in_file'], name='medianval') preproc.connect(motion_correct, 'out_file', medianval, 'in_file') preproc.connect(threshold, 'out_file', medianval, 'mask_file') """ Dilate the mask """ dilatemask = pe.Node(interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'), name='dilatemask') preproc.connect(threshold, 'out_file', dilatemask, 'in_file') """ Mask the motion corrected functional runs with the dilated mask """ maskfunc2 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file'], name='maskfunc2') preproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file') preproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2') """ Determine the mean image from each functional run """ meanfunc2 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), iterfield=['in_file'], name='meanfunc2') preproc.connect(maskfunc2, 'out_file', meanfunc2, 'in_file') """ Merge the median values with the mean functional images into a coupled list """ mergenode = pe.Node(interface=util.Merge(2, axis='hstack'), name='merge') preproc.connect(meanfunc2,'out_file', mergenode, 'in1') preproc.connect(medianval,'out_stat', mergenode, 'in2') """ Smooth each run using SUSAN with the brightness threshold set to 75% of the median value for each run and a mask consituting the mean functional """ smooth = pe.MapNode(interface=fsl.SUSAN(), iterfield=['in_file', 'brightness_threshold','usans'], name='smooth') """ Define a function to get the brightness threshold for SUSAN """ def getbtthresh(medianvals): return [0.75*val for val in medianvals] def convert_th(x): return [[tuple([val[0],0.75*val[1]])] for val in x] preproc.connect(maskfunc2, 'out_file', smooth, 'in_file') preproc.connect(medianval, ('out_stat', getbtthresh), smooth, 'brightness_threshold') preproc.connect(mergenode, ('out', convert_th), smooth, 'usans') """ Mask the smoothed data with the dilated mask """ maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file'], name='maskfunc3') preproc.connect(smooth, 'smoothed_file', maskfunc3, 'in_file') preproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2') """ Scale each volume of the run so that the median value of the run is set to 10000 """ intnorm = pe.MapNode(interface=fsl.ImageMaths(suffix='_intnorm'), iterfield=['in_file','op_string'], name='intnorm') preproc.connect(maskfunc3, 'out_file', intnorm, 'in_file') """ Define a function to get the scaling factor for intensity normalization """ def getinormscale(medianvals): return ['-mul %.10f'%(10000./val) for val in medianvals] preproc.connect(medianval, ('out_stat', getinormscale), intnorm, 'op_string') """ Perform temporal highpass filtering on the data """ highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'), iterfield=['in_file'], 
name='highpass') preproc.connect(intnorm, 'out_file', highpass, 'in_file') """ Generate a mean functional image from the first run """ meanfunc3 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), iterfield=['in_file'], name='meanfunc3') preproc.connect(highpass, ('out_file', pickfirst), meanfunc3, 'in_file') """ Strip the structural image a coregister the mean functional image to the structural image """ nosestrip = pe.Node(interface=fsl.BET(frac=0.3), name = 'nosestrip') skullstrip = pe.Node(interface=fsl.BET(mask = True), name = 'stripstruct') coregister = pe.Node(interface=fsl.FLIRT(dof=6), name = 'coregister') preproc.connect([(inputnode, nosestrip,[('struct','in_file')]), (nosestrip, skullstrip, [('out_file','in_file')]), (skullstrip, coregister,[('out_file','in_file')]), (meanfunc2, coregister,[(('out_file',pickfirst),'reference')]), ]) """ Set up model fitting workflow ----------------------------- """ modelfit = pe.Workflow(name='modelfit') """ Use :class:`nipype.algorithms.modelgen.SpecifyModel` to generate design information. """ modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec") """ Use :class:`nipype.interfaces.fsl.Level1Design` to generate a run specific fsf file for analysis """ level1design = pe.Node(interface=fsl.Level1Design(), name="level1design") """ Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat file for use by FILMGLS """ modelgen = pe.MapNode(interface=fsl.FEATModel(), name='modelgen', iterfield = ['fsf_file', 'ev_files']) """ Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a mat file and a functional run """ modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5, threshold=1000), name='modelestimate', iterfield = ['design_file','in_file']) """ Use :class:`nipype.interfaces.fsl.ContrastMgr` to generate contrast estimates """ conestimate = pe.MapNode(interface=fsl.ContrastMgr(), name='conestimate', iterfield = ['fcon_file', 'tcon_file','param_estimates', 'sigmasquareds', 'corrections', 'dof_file']) modelfit.connect([ (modelspec,level1design,[('session_info','session_info')]), (level1design,modelgen,[('fsf_files','fsf_file'), ('ev_files', 'ev_files')]), (modelgen,modelestimate,[('design_file','design_file')]), (modelgen,conestimate,[('con_file','tcon_file')]), (modelgen,conestimate,[('fcon_file','fcon_file')]), (modelestimate,conestimate,[('param_estimates','param_estimates'), ('sigmasquareds', 'sigmasquareds'), ('corrections','corrections'), ('dof_file','dof_file')]), ]) """ Set up fixed-effects workflow ----------------------------- """ fixed_fx = pe.Workflow(name='fixedfx') """ Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and varcopes for each condition """ copemerge = pe.MapNode(interface=fsl.Merge(dimension='t'), iterfield=['in_files'], name="copemerge") varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'), iterfield=['in_files'], name="varcopemerge") """ Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition specific level 2 model design files """ level2model = pe.Node(interface=fsl.L2Model(), name='l2model') """ Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model """ flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe'), name="flameo", iterfield=['cope_file','var_cope_file']) fixed_fx.connect([(copemerge,flameo,[('merged_file','cope_file')]), (varcopemerge,flameo,[('merged_file','var_cope_file')]), (level2model,flameo, [('design_mat','design_file'), 
('design_con','t_con_file'), ('design_grp','cov_split_file')]), ]) """ Set up first-level workflow --------------------------- """ def sort_copes(files): numelements = len(files[0]) outfiles = [] for i in range(numelements): outfiles.insert(i,[]) for j, elements in enumerate(files): outfiles[i].append(elements[i]) return outfiles def num_copes(files): return len(files) firstlevel = pe.Workflow(name='firstlevel') firstlevel.connect([(preproc, modelfit, [('highpass.out_file', 'modelspec.functional_runs'), ('highpass.out_file','modelestimate.in_file')]), (preproc, fixed_fx, [('coregister.out_file', 'flameo.mask_file')]), (modelfit, fixed_fx,[(('conestimate.copes', sort_copes),'copemerge.in_files'), (('conestimate.varcopes', sort_copes),'varcopemerge.in_files'), (('conestimate.copes', num_copes),'l2model.num_copes'), ]) ]) """ Experiment specific components ------------------------------ This tutorial does a single subject analysis so we are not using infosource and iterables """ # Specify the location of the FEEDS data. You can find it at http://www.fmrib.ox.ac.uk/fsl/feeds/doc/index.html feeds_data_dir = os.path.abspath('feeds_data') # Specify the subject directories # Map field names to individual subject runs. info = dict(func=[['fmri']], struct=[['structural']]) """ Now we create a :class:`nipype.interfaces.io.DataSource` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.Node` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. """ datasource = pe.Node(interface=nio.DataGrabber(outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = feeds_data_dir datasource.inputs.template = '%s.nii.gz' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True firstlevel.inputs.preproc.smooth.fwhm = 5 hpcutoff = 100 TR = 3. firstlevel.inputs.preproc.highpass.suffix = '_hpf' firstlevel.inputs.preproc.highpass.op_string = '-bptf %d -1'%(hpcutoff/TR) """ Setup a function that returns subject-specific information about the experimental paradigm. This is used by the :class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary to generate an SPM design matrix. In this tutorial, the same paradigm was used for every participant. Other examples of this function are available in the `doc/examples` folder. Note: Python knowledge required here. """ from nipype.interfaces.base import Bunch firstlevel.inputs.modelfit.modelspec.subject_info = [Bunch(conditions=['Visual','Auditory'], onsets=[range(0,int(180*TR),60),range(0,int(180*TR),90)], durations=[[30], [45]], amplitudes=None, tmod=None, pmod=None, regressor_names=None, regressors=None)] """ Setup the contrast structure that needs to be evaluated. This is a list of lists. The inner list specifies the contrasts and has the following format - [Name,Stat,[list of condition names],[weights on those conditions]. The condition names must match the `names` listed in the `subjectinfo` function described above. 
""" cont1 = ['Visual>Baseline','T', ['Visual','Auditory'],[1,0]] cont2 = ['Auditory>Baseline','T', ['Visual','Auditory'],[0,1]] cont3 = ['Task','F', [cont1, cont2]] contrasts = [cont1,cont2,cont3] model_serial_correlations = True firstlevel.inputs.modelfit.modelspec.input_units = 'secs' firstlevel.inputs.modelfit.modelspec.time_repetition = TR firstlevel.inputs.modelfit.modelspec.high_pass_filter_cutoff = hpcutoff firstlevel.inputs.modelfit.level1design.interscan_interval = TR firstlevel.inputs.modelfit.level1design.bases = {'dgamma':{'derivs': True}} firstlevel.inputs.modelfit.level1design.contrasts = contrasts firstlevel.inputs.modelfit.level1design.model_serial_correlations = model_serial_correlations """ Set up complete workflow ======================== """ l1pipeline = pe.Workflow(name= "level1") l1pipeline.base_dir = os.path.abspath('./fsl_feeds/workingdir') l1pipeline.config = dict(crashdump_dir=os.path.abspath('./fsl_feeds/crashdumps')) l1pipeline.connect([(datasource, firstlevel, [('struct','preproc.inputspec.struct'), ('func', 'preproc.inputspec.func'), ]), ]) """ Setup the datasink """ datasink = pe.Node(interface=nio.DataSink(parameterization=False), name="datasink") datasink.inputs.base_directory = os.path.abspath('./fsl_feeds/l1out') datasink.inputs.substitutions = [('dtype_mcf_mask_mean', 'meanfunc'), ('brain_brain_flirt','coregistered')] # store relevant outputs from various stages of the 1st level analysis l1pipeline.connect([(firstlevel, datasink,[('fixedfx.flameo.stats_dir',"fixedfx.@con"), ('preproc.coregister.out_file','coregstruct'), ('preproc.meanfunc2.out_file','meanfunc'), ('modelfit.conestimate.zstats', 'level1.@Z'), ]) ]) """ Execute the pipeline -------------------- The code discussed above sets up all the necessary data structures with appropriate parameters and the connectivity between the processes, but does not generate any output. To actually run the analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. """ if __name__ == '__main__': l1pipeline.run() # l2pipeline.run() nipype-0.9.2/examples/fmri_fsl_reuse.py000077500000000000000000000231511227300005300202170ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ========================= fMRI: FSL reuse workflows ========================= A workflow that uses fsl to perform a first level analysis on the nipype tutorial data set:: python fmri_fsl_reuse.py First tell python where to find the appropriate functions. """ import os # system functions import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.modelgen as model # model generation import nipype.algorithms.rapidart as ra # artifact detection from nipype.workflows.fmri.fsl import (create_featreg_preproc, create_modelfit_workflow, create_fixed_effects_flow) """ Preliminaries ------------- Setup any package specific configuration. The output file format for FSL routines is being set to compressed NIFTI. """ fsl.FSLCommand.set_default_output_type('NIFTI_GZ') level1_workflow = pe.Workflow(name='level1flow') preproc = create_featreg_preproc(whichvol='first') modelfit = create_modelfit_workflow() fixed_fx = create_fixed_effects_flow() """ Add artifact detection and model specification nodes between the preprocessing and modelfitting workflows. 
""" art = pe.MapNode(interface=ra.ArtifactDetect(use_differences = [True, False], use_norm = True, norm_threshold = 1, zintensity_threshold = 3, parameter_source = 'FSL', mask_type = 'file'), iterfield=['realigned_files', 'realignment_parameters', 'mask_file'], name="art") modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec") level1_workflow.connect([(preproc, art, [('outputspec.motion_parameters', 'realignment_parameters'), ('outputspec.realigned_files', 'realigned_files'), ('outputspec.mask', 'mask_file')]), (preproc, modelspec, [('outputspec.highpassed_files', 'functional_runs'), ('outputspec.motion_parameters', 'realignment_parameters')]), (art, modelspec, [('outlier_files', 'outlier_files')]), (modelspec, modelfit, [('session_info', 'inputspec.session_info')]), (preproc, modelfit, [('outputspec.highpassed_files', 'inputspec.functional_data')]) ]) """ Set up first-level workflow --------------------------- """ def sort_copes(files): numelements = len(files[0]) outfiles = [] for i in range(numelements): outfiles.insert(i,[]) for j, elements in enumerate(files): outfiles[i].append(elements[i]) return outfiles def num_copes(files): return len(files) pickfirst = lambda x : x[0] level1_workflow.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst), 'flameo.mask_file')]), (modelfit, fixed_fx, [(('outputspec.copes', sort_copes), 'inputspec.copes'), ('outputspec.dof_file', 'inputspec.dof_files'), (('outputspec.varcopes', sort_copes), 'inputspec.varcopes'), (('outputspec.copes', num_copes), 'l2model.num_copes'), ]) ]) """ Experiment specific components ------------------------------ The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``s1`` and ``s2``. Each subject directory contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And one anatomical volume named struct.nii. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). These fields become the output fields of the ``datasource`` node in the pipeline. In the example below, run 'f3' is of type 'func' and gets mapped to a nifti filename through a template '%s.nii'. So 'f3' would become 'f3.nii'. """ # Specify the location of the data. data_dir = os.path.abspath('data') # Specify the subject directories subject_list = ['s1'] #, 's3'] # Map field names to individual subject runs. info = dict(func=[['subject_id', ['f3','f5','f7','f10']]], struct=[['subject_id','struct']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataSource` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. 
""" datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Use the get_node function to retrieve an internal node by name. Then set the iterables on this node to perform two different extents of smoothing. """ inputnode = level1_workflow.get_node('featpreproc.inputspec') inputnode.iterables = ('fwhm', [5.,10.]) hpcutoff = 120. TR = 3. inputnode.inputs.highpass = hpcutoff/(2*TR) """ Setup a function that returns subject-specific information about the experimental paradigm. This is used by the :class:`nipype.modelgen.SpecifyModel` to create the information necessary to generate an SPM design matrix. In this tutorial, the same paradigm was used for every participant. Other examples of this function are available in the `doc/examples` folder. Note: Python knowledge required here. """ def subjectinfo(subject_id): from nipype.interfaces.base import Bunch from copy import deepcopy print "Subject ID: %s\n"%str(subject_id) output = [] names = ['Task-Odd','Task-Even'] for r in range(4): onsets = [range(15,240,60),range(45,240,60)] output.insert(r, Bunch(conditions=names, onsets=deepcopy(onsets), durations=[[15] for s in names])) return output """ Setup the contrast structure that needs to be evaluated. This is a list of lists. The inner list specifies the contrasts and has the following format - [Name,Stat,[list of condition names],[weights on those conditions]. The condition names must match the `names` listed in the `subjectinfo` function described above. """ cont1 = ['Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]] cont2 = ['Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]] cont3 = ['Task','F', [cont1, cont2]] contrasts = [cont1,cont2] modelspec.inputs.input_units = 'secs' modelspec.inputs.time_repetition = TR modelspec.inputs.high_pass_filter_cutoff = hpcutoff modelfit.inputs.inputspec.interscan_interval = TR modelfit.inputs.inputspec.bases = {'dgamma':{'derivs': False}} modelfit.inputs.inputspec.contrasts = contrasts modelfit.inputs.inputspec.model_serial_correlations = True modelfit.inputs.inputspec.film_threshold = 1000 level1_workflow.base_dir = os.path.abspath('./fsl/workingdir') level1_workflow.config['execution'] = dict(crashdump_dir=os.path.abspath('./fsl/crashdumps')) level1_workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (infosource, modelspec, [(('subject_id', subjectinfo), 'subject_info')]), (datasource, preproc, [('func', 'inputspec.func')]), ]) """ Execute the pipeline -------------------- The code discussed above sets up all the necessary data structures with appropriate parameters and the connectivity between the processes, but does not generate any output. To actually run the analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. 
""" if __name__ == '__main__': #level1_workflow.write_graph() level1_workflow.run() #level1_workflow.run(plugin='MultiProc', plugin_args={'n_procs':2}) nipype-0.9.2/examples/fmri_nipy_glm.py000077500000000000000000000246741227300005300200610ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ =================== fMRI: NiPy GLM, SPM =================== The fmri_nipy_glm.py integrates several interfaces to perform a first level analysis on a two-subject data set. It is very similar to the spm_tutorial with the difference of using nipy for fitting GLM model and estimating contrasts. The tutorial can be found in the examples folder. Run the tutorial from inside the nipype tutorial directory: python fmri_nipy_glm.py """ from nipype.interfaces.nipy.model import FitGLM, EstimateContrast from nipype.interfaces.nipy.preprocess import ComputeMask """Import necessary modules from nipype.""" import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.spm as spm # spm import nipype.interfaces.matlab as mlab # how to run matlab import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.rapidart as ra # artifact detection import nipype.algorithms.modelgen as model # model specification import os # system functions """ Preliminaries ------------- Set any package specific configuration. The output file format for FSL routines is being set to uncompressed NIFTI and a specific version of matlab is being used. The uncompressed format is required because SPM does not handle compressed NIFTI. """ # Set the way matlab should be called mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") """The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``s1`` and ``s2``. Each subject directory contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And one anatomical volume named struct.nii. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). These fields become the output fields of the ``datasource`` node in the pipeline. In the example below, run 'f3' is of type 'func' and gets mapped to a nifti filename through a template '%s.nii'. So 'f3' would become 'f3.nii'. """ # Specify the location of the data. data_dir = os.path.abspath('data') # Specify the subject directories subject_list = ['s1'] # Map field names to individual subject runs. info = dict(func=[['subject_id', ['f3','f5','f7','f10']]], struct=[['subject_id','struct']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. 
""" infosource.iterables = ('subject_id', subject_list) """ Preprocessing pipeline nodes ---------------------------- Now we create a :class:`nipype.interfaces.io.DataSource` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """Use :class:`nipype.interfaces.spm.Realign` for motion correction and register all images to the mean image. """ realign = pe.Node(interface=spm.Realign(), name="realign") realign.inputs.register_to_mean = True compute_mask = pe.Node(interface=ComputeMask(), name="compute_mask") """Use :class:`nipype.algorithms.rapidart` to determine which of the images in the functional series are outliers based on deviations in intensity or movement. """ art = pe.Node(interface=ra.ArtifactDetect(), name="art") art.inputs.use_differences = [True, False] art.inputs.use_norm = True art.inputs.norm_threshold = 1 art.inputs.zintensity_threshold = 3 art.inputs.mask_type = 'file' art.inputs.parameter_source = 'SPM' """Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid body registration of the functional data to the structural data. """ coregister = pe.Node(interface=spm.Coregister(), name="coregister") coregister.inputs.jobtype = 'estimate' """Smooth the functional data using :class:`nipype.interfaces.spm.Smooth`. """ smooth = pe.Node(interface=spm.Smooth(), name = "smooth") smooth.inputs.fwhm = 4 """ Set up analysis components -------------------------- Here we create a function that returns subject-specific information about the experimental paradigm. This is used by the :class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary to generate an SPM design matrix. In this tutorial, the same paradigm was used for every participant. """ def subjectinfo(subject_id): from nipype.interfaces.base import Bunch from copy import deepcopy print "Subject ID: %s\n"%str(subject_id) output = [] names = ['Task-Odd','Task-Even'] for r in range(4): onsets = [range(15,240,60),range(45,240,60)] output.insert(r, Bunch(conditions=names, onsets=deepcopy(onsets), durations=[[15] for s in names], amplitudes=None, tmod=None, pmod=None, regressor_names=None, regressors=None)) return output """Setup the contrast structure that needs to be evaluated. This is a list of lists. The inner list specifies the contrasts and has the following format - [Name,Stat,[list of condition names],[weights on those conditions]. The condition names must match the `names` listed in the `subjectinfo` function described above. """ cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]) cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]) contrasts = [cont1,cont2] """Generate design information using :class:`nipype.interfaces.spm.SpecifyModel`. nipy accepts only design specified in seconds so "output_units" has always have to be set to "secs". """ modelspec = pe.Node(interface=model.SpecifySPMModel(), name= "modelspec") modelspec.inputs.concatenate_runs = True modelspec.inputs.input_units = 'secs' modelspec.inputs.output_units = 'secs' modelspec.inputs.time_repetition = 3. 
modelspec.inputs.high_pass_filter_cutoff = 120 """Fit the GLM model using nipy and ordinary least square method """ model_estimate = pe.Node(interface=FitGLM(), name="model_estimate") model_estimate.inputs.TR = 3. model_estimate.inputs.model = "spherical" model_estimate.inputs.method = "ols" """Estimate the contrasts. The format of the contrasts definition is the same as for FSL and SPM """ contrast_estimate = pe.Node(interface=EstimateContrast(), name="contrast_estimate") cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]) cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]) contrast_estimate.inputs.contrasts = [cont1,cont2] """ Setup the pipeline ------------------ The nodes created above do not describe the flow of data. They merely describe the parameters used for each function. In this section we setup the connections between the nodes such that appropriate outputs from nodes are piped into appropriate inputs of other nodes. Use the :class:`nipype.pipeline.engine.Pipeline` to create a graph-based execution pipeline for first level analysis. The config options tells the pipeline engine to use `workdir` as the disk location to use when running the processes and keeping their outputs. The `use_parameterized_dirs` tells the engine to create sub-directories under `workdir` corresponding to the iterables in the pipeline. Thus for this pipeline there will be subject specific sub-directories. The ``nipype.pipeline.engine.Pipeline.connect`` function creates the links between the processes, i.e., how data should flow in and out of the processing nodes. """ l1pipeline = pe.Workflow(name="level1") l1pipeline.base_dir = os.path.abspath('nipy_tutorial/workingdir') l1pipeline.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (datasource,realign,[('func','in_files')]), (realign, compute_mask, [('mean_image','mean_volume')]), (realign, coregister,[('mean_image', 'source'), ('realigned_files','apply_to_files')]), (datasource, coregister,[('struct', 'target')]), (coregister, smooth, [('coregistered_files', 'in_files')]), (realign, modelspec,[('realignment_parameters','realignment_parameters')]), (smooth, modelspec,[('smoothed_files','functional_runs')]), (realign, art,[('realignment_parameters','realignment_parameters')]), (coregister, art,[('coregistered_files','realigned_files')]), (compute_mask,art,[('brain_mask','mask_file')]), (art, modelspec,[('outlier_files','outlier_files')]), (infosource, modelspec, [(("subject_id", subjectinfo), "subject_info")]), (modelspec, model_estimate,[('session_info','session_info')]), (compute_mask, model_estimate, [('brain_mask','mask')]), (model_estimate, contrast_estimate, [("beta","beta"), ("nvbeta","nvbeta"), ("s2","s2"), ("dof", "dof"), ("axis", "axis"), ("constants", "constants"), ("reg_names", "reg_names")]) ]) if __name__ == '__main__': l1pipeline.run() nipype-0.9.2/examples/fmri_openfmri.py000077500000000000000000000427251227300005300200570ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ============================ fMRI: OpenfMRI.org data, FSL ============================ A growing number of datasets are available on `OpenfMRI `_. This script demonstrates how to use nipype to analyze a data set. 
python fmri_openfmri.py --datasetdir ds107 """ from nipype import config config.enable_provenance() from glob import glob import os import nipype.pipeline.engine as pe import nipype.algorithms.modelgen as model import nipype.algorithms.rapidart as ra import nipype.interfaces.fsl as fsl import nipype.interfaces.io as nio import nipype.interfaces.utility as niu from nipype.workflows.fmri.fsl import (create_featreg_preproc, create_modelfit_workflow, create_fixed_effects_flow, create_reg_workflow) fsl.FSLCommand.set_default_output_type('NIFTI_GZ') def get_subjectinfo(subject_id, base_dir, task_id, model_id): """Get info for a given subject Parameters ---------- subject_id : string Subject identifier (e.g., sub001) base_dir : string Path to base directory of the dataset task_id : int Which task to process model_id : int Which model to process Returns ------- run_ids : list of ints Run numbers conds : list of str Condition names TR : float Repetition time """ from glob import glob import os import numpy as np condition_info = [] cond_file = os.path.join(base_dir, 'models', 'model%03d' % model_id, 'condition_key.txt') with open(cond_file, 'rt') as fp: for line in fp: info = line.strip().split() condition_info.append([info[0], info[1], ' '.join(info[2:])]) if len(condition_info) == 0: raise ValueError('No condition info found in %s' % cond_file) taskinfo = np.array(condition_info) n_tasks = len(np.unique(taskinfo[:, 0])) conds = [] run_ids = [] if task_id > n_tasks: raise ValueError('Task id %d does not exist' % task_id) for idx in range(n_tasks): taskidx = np.where(taskinfo[:, 0] == 'task%03d' % (idx + 1)) conds.append([condition.replace(' ', '_') for condition in taskinfo[taskidx[0], 2]]) files = glob(os.path.join(base_dir, subject_id, 'BOLD', 'task%03d_run*' % (idx + 1))) run_ids.insert(idx, range(1, len(files) + 1)) TR = np.genfromtxt(os.path.join(base_dir, 'scan_key.txt'))[1] return run_ids[task_id - 1], conds[task_id - 1], TR def analyze_openfmri_dataset(data_dir, subject=None, model_id=None, task_id=None, output_dir=None): """Analyzes an open fmri dataset Parameters ---------- data_dir : str Path to the base data directory work_dir : str Nipype working directory (defaults to cwd) """ """ Load nipype workflows """ preproc = create_featreg_preproc(whichvol='first') modelfit = create_modelfit_workflow() fixed_fx = create_fixed_effects_flow() registration = create_reg_workflow() """ Remove the plotting connection so that plot iterables don't propagate to the model stage """ preproc.disconnect(preproc.get_node('plot_motion'), 'out_file', preproc.get_node('outputspec'), 'motion_plots') """ Set up openfmri data specific components """ subjects = [path.split(os.path.sep)[-1] for path in glob(os.path.join(data_dir, 'sub*'))] infosource = pe.Node(niu.IdentityInterface(fields=['subject_id', 'model_id', 'task_id']), name='infosource') if subject is None: infosource.iterables = [('subject_id', subjects[:2]), ('model_id', [model_id]), ('task_id', [task_id])] else: infosource.iterables = [('subject_id', [subjects[subjects.index(subject)]]), ('model_id', [model_id]), ('task_id', [task_id])] subjinfo = pe.Node(niu.Function(input_names=['subject_id', 'base_dir', 'task_id', 'model_id'], output_names=['run_id', 'conds', 'TR'], function=get_subjectinfo), name='subjectinfo') subjinfo.inputs.base_dir = data_dir """ Return data components as anat, bold and behav """ datasource = pe.Node(nio.DataGrabber(infields=['subject_id', 'run_id', 'task_id', 'model_id'], outfields=['anat', 'bold', 'behav', 'contrasts']), 
name='datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '*' datasource.inputs.field_template = {'anat': '%s/anatomy/highres001.nii.gz', 'bold': '%s/BOLD/task%03d_r*/bold.nii.gz', 'behav': ('%s/model/model%03d/onsets/task%03d_' 'run%03d/cond*.txt'), 'contrasts': ('models/model%03d/' 'task_contrasts.txt')} datasource.inputs.template_args = {'anat': [['subject_id']], 'bold': [['subject_id', 'task_id']], 'behav': [['subject_id', 'model_id', 'task_id', 'run_id']], 'contrasts': [['model_id']]} datasource.inputs.sort_filelist = True """ Create meta workflow """ wf = pe.Workflow(name='openfmri') wf.connect(infosource, 'subject_id', subjinfo, 'subject_id') wf.connect(infosource, 'model_id', subjinfo, 'model_id') wf.connect(infosource, 'task_id', subjinfo, 'task_id') wf.connect(infosource, 'subject_id', datasource, 'subject_id') wf.connect(infosource, 'model_id', datasource, 'model_id') wf.connect(infosource, 'task_id', datasource, 'task_id') wf.connect(subjinfo, 'run_id', datasource, 'run_id') wf.connect([(datasource, preproc, [('bold', 'inputspec.func')]), ]) def get_highpass(TR, hpcutoff): return hpcutoff / (2 * TR) gethighpass = pe.Node(niu.Function(input_names=['TR', 'hpcutoff'], output_names=['highpass'], function=get_highpass), name='gethighpass') wf.connect(subjinfo, 'TR', gethighpass, 'TR') wf.connect(gethighpass, 'highpass', preproc, 'inputspec.highpass') """ Setup a basic set of contrasts, a t-test per condition """ def get_contrasts(contrast_file, task_id, conds): import numpy as np contrast_def = np.genfromtxt(contrast_file, dtype=object) contrasts = [] for row in contrast_def: if row[0] != 'task%03d' % task_id: continue con = [row[1], 'T', ['cond%03d' % i for i in range(len(conds))], row[2:].astype(float).tolist()] contrasts.append(con) return contrasts contrastgen = pe.Node(niu.Function(input_names=['contrast_file', 'task_id', 'conds'], output_names=['contrasts'], function=get_contrasts), name='contrastgen') art = pe.MapNode(interface=ra.ArtifactDetect(use_differences=[True, False], use_norm=True, norm_threshold=1, zintensity_threshold=3, parameter_source='FSL', mask_type='file'), iterfield=['realigned_files', 'realignment_parameters', 'mask_file'], name="art") modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec") modelspec.inputs.input_units = 'secs' wf.connect(subjinfo, 'TR', modelspec, 'time_repetition') wf.connect(datasource, 'behav', modelspec, 'event_files') wf.connect(subjinfo, 'TR', modelfit, 'inputspec.interscan_interval') wf.connect(subjinfo, 'conds', contrastgen, 'conds') wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file') wf.connect(infosource, 'task_id', contrastgen, 'task_id') wf.connect(contrastgen, 'contrasts', modelfit, 'inputspec.contrasts') wf.connect([(preproc, art, [('outputspec.motion_parameters', 'realignment_parameters'), ('outputspec.realigned_files', 'realigned_files'), ('outputspec.mask', 'mask_file')]), (preproc, modelspec, [('outputspec.highpassed_files', 'functional_runs'), ('outputspec.motion_parameters', 'realignment_parameters')]), (art, modelspec, [('outlier_files', 'outlier_files')]), (modelspec, modelfit, [('session_info', 'inputspec.session_info')]), (preproc, modelfit, [('outputspec.highpassed_files', 'inputspec.functional_data')]) ]) """ Reorder the copes so that now it combines across runs """ def sort_copes(files): numelements = len(files[0]) outfiles = [] for i in range(numelements): outfiles.insert(i, []) for j, elements in enumerate(files): outfiles[i].append(elements[i]) 
return outfiles def num_copes(files): return len(files) pickfirst = lambda x: x[0] wf.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst), 'flameo.mask_file')]), (modelfit, fixed_fx, [(('outputspec.copes', sort_copes), 'inputspec.copes'), ('outputspec.dof_file', 'inputspec.dof_files'), (('outputspec.varcopes', sort_copes), 'inputspec.varcopes'), (('outputspec.copes', num_copes), 'l2model.num_copes'), ]) ]) wf.connect(preproc, 'outputspec.mean', registration, 'inputspec.mean_image') wf.connect(datasource, 'anat', registration, 'inputspec.anatomical_image') registration.inputs.inputspec.target_image = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz') def merge_files(copes, varcopes): out_files = [] splits = [] out_files.extend(copes) splits.append(len(copes)) out_files.extend(varcopes) splits.append(len(varcopes)) return out_files, splits mergefunc = pe.Node(niu.Function(input_names=['copes', 'varcopes'], output_names=['out_files', 'splits'], function=merge_files), name='merge_files') wf.connect([(fixed_fx.get_node('outputspec'), mergefunc, [('copes', 'copes'), ('varcopes', 'varcopes'), ])]) wf.connect(mergefunc, 'out_files', registration, 'inputspec.source_files') def split_files(in_files, splits): copes = in_files[:splits[1]] varcopes = in_files[splits[1]:] return copes, varcopes splitfunc = pe.Node(niu.Function(input_names=['in_files', 'splits'], output_names=['copes', 'varcopes'], function=split_files), name='split_files') wf.connect(mergefunc, 'splits', splitfunc, 'splits') wf.connect(registration, 'outputspec.transformed_files', splitfunc, 'in_files') """ Connect to a datasink """ def get_subs(subject_id, conds, model_id, task_id): subs = [('_subject_id_%s_' % subject_id, '')] subs.append(('_model_id_%d' % model_id, 'model%03d' %model_id)) subs.append(('task_id_%d/' % task_id, '/task%03d_' % task_id)) subs.append(('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_warp_warp', 'mean')) for i in range(len(conds)): subs.append(('_flameo%d/cope1.' % i, 'cope%02d.' % (i + 1))) subs.append(('_flameo%d/varcope1.' % i, 'varcope%02d.' % (i + 1))) subs.append(('_flameo%d/zstat1.' % i, 'zstat%02d.' % (i + 1))) subs.append(('_flameo%d/tstat1.' % i, 'tstat%02d.' % (i + 1))) subs.append(('_flameo%d/res4d.' % i, 'res4d%02d.' % (i + 1))) subs.append(('_warpall%d/cope1_warp_warp.' % i, 'cope%02d.' % (i + 1))) subs.append(('_warpall%d/varcope1_warp_warp.' % (len(conds) + i), 'varcope%02d.' % (i + 1))) return subs subsgen = pe.Node(niu.Function(input_names=['subject_id', 'conds', 'model_id', 'task_id'], output_names=['substitutions'], function=get_subs), name='subsgen') datasink = pe.Node(interface=nio.DataSink(), name="datasink") wf.connect(infosource, 'subject_id', datasink, 'container') wf.connect(infosource, 'subject_id', subsgen, 'subject_id') wf.connect(infosource, 'model_id', subsgen, 'model_id') wf.connect(infosource, 'task_id', subsgen, 'task_id') wf.connect(contrastgen, 'contrasts', subsgen, 'conds') wf.connect(subsgen, 'substitutions', datasink, 'substitutions') wf.connect([(fixed_fx.get_node('outputspec'), datasink, [('res4d', 'res4d'), ('copes', 'copes'), ('varcopes', 'varcopes'), ('zstats', 'zstats'), ('tstats', 'tstats')]) ]) wf.connect([(splitfunc, datasink, [('copes', 'copes.mni'), ('varcopes', 'varcopes.mni'), ])]) wf.connect(registration, 'outputspec.transformed_mean', datasink, 'mean.mni') """ Set processing parameters """ hpcutoff = 120. 
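"""
As a quick illustrative check (hypothetical file names, not part of the
original workflow): for the usual case of one varcope per cope, splitting with
``split_files`` recovers the lists passed to ``merge_files``, which is what
lets the copes and varcopes be warped to MNI space in a single registration
pass and separated again afterwards.
"""

_copes = ['cope01.nii.gz', 'cope02.nii.gz']
_varcopes = ['varcope01.nii.gz', 'varcope02.nii.gz']
_merged, _splits = merge_files(_copes, _varcopes)
assert split_files(_merged, _splits) == (_copes, _varcopes)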
preproc.inputs.inputspec.fwhm = 6.0 gethighpass.inputs.hpcutoff = hpcutoff modelspec.inputs.high_pass_filter_cutoff = hpcutoff modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': True}} modelfit.inputs.inputspec.model_serial_correlations = True modelfit.inputs.inputspec.film_threshold = 1000 datasink.inputs.base_directory = output_dir return wf if __name__ == '__main__': import argparse defstr = ' (default %(default)s)' parser = argparse.ArgumentParser(prog='fmri_openfmri.py', description=__doc__) parser.add_argument('-d', '--datasetdir', required=True) parser.add_argument('-s', '--subject', default=None, help="Subject name (e.g. 'sub001')") parser.add_argument('-m', '--model', default=1, help="Model index" + defstr) parser.add_argument('-t', '--task', default=1, help="Task index" + defstr) parser.add_argument("-o", "--output_dir", dest="outdir", help="Output directory base") parser.add_argument("-w", "--work_dir", dest="work_dir", help="Output directory base") parser.add_argument("-p", "--plugin", dest="plugin", default='Linear', help="Plugin to use") parser.add_argument("--plugin_args", dest="plugin_args", help="Plugin arguments") args = parser.parse_args() outdir = args.outdir work_dir = os.getcwd() if args.work_dir: work_dir = os.path.abspath(args.work_dir) if outdir: outdir = os.path.abspath(outdir) else: outdir = os.path.join(work_dir, 'output') outdir = os.path.join(outdir, 'model%02d' % int(args.model), 'task%03d' % int(args.task)) wf = analyze_openfmri_dataset(data_dir=os.path.abspath(args.datasetdir), subject=args.subject, model_id=int(args.model), task_id=int(args.task), output_dir=outdir) wf.base_dir = work_dir if args.plugin_args: wf.run(args.plugin, plugin_args=eval(args.plugin_args)) else: wf.run(args.plugin) nipype-0.9.2/examples/fmri_slicer_coregistration.py000077500000000000000000000105131227300005300226230ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ===================================== fMRI: Coregistration - Slicer, BRAINS ===================================== This is currently not working and will raise an exception in release 0.3. It will be fixed in a later release. python fmri_slicer_coregistration.py """ #raise RuntimeWarning, 'Slicer not fully implmented' from nipype.interfaces.slicer import BRAINSFit, BRAINSResample """Import necessary modules from nipype.""" import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import os # system functions """ Preliminaries ------------- Confirm package dependencies are installed. (This is only for the tutorial, rarely would you put this in your own code.) """ from nipype.utils.misc import package_check package_check('numpy', '1.3', 'tutorial1') package_check('scipy', '0.7', 'tutorial1') package_check('networkx', '1.0', 'tutorial1') package_check('IPython', '0.10', 'tutorial1') """The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``s1`` and ``s2``. Each subject directory contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And one anatomical volume named struct.nii. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). 
These fields become the output fields of the ``datasource`` node in the pipeline. In the example below, run 'f3' is of type 'func' and gets mapped to a nifti filename through a template '%s.nii'. So 'f3' would become 'f3.nii'. """ # Specify the location of the data. data_dir = os.path.abspath('data') # Specify the subject directories subject_list = ['s1', 's3'] # Map field names to individual subject runs. info = dict(func=[['subject_id', 'f3']], struct=[['subject_id','struct']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Preprocessing pipeline nodes ---------------------------- Now we create a :class:`nipype.interfaces.io.DataSource` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True coregister = pe.Node(interface=BRAINSFit(), name="coregister") coregister.inputs.outputTransform = True coregister.inputs.outputVolume = True coregister.inputs.transformType = ["Affine"] reslice = pe.Node(interface=BRAINSResample(), name="reslice") reslice.inputs.outputVolume = True pipeline = pe.Workflow(name="pipeline") pipeline.base_dir = os.path.abspath('slicer_tutorial/workingdir') pipeline.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (datasource,coregister,[('func','movingVolume')]), (datasource,coregister,[('struct','fixedVolume')]), (coregister,reslice,[('outputTransform', 'warpTransform')]), (datasource,reslice,[('func','inputVolume')]), (datasource,reslice,[('struct','referenceVolume')]) ]) if __name__ == '__main__': pipeline.run() pipeline.write_graph() nipype-0.9.2/examples/fmri_spm.py000077500000000000000000000367631227300005300170440ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ============== fMRI: SPM, FSL ============== The fmri_spm.py integrates several interfaces to perform a first and second level analysis on a two-subject data set. The tutorial can be found in the examples folder. 
Run the tutorial from inside the nipype tutorial directory: python fmri_spm.py Import necessary modules from nipype.""" from nipype import config config.enable_provenance() from nipype import spm, fsl # In order to use this example with SPM's matlab common runtime # matlab_cmd = ('/Users/satra/Downloads/spm8/run_spm8.sh ' # '/Applications/MATLAB/MATLAB_Compiler_Runtime/v713/ script') # spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True) import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.rapidart as ra # artifact detection import nipype.algorithms.modelgen as model # model specification import os # system functions """ Preliminaries ------------- Set any package specific configuration. The output file format for FSL routines is being set to uncompressed NIFTI and a specific version of matlab is being used. The uncompressed format is required because SPM does not handle compressed NIFTI. """ # Tell fsl to generate all output in uncompressed nifti format fsl.FSLCommand.set_default_output_type('NIFTI') # Set the way matlab should be called # import nipype.interfaces.matlab as mlab # how to run matlab # mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") """The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``s1`` and ``s2``. Each subject directory contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And one anatomical volume named struct.nii. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). These fields become the output fields of the ``datasource`` node in the pipeline. In the example below, run 'f3' is of type 'func' and gets mapped to a nifti filename through a template '%s.nii'. So 'f3' would become 'f3.nii'. """ # Specify the location of the data. data_dir = os.path.abspath('data') # Specify the subject directories subject_list = ['s1', 's3'] # Map field names to individual subject runs. info = dict(func=[['subject_id', ['f3','f5','f7','f10']]], struct=[['subject_id','struct']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Preprocessing pipeline nodes ---------------------------- Now we create a :class:`nipype.interfaces.io.DataSource` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. 
""" datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """Use :class:`nipype.interfaces.spm.Realign` for motion correction and register all images to the mean image. """ realign = pe.Node(interface=spm.Realign(), name="realign") realign.inputs.register_to_mean = True """Use :class:`nipype.algorithms.rapidart` to determine which of the images in the functional series are outliers based on deviations in intensity or movement. """ art = pe.Node(interface=ra.ArtifactDetect(), name="art") art.inputs.use_differences = [True, False] art.inputs.use_norm = True art.inputs.norm_threshold = 1 art.inputs.zintensity_threshold = 3 art.inputs.mask_type = 'file' art.inputs.parameter_source = 'SPM' """Skull strip structural images using :class:`nipype.interfaces.fsl.BET`. """ skullstrip = pe.Node(interface=fsl.BET(), name="skullstrip") skullstrip.inputs.mask = True """Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid body registration of the functional data to the structural data. """ coregister = pe.Node(interface=spm.Coregister(), name="coregister") coregister.inputs.jobtype = 'estimate' """Warp functional and structural data to SPM's T1 template using :class:`nipype.interfaces.spm.Normalize`. The tutorial data set includes the template image, T1.nii. """ normalize = pe.Node(interface=spm.Normalize(), name = "normalize") normalize.inputs.template = os.path.abspath('data/T1.nii') """Smooth the functional data using :class:`nipype.interfaces.spm.Smooth`. """ smooth = pe.Node(interface=spm.Smooth(), name = "smooth") fwhmlist = [4] smooth.iterables = ('fwhm',fwhmlist) """ Set up analysis components -------------------------- Here we create a function that returns subject-specific information about the experimental paradigm. This is used by the :class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary to generate an SPM design matrix. In this tutorial, the same paradigm was used for every participant. """ def subjectinfo(subject_id): from nipype.interfaces.base import Bunch from copy import deepcopy print "Subject ID: %s\n"%str(subject_id) output = [] names = ['Task-Odd','Task-Even'] for r in range(4): onsets = [range(15,240,60),range(45,240,60)] output.insert(r, Bunch(conditions=names, onsets=deepcopy(onsets), durations=[[15] for s in names])) return output """Setup the contrast structure that needs to be evaluated. This is a list of lists. The inner list specifies the contrasts and has the following format - [Name,Stat,[list of condition names],[weights on those conditions]. The condition names must match the `names` listed in the `subjectinfo` function described above. """ cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]) cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]) contrasts = [cont1,cont2] """Generate SPM-specific design information using :class:`nipype.interfaces.spm.SpecifyModel`. """ modelspec = pe.Node(interface=model.SpecifySPMModel(), name= "modelspec") modelspec.inputs.concatenate_runs = False modelspec.inputs.input_units = 'secs' modelspec.inputs.output_units = 'secs' modelspec.inputs.time_repetition = 3. modelspec.inputs.high_pass_filter_cutoff = 120 """Generate a first level SPM.mat file for analysis :class:`nipype.interfaces.spm.Level1Design`. 
""" level1design = pe.Node(interface=spm.Level1Design(), name= "level1design") level1design.inputs.timing_units = modelspec.inputs.output_units level1design.inputs.interscan_interval = modelspec.inputs.time_repetition level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} """Use :class:`nipype.interfaces.spm.EstimateModel` to determine the parameters of the model. """ level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") level1estimate.inputs.estimation_method = {'Classical' : 1} """Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the first level contrasts specified in a few steps above. """ contrastestimate = pe.Node(interface = spm.EstimateContrast(), name="contrastestimate") contrastestimate.inputs.contrasts = contrasts contrastestimate.overwrite = True contrastestimate.config = {'execution': {'remove_unnecessary_outputs': False}} """ Setup the pipeline ------------------ The nodes created above do not describe the flow of data. They merely describe the parameters used for each function. In this section we setup the connections between the nodes such that appropriate outputs from nodes are piped into appropriate inputs of other nodes. Use the :class:`nipype.pipeline.engine.Pipeline` to create a graph-based execution pipeline for first level analysis. The config options tells the pipeline engine to use `workdir` as the disk location to use when running the processes and keeping their outputs. The `use_parameterized_dirs` tells the engine to create sub-directories under `workdir` corresponding to the iterables in the pipeline. Thus for this pipeline there will be subject specific sub-directories. The ``nipype.pipeline.engine.Pipeline.connect`` function creates the links between the processes, i.e., how data should flow in and out of the processing nodes. """ l1pipeline = pe.Workflow(name="level1") l1pipeline.base_dir = os.path.abspath('spm_tutorial/workingdir') l1pipeline.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (datasource,realign,[('func','in_files')]), (realign,coregister,[('mean_image', 'source'), ('realigned_files','apply_to_files')]), (datasource,coregister,[('struct', 'target')]), (datasource,normalize,[('struct', 'source')]), (coregister, normalize, [('coregistered_files','apply_to_files')]), (normalize, smooth, [('normalized_files', 'in_files')]), (infosource,modelspec,[(('subject_id', subjectinfo), 'subject_info')]), (realign,modelspec,[('realignment_parameters','realignment_parameters')]), (smooth,modelspec,[('smoothed_files','functional_runs')]), (normalize,skullstrip,[('normalized_source','in_file')]), (realign,art,[('realignment_parameters','realignment_parameters')]), (normalize,art,[('normalized_files','realigned_files')]), (skullstrip,art,[('mask_file','mask_file')]), (art,modelspec,[('outlier_files','outlier_files')]), (modelspec,level1design,[('session_info','session_info')]), (skullstrip,level1design,[('mask_file','mask_image')]), (level1design,level1estimate,[('spm_mat_file','spm_mat_file')]), (level1estimate,contrastestimate,[('spm_mat_file','spm_mat_file'), ('beta_images','beta_images'), ('residual_image','residual_image')]), ]) """ Setup storage results --------------------- Use :class:`nipype.interfaces.io.DataSink` to store selected outputs from the pipeline in a specific location. This allows the user to selectively choose important output bits from the analysis and keep them. The first step is to create a datasink node and then to connect outputs from the modules above to storage locations. 
These take the following form directory_name[.[@]subdir] where parts between [] are optional. For example 'realign.@mean' below creates a directory called realign in 'l1output/subject_id/' and stores the mean image output from the Realign process in the realign directory. If the @ is left out, then a sub-directory with the name 'mean' would be created and the mean image would be copied to that directory. """ datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.abspath('spm_tutorial/l1output') def getstripdir(subject_id): import os return os.path.join(os.path.abspath('spm_tutorial/workingdir'),'_subject_id_%s' % subject_id) # store relevant outputs from various stages of the 1st level analysis l1pipeline.connect([(infosource,datasink,[('subject_id','container'), (('subject_id', getstripdir),'strip_dir')]), (realign,datasink,[('mean_image','realign.@mean'), ('realignment_parameters','realign.@param')]), (art,datasink,[('outlier_files','art.@outliers'), ('statistic_files','art.@stats')]), (level1design,datasink,[('spm_mat_file','model.pre-estimate')]), (level1estimate,datasink,[('spm_mat_file','model.@spm'), ('beta_images','model.@beta'), ('mask_image','model.@mask'), ('residual_image','model.@res'), ('RPVimage','model.@rpv')]), (contrastestimate,datasink,[('con_images','contrasts.@con'), ('spmT_images','contrasts.@T')]), ]) """ Setup level 2 pipeline ---------------------- Use :class:`nipype.interfaces.io.DataGrabber` to extract the contrast images across a group of first level subjects. Unlike the previous pipeline that iterated over subjects, this pipeline will iterate over contrasts. """ # collect all the con images for each contrast. contrast_ids = range(1,len(contrasts)+1) l2source = pe.Node(nio.DataGrabber(infields=['fwhm', 'con']), name="l2source") l2source.inputs.template=os.path.abspath('spm_tutorial/l1output/*/con*/*/_fwhm_%d/con_%04d.img') # iterate over all contrast images l2source.iterables = [('fwhm',fwhmlist), ('con',contrast_ids)] l2source.inputs.sort_filelist = True """Use :class:`nipype.interfaces.spm.OneSampleTTestDesign` to perform a simple statistical analysis of the contrasts from the group of subjects (n=2 in this example). """ # setup a 1-sample t-test node onesamplettestdes = pe.Node(interface=spm.OneSampleTTestDesign(), name="onesampttestdes") l2estimate = pe.Node(interface=spm.EstimateModel(), name="level2estimate") l2estimate.inputs.estimation_method = {'Classical' : 1} l2conestimate = pe.Node(interface = spm.EstimateContrast(), name="level2conestimate") cont1 = ('Group','T', ['mean'],[1]) l2conestimate.inputs.contrasts = [cont1] l2conestimate.inputs.group_contrast = True """As before, we setup a pipeline to connect these two nodes (l2source -> onesamplettest). """ l2pipeline = pe.Workflow(name="level2") l2pipeline.base_dir = os.path.abspath('spm_tutorial/l2output') l2pipeline.connect([(l2source,onesamplettestdes,[('outfiles','in_files')]), (onesamplettestdes,l2estimate,[('spm_mat_file','spm_mat_file')]), (l2estimate,l2conestimate,[('spm_mat_file','spm_mat_file'), ('beta_images','beta_images'), ('residual_image','residual_image')]), ]) """ Execute the pipeline -------------------- The code discussed above sets up all the necessary data structures with appropriate parameters and the connectivity between the processes, but does not generate any output. To actually run the analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. 
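The ``run`` method accepts an execution plugin; the block below passes
``'MultiProc'`` so that independent nodes (for example the two subjects) execute in
parallel on the local machine. The number of worker processes can also be capped
through ``plugin_args``; a sketch of such an alternative invocation::

    l1pipeline.run(plugin='MultiProc', plugin_args={'n_procs': 4})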
""" if __name__ == '__main__': l1pipeline.run('MultiProc') l2pipeline.run('MultiProc') nipype-0.9.2/examples/fmri_spm_auditory.py000077500000000000000000000345141227300005300207540ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ========================== fMRI: SPM Auditory dataset ========================== Introduction ============ The fmri_spm_auditory.py recreates the classical workflow described in the SPM8 manual (http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf) using auditory dataset that can be downloaded from http://www.fil.ion.ucl.ac.uk/spm/data/auditory/: python fmri_spm_auditory.py Import necessary modules from nipype.""" import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.spm as spm # spm import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.matlab as mlab # how to run matlabimport nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.modelgen as model # model specification import os # system functions """ Preliminaries ------------- """ # Set the way matlab should be called mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") """ Setting up workflows -------------------- In this tutorial we will be setting up a hierarchical workflow for spm analysis. This will demonstrate how pre-defined workflows can be setup and shared across users, projects and labs. Setup preprocessing workflow ---------------------------- This is a generic preprocessing workflow that can be used by different analyses """ preproc = pe.Workflow(name='preproc') """We strongly encourage to use 4D files insteead of series of 3D for fMRI analyses for many reasons (cleanness and saving and filesystem inodes are among them). However, the the workflow presented in the SPM8 manual which this tutorial is based on uses 3D files. Therefore we leave converting to 4D as an option. We are using `merge_to_4d` variable, because switching between 3d and 4d requires some additional steps (explauned later on). Use :class:`nipype.interfaces.fsl.Merge` to merge a series of 3D files along the time dimension creating a 4d file. """ merge_to_4d = True if merge_to_4d: merge = pe.Node(interface=fsl.Merge(), name="merge") merge.inputs.dimension="t" """Use :class:`nipype.interfaces.spm.Realign` for motion correction and register all images to the mean image. """ realign = pe.Node(interface=spm.Realign(), name="realign") """Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid body registration of the functional data to the structural data. """ coregister = pe.Node(interface=spm.Coregister(), name="coregister") coregister.inputs.jobtype = 'estimate' segment = pe.Node(interface=spm.Segment(), name="segment") """Uncomment the following line for faster execution """ #segment.inputs.gaussians_per_class = [1, 1, 1, 4] """Warp functional and structural data to SPM's T1 template using :class:`nipype.interfaces.spm.Normalize`. The tutorial data set includes the template image, T1.nii. """ normalize_func = pe.Node(interface=spm.Normalize(), name = "normalize_func") normalize_func.inputs.jobtype = "write" normalize_struc = pe.Node(interface=spm.Normalize(), name = "normalize_struc") normalize_struc.inputs.jobtype = "write" """Smooth the functional data using :class:`nipype.interfaces.spm.Smooth`. 
""" smooth = pe.Node(interface=spm.Smooth(), name = "smooth") """`write_voxel_sizes` is the input of the normalize interface that is recommended to be set to the voxel sizes of the target volume. There is no need to set it manually since we van infer it from data using the following function: """ def get_vox_dims(volume): import nibabel as nb if isinstance(volume, list): volume = volume[0] nii = nb.load(volume) hdr = nii.get_header() voxdims = hdr.get_zooms() return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal voxel sizes. """ if merge_to_4d: preproc.connect([(merge, realign,[('merged_file', 'in_files')])]) preproc.connect([(realign,coregister,[('mean_image', 'target')]), (coregister, segment,[('coregistered_source','data')]), (segment, normalize_func, [('transformation_mat','parameter_file')]), (segment, normalize_struc, [('transformation_mat','parameter_file'), ('modulated_input_image', 'apply_to_files'), (('modulated_input_image', get_vox_dims), 'write_voxel_sizes')]), (realign, normalize_func, [('realigned_files', 'apply_to_files'), (('realigned_files', get_vox_dims), 'write_voxel_sizes')]), (normalize_func, smooth, [('normalized_files', 'in_files')]), ]) """ Set up analysis workflow ------------------------ """ l1analysis = pe.Workflow(name='analysis') """Generate SPM-specific design information using :class:`nipype.interfaces.spm.SpecifyModel`. """ modelspec = pe.Node(interface=model.SpecifySPMModel(), name= "modelspec") """Generate a first level SPM.mat file for analysis :class:`nipype.interfaces.spm.Level1Design`. """ level1design = pe.Node(interface=spm.Level1Design(), name= "level1design") level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} """Use :class:`nipype.interfaces.spm.EstimateModel` to determine the parameters of the model. """ level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") level1estimate.inputs.estimation_method = {'Classical' : 1} threshold = pe.Node(interface=spm.Threshold(), name="threshold") """Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the first level contrasts specified in a few steps above. """ contrastestimate = pe.Node(interface = spm.EstimateContrast(), name="contrastestimate") l1analysis.connect([(modelspec,level1design,[('session_info','session_info')]), (level1design,level1estimate,[('spm_mat_file','spm_mat_file')]), (level1estimate,contrastestimate,[('spm_mat_file','spm_mat_file'), ('beta_images','beta_images'), ('residual_image','residual_image')]), (contrastestimate, threshold,[('spm_mat_file','spm_mat_file'), ('spmT_images', 'stat_image')]), ]) """ Preproc + Analysis pipeline --------------------------- """ l1pipeline = pe.Workflow(name='firstlevel') l1pipeline.connect([(preproc, l1analysis, [('realign.realignment_parameters', 'modelspec.realignment_parameters')])]) """Pluging in `functional_runs` is a bit more complicated, because model spec expects a list of `runs`. Every run can be a 4D file or a list of 3D files. Therefore for 3D analysis we need a list of lists and to make one we need a helper function. 
""" if merge_to_4d: l1pipeline.connect([(preproc, l1analysis, [('smooth.smoothed_files', 'modelspec.functional_runs')])]) else: def makelist(item): return [item] l1pipeline.connect([(preproc, l1analysis, [(('smooth.smoothed_files',makelist), 'modelspec.functional_runs')])]) """ Data specific components ------------------------ In this tutorial there is only one subject `M00223`. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). These fields become the output fields of the ``datasource`` node in the pipeline. """ # Specify the location of the data downloaded from http://www.fil.ion.ucl.ac.uk/spm/data/auditory/ data_dir = os.path.abspath('spm_auditory_data') # Specify the subject directories subject_list = ['M00223'] # Map field names to individual subject runs. info = dict(func=[['f', 'subject_id', 'f', 'subject_id', range(16,100)]], struct=[['s', 'subject_id', 's', 'subject_id', 2]]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataGrabber` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s%s/%s%s_%03d.img' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Experimental paradigm specific components ----------------------------------------- Here we create a structure that provides information about the experimental paradigm. This is used by the :class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary to generate an SPM design matrix. """ from nipype.interfaces.base import Bunch subjectinfo = [Bunch(conditions=['Task'], onsets=[range(6,84,12)], durations=[[6]])] """Setup the contrast structure that needs to be evaluated. This is a list of lists. The inner list specifies the contrasts and has the following format - [Name,Stat,[list of condition names],[weights on those conditions]. The condition names must match the `names` listed in the `subjectinfo` function described above. 
""" cont1 = ('active > rest','T', ['Task'],[1]) contrasts = [cont1] # set up node specific inputs modelspecref = l1pipeline.inputs.analysis.modelspec modelspecref.input_units = 'scans' modelspecref.output_units = 'scans' modelspecref.time_repetition = 7 modelspecref.high_pass_filter_cutoff = 120 l1designref = l1pipeline.inputs.analysis.level1design l1designref.timing_units = modelspecref.output_units l1designref.interscan_interval = modelspecref.time_repetition l1pipeline.inputs.preproc.smooth.fwhm = [6, 6, 6] l1pipeline.inputs.analysis.modelspec.subject_info = subjectinfo l1pipeline.inputs.analysis.contrastestimate.contrasts = contrasts l1pipeline.inputs.analysis.threshold.contrast_index = 1 """ Setup the pipeline ------------------ The nodes created above do not describe the flow of data. They merely describe the parameters used for each function. In this section we setup the connections between the nodes such that appropriate outputs from nodes are piped into appropriate inputs of other nodes. Use the :class:`nipype.pipeline.engine.Pipeline` to create a graph-based execution pipeline for first level analysis. The config options tells the pipeline engine to use `workdir` as the disk location to use when running the processes and keeping their outputs. The `use_parameterized_dirs` tells the engine to create sub-directories under `workdir` corresponding to the iterables in the pipeline. Thus for this pipeline there will be subject specific sub-directories. The ``nipype.pipeline.engine.Pipeline.connect`` function creates the links between the processes, i.e., how data should flow in and out of the processing nodes. """ level1 = pe.Workflow(name="level1") level1.base_dir = os.path.abspath('spm_auditory_tutorial/workingdir') level1.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (datasource,l1pipeline,[('struct', 'preproc.coregister.source')]) ]) if merge_to_4d: level1.connect([(datasource,l1pipeline,[('func','preproc.merge.in_files')])]) else: level1.connect([(datasource,l1pipeline,[('func','preproc.realign.in_files')])]) """ Setup storage results --------------------- Use :class:`nipype.interfaces.io.DataSink` to store selected outputs from the pipeline in a specific location. This allows the user to selectively choose important output bits from the analysis and keep them. The first step is to create a datasink node and then to connect outputs from the modules above to storage locations. These take the following form directory_name[.[@]subdir] where parts between [] are optional. For example 'realign.@mean' below creates a directory called realign in 'l1output/subject_id/' and stores the mean image output from the Realign process in the realign directory. If the @ is left out, then a sub-directory with the name 'mean' would be created and the mean image would be copied to that directory. 
""" datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.abspath('spm_auditory_tutorial/l1output') def getstripdir(subject_id): import os return os.path.join(os.path.abspath('spm_auditory_tutorial/workingdir'),'_subject_id_%s' % subject_id) # store relevant outputs from various stages of the 1st level analysis level1.connect([(infosource, datasink,[('subject_id','container'), (('subject_id', getstripdir),'strip_dir')]), (l1pipeline, datasink,[('analysis.contrastestimate.con_images','contrasts.@con'), ('analysis.contrastestimate.spmT_images','contrasts.@T')]), ]) """ Execute the pipeline -------------------- The code discussed above sets up all the necessary data structures with appropriate parameters and the connectivity between the processes, but does not generate any output. To actually run the analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. """ if __name__ == '__main__': level1.run() level1.write_graph() nipype-0.9.2/examples/fmri_spm_dartel.py000077500000000000000000000503411227300005300203630ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ================= fMRI: DARTEL, SPM ================= The fmri_spm_dartel.py integrates several interfaces to perform a first and second level analysis on a two-subject data set. The tutorial can be found in the examples folder. Run the tutorial from inside the nipype tutorial directory: python fmri_spm_dartel.py Import necessary modules from nipype.""" import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.spm as spm # spm import nipype.workflows.fmri.spm as spm_wf # spm import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.rapidart as ra # artifact detection import nipype.algorithms.modelgen as model # model specification import os # system functions """ Preliminaries ------------- Set any package specific configuration. The output file format for FSL routines is being set to uncompressed NIFTI and a specific version of matlab is being used. The uncompressed format is required because SPM does not handle compressed NIFTI. """ # Tell fsl to generate all output in uncompressed nifti format fsl.FSLCommand.set_default_output_type('NIFTI') # Set the way matlab should be called #mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") #mlab.MatlabCommand.set_default_paths('/software/spm8') """ Setting up workflows -------------------- In this tutorial we will be setting up a hierarchical workflow for spm analysis. This will demonstrate how pre-defined workflows can be setup and shared across users, projects and labs. Setup preprocessing workflow ---------------------------- This is a generic preprocessing workflow that can be used by different analyses """ preproc = pe.Workflow(name='preproc') """Use :class:`nipype.interfaces.spm.Realign` for motion correction and register all images to the mean image. """ realign = pe.Node(interface=spm.Realign(), name="realign") realign.inputs.register_to_mean = True """Use :class:`nipype.algorithms.rapidart` to determine which of the images in the functional series are outliers based on deviations in intensity or movement. 
""" art = pe.Node(interface=ra.ArtifactDetect(), name="art") art.inputs.use_differences = [True, False] art.inputs.use_norm = True art.inputs.norm_threshold = 1 art.inputs.zintensity_threshold = 3 art.inputs.mask_type = 'file' art.inputs.parameter_source = 'SPM' """Skull strip structural images using :class:`nipype.interfaces.fsl.BET`. """ skullstrip = pe.Node(interface=fsl.BET(), name="skullstrip") skullstrip.inputs.mask = True """Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid body registration of the functional data to the structural data. """ coregister = pe.Node(interface=spm.Coregister(), name="coregister") coregister.inputs.jobtype = 'estimate' """Normalize and smooth functional data using DARTEL template """ normalize_and_smooth_func = pe.Node(spm.DARTELNorm2MNI(modulate=True), name='normalize_and_smooth_func') fwhmlist = [4] normalize_and_smooth_func.iterables = ('fwhm',fwhmlist) """Normalize structural data using DARTEL template """ normalize_struct = pe.Node(spm.DARTELNorm2MNI(modulate=True), name='normalize_struct') normalize_struct.inputs.fwhm = 2 preproc.connect([(realign,coregister,[('mean_image', 'source'), ('realigned_files','apply_to_files')]), (coregister, normalize_and_smooth_func, [('coregistered_files','apply_to_files')]), (normalize_struct,skullstrip,[('normalized_files','in_file')]), (realign,art,[('realignment_parameters','realignment_parameters')]), (normalize_and_smooth_func,art,[('normalized_files','realigned_files')]), (skullstrip,art,[('mask_file','mask_file')]), ]) """ Set up analysis workflow ------------------------ """ l1analysis = pe.Workflow(name='analysis') """Generate SPM-specific design information using :class:`nipype.interfaces.spm.SpecifyModel`. """ modelspec = pe.Node(interface=model.SpecifySPMModel(), name= "modelspec") modelspec.inputs.concatenate_runs = True """Generate a first level SPM.mat file for analysis :class:`nipype.interfaces.spm.Level1Design`. """ level1design = pe.Node(interface=spm.Level1Design(), name= "level1design") level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} """Use :class:`nipype.interfaces.spm.EstimateModel` to determine the parameters of the model. """ level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") level1estimate.inputs.estimation_method = {'Classical' : 1} """Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the first level contrasts specified in a few steps above. """ contrastestimate = pe.Node(interface = spm.EstimateContrast(), name="contrastestimate") """Use :class: `nipype.interfaces.utility.Select` to select each contrast for reporting. """ selectcontrast = pe.Node(interface=util.Select(), name="selectcontrast") """Use :class:`nipype.interfaces.fsl.Overlay` to combine the statistical output of the contrast estimate and a background image into one volume. """ overlaystats = pe.Node(interface=fsl.Overlay(), name="overlaystats") overlaystats.inputs.stat_thresh = (3,10) overlaystats.inputs.show_negative_stats=True overlaystats.inputs.auto_thresh_bg=True """Use :class:`nipype.interfaces.fsl.Slicer` to create images of the overlaid statistical volumes for a report of the first-level results. 
""" slicestats = pe.Node(interface=fsl.Slicer(), name="slicestats") slicestats.inputs.all_axial = True slicestats.inputs.image_width = 750 l1analysis.connect([(modelspec,level1design,[('session_info','session_info')]), (level1design,level1estimate,[('spm_mat_file','spm_mat_file')]), (level1estimate,contrastestimate,[('spm_mat_file','spm_mat_file'), ('beta_images','beta_images'), ('residual_image','residual_image')]), (contrastestimate,selectcontrast,[('spmT_images','inlist')]), (selectcontrast,overlaystats,[('out','stat_image')]), (overlaystats,slicestats,[('out_file','in_file')]) ]) """ Preproc + Analysis pipeline --------------------------- """ l1pipeline = pe.Workflow(name='firstlevel') l1pipeline.connect([(preproc, l1analysis, [('realign.realignment_parameters', 'modelspec.realignment_parameters'), ('normalize_and_smooth_func.normalized_files', 'modelspec.functional_runs'), ('art.outlier_files', 'modelspec.outlier_files'), ('skullstrip.mask_file', 'level1design.mask_image'), ('normalize_struct.normalized_files', 'overlaystats.background_image')]), ]) """ Data specific components ------------------------ The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``s1`` and ``s2``. Each subject directory contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And one anatomical volume named struct.nii. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). These fields become the output fields of the ``datasource`` node in the pipeline. In the example below, run 'f3' is of type 'func' and gets mapped to a nifti filename through a template '%s.nii'. So 'f3' would become 'f3.nii'. """ # Specify the location of the data. data_dir = os.path.abspath('data') # Specify the subject directories subject_list = ['s1', 's3'] # Map field names to individual subject runs. info = dict(func=[['subject_id', ['f3','f5','f7','f10']]], struct=[['subject_id','struct']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataGrabber` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. 
""" datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """We need to create a separate workflow to make the DARTEL template """ datasource_dartel = pe.MapNode(interface=nio.DataGrabber(infields=['subject_id'], outfields=['struct']), name = 'datasource_dartel', iterfield = ['subject_id']) datasource_dartel.inputs.base_directory = data_dir datasource_dartel.inputs.template = '%s/%s.nii' datasource_dartel.inputs.template_args = dict(struct=[['subject_id','struct']]) datasource_dartel.inputs.sort_filelist = True datasource_dartel.inputs.subject_id = subject_list """Here we make sure that struct files have names corresponding to the subject ids. This way we will be able to pick the right field flows later. """ rename_dartel = pe.MapNode(util.Rename(format_string="subject_id_%(subject_id)s_struct"), iterfield=['in_file', 'subject_id'], name = 'rename_dartel') rename_dartel.inputs.subject_id = subject_list rename_dartel.inputs.keep_ext = True dartel_workflow = spm_wf.create_DARTEL_template(name='dartel_workflow') dartel_workflow.inputs.inputspec.template_prefix = "template" """This function will allow to pick the right field flow for each subject """ def pickFieldFlow(dartel_flow_fields, subject_id): from nipype.utils.filemanip import split_filename for f in dartel_flow_fields: _, name, _ = split_filename(f) if name.find("subject_id_%s"%subject_id): return f raise Exception pick_flow = pe.Node(util.Function(input_names=['dartel_flow_fields', 'subject_id'], output_names=['dartel_flow_field'], function = pickFieldFlow), name = "pick_flow") """ Experimental paradigm specific components ----------------------------------------- Here we create a function that returns subject-specific information about the experimental paradigm. This is used by the :class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary to generate an SPM design matrix. In this tutorial, the same paradigm was used for every participant. """ def subjectinfo(subject_id): from nipype.interfaces.base import Bunch from copy import deepcopy print "Subject ID: %s\n"%str(subject_id) output = [] names = ['Task-Odd','Task-Even'] for r in range(4): onsets = [range(15,240,60),range(45,240,60)] output.insert(r, Bunch(conditions=names, onsets=deepcopy(onsets), durations=[[15] for s in names], amplitudes=None, tmod=None, pmod=None, regressor_names=None, regressors=None)) return output """Setup the contrast structure that needs to be evaluated. This is a list of lists. The inner list specifies the contrasts and has the following format - [Name,Stat,[list of condition names],[weights on those conditions]. The condition names must match the `names` listed in the `subjectinfo` function described above. """ cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]) cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]) contrasts = [cont1,cont2] # set up node specific inputs modelspecref = l1pipeline.inputs.analysis.modelspec modelspecref.input_units = 'secs' modelspecref.output_units = 'secs' modelspecref.time_repetition = 3. 
modelspecref.high_pass_filter_cutoff = 120 l1designref = l1pipeline.inputs.analysis.level1design l1designref.timing_units = modelspecref.output_units l1designref.interscan_interval = modelspecref.time_repetition l1pipeline.inputs.analysis.contrastestimate.contrasts = contrasts # Iterate over each contrast and create report images. selectcontrast.iterables = ('index',[[i] for i in range(len(contrasts))]) """ Setup the pipeline ------------------ The nodes created above do not describe the flow of data. They merely describe the parameters used for each function. In this section we setup the connections between the nodes such that appropriate outputs from nodes are piped into appropriate inputs of other nodes. Use the :class:`nipype.pipeline.engine.Pipeline` to create a graph-based execution pipeline for first level analysis. The config options tells the pipeline engine to use `workdir` as the disk location to use when running the processes and keeping their outputs. The `use_parameterized_dirs` tells the engine to create sub-directories under `workdir` corresponding to the iterables in the pipeline. Thus for this pipeline there will be subject specific sub-directories. The ``nipype.pipeline.engine.Pipeline.connect`` function creates the links between the processes, i.e., how data should flow in and out of the processing nodes. """ level1 = pe.Workflow(name="level1") level1.base_dir = os.path.abspath('spm_dartel_tutorial/workingdir') level1.connect([(datasource_dartel, rename_dartel, [('struct', 'in_file')]), (rename_dartel, dartel_workflow, [('out_file','inputspec.structural_files')]), (infosource, datasource, [('subject_id', 'subject_id')]), (datasource,l1pipeline,[('func','preproc.realign.in_files'), ('struct', 'preproc.coregister.target'), ('struct', 'preproc.normalize_struct.apply_to_files')]), (dartel_workflow, l1pipeline, [('outputspec.template_file', 'preproc.normalize_struct.template_file'), ('outputspec.template_file', 'preproc.normalize_and_smooth_func.template_file')]), (infosource, pick_flow, [('subject_id', 'subject_id')]), (dartel_workflow, pick_flow, [('outputspec.flow_fields', 'dartel_flow_fields')]), (pick_flow, l1pipeline, [('dartel_flow_field', 'preproc.normalize_struct.flowfield_files'), ('dartel_flow_field', 'preproc.normalize_and_smooth_func.flowfield_files')]), (infosource,l1pipeline,[(('subject_id', subjectinfo), 'analysis.modelspec.subject_info')]), ]) """ Setup storage results --------------------- Use :class:`nipype.interfaces.io.DataSink` to store selected outputs from the pipeline in a specific location. This allows the user to selectively choose important output bits from the analysis and keep them. The first step is to create a datasink node and then to connect outputs from the modules above to storage locations. These take the following form directory_name[.[@]subdir] where parts between [] are optional. For example 'realign.@mean' below creates a directory called realign in 'l1output/subject_id/' and stores the mean image output from the Realign process in the realign directory. If the @ is left out, then a sub-directory with the name 'mean' would be created and the mean image would be copied to that directory. 
""" datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.abspath('spm_dartel_tutorial/l1output') report = pe.Node(interface=nio.DataSink(), name='report') report.inputs.base_directory = os.path.abspath('spm_dartel_tutorial/report') report.inputs.parameterization = False def getstripdir(subject_id): import os return os.path.join(os.path.abspath('spm_dartel_tutorial/workingdir'),'_subject_id_%s' % subject_id) # store relevant outputs from various stages of the 1st level analysis level1.connect([(infosource, datasink,[('subject_id','container'), (('subject_id', getstripdir),'strip_dir')]), (l1pipeline, datasink,[('analysis.contrastestimate.con_images','contrasts.@con'), ('analysis.contrastestimate.spmT_images','contrasts.@T')]), (infosource, report,[('subject_id', 'container'), (('subject_id', getstripdir),'strip_dir')]), (l1pipeline, report,[('analysis.slicestats.out_file', '@report')]), ]) """ Execute the pipeline -------------------- The code discussed above sets up all the necessary data structures with appropriate parameters and the connectivity between the processes, but does not generate any output. To actually run the analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. """ if __name__ == '__main__': level1.run(plugin_args={'n_procs': 4}) level1.write_graph() """ Setup level 2 pipeline ---------------------- Use :class:`nipype.interfaces.io.DataGrabber` to extract the contrast images across a group of first level subjects. Unlike the previous pipeline that iterated over subjects, this pipeline will iterate over contrasts. """ # collect all the con images for each contrast. contrast_ids = range(1,len(contrasts)+1) l2source = pe.Node(nio.DataGrabber(infields=['fwhm', 'con']), name="l2source") l2source.inputs.template=os.path.abspath('spm_dartel_tutorial/l1output/*/con*/*/_fwhm_%d/con_%04d.img') # iterate over all contrast images l2source.iterables = [('fwhm',fwhmlist), ('con',contrast_ids)] l2source.inputs.sort_filelist = True """Use :class:`nipype.interfaces.spm.OneSampleTTestDesign` to perform a simple statistical analysis of the contrasts from the group of subjects (n=2 in this example). """ # setup a 1-sample t-test node onesamplettestdes = pe.Node(interface=spm.OneSampleTTestDesign(), name="onesampttestdes") l2estimate = pe.Node(interface=spm.EstimateModel(), name="level2estimate") l2estimate.inputs.estimation_method = {'Classical' : 1} l2conestimate = pe.Node(interface = spm.EstimateContrast(), name="level2conestimate") cont1 = ('Group','T', ['mean'],[1]) l2conestimate.inputs.contrasts = [cont1] l2conestimate.inputs.group_contrast = True """As before, we setup a pipeline to connect these two nodes (l2source -> onesamplettest). 
""" l2pipeline = pe.Workflow(name="level2") l2pipeline.base_dir = os.path.abspath('spm_dartel_tutorial/l2output') l2pipeline.connect([(l2source,onesamplettestdes,[('outfiles','in_files')]), (onesamplettestdes,l2estimate,[('spm_mat_file','spm_mat_file')]), (l2estimate,l2conestimate,[('spm_mat_file','spm_mat_file'), ('beta_images','beta_images'), ('residual_image','residual_image')]), ]) """ Execute the second level pipeline --------------------------------- """ if __name__ == '__main__': l2pipeline.run() nipype-0.9.2/examples/fmri_spm_face.py000077500000000000000000000450451227300005300200130ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ===================================== fMRI: Famous vs non-famous faces, SPM ===================================== Introduction ============ The fmri_spm_face.py recreates the classical workflow described in the SPM8 manual (http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf) using auditory dataset that can be downloaded from http://www.fil.ion.ucl.ac.uk/spm/data/face_rep/face_rep_SPM5.html:: python fmri_spm.py Import necessary modules from nipype.""" import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.spm as spm # spm import nipype.interfaces.matlab as mlab # how to run matlab import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.modelgen as model # model specification import os # system functions """ Preliminaries ------------- Set any package specific configuration. The output file format for FSL routines is being set to uncompressed NIFTI and a specific version of matlab is being used. The uncompressed format is required because SPM does not handle compressed NIFTI. """ # Set the way matlab should be called mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") # If SPM is not in your MATLAB path you should add it here # mlab.MatlabCommand.set_default_paths('/path/to/your/spm8') """ Setting up workflows -------------------- In this tutorial we will be setting up a hierarchical workflow for spm analysis. It one is slightly different then the one used in spm_tutorial2. Setup preprocessing workflow ---------------------------- This is a generic preprocessing workflow that can be used by different analyses """ preproc = pe.Workflow(name='preproc') """Use :class:`nipype.interfaces.spm.Realign` for motion correction and register all images to the mean image. """ realign = pe.Node(interface=spm.Realign(), name="realign") slice_timing = pe.Node(interface=spm.SliceTiming(), name="slice_timing") """Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid body registration of the functional data to the structural data. """ coregister = pe.Node(interface=spm.Coregister(), name="coregister") coregister.inputs.jobtype = 'estimate' segment = pe.Node(interface=spm.Segment(), name="segment") segment.inputs.save_bias_corrected = True """Uncomment the following line for faster execution """ #segment.inputs.gaussians_per_class = [1, 1, 1, 4] """Warp functional and structural data to SPM's T1 template using :class:`nipype.interfaces.spm.Normalize`. The tutorial data set includes the template image, T1.nii. 
""" normalize_func = pe.Node(interface=spm.Normalize(), name = "normalize_func") normalize_func.inputs.jobtype = "write" normalize_struc = pe.Node(interface=spm.Normalize(), name = "normalize_struc") normalize_struc.inputs.jobtype = "write" """Smooth the functional data using :class:`nipype.interfaces.spm.Smooth`. """ smooth = pe.Node(interface=spm.Smooth(), name = "smooth") """`write_voxel_sizes` is the input of the normalize interface that is recommended to be set to the voxel sizes of the target volume. There is no need to set it manually since we van infer it from data using the following function: """ def get_vox_dims(volume): import nibabel as nb if isinstance(volume, list): volume = volume[0] nii = nb.load(volume) hdr = nii.get_header() voxdims = hdr.get_zooms() return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal voxel sizes. """ preproc.connect([(realign,coregister,[('mean_image', 'target')]), (coregister, segment,[('coregistered_source','data')]), (segment, normalize_func, [('transformation_mat','parameter_file')]), (segment, normalize_struc, [('transformation_mat','parameter_file'), ('bias_corrected_image', 'apply_to_files'), (('bias_corrected_image', get_vox_dims), 'write_voxel_sizes')]), (realign, slice_timing, [('realigned_files', 'in_files')]), (slice_timing, normalize_func, [('timecorrected_files', 'apply_to_files'), (('timecorrected_files', get_vox_dims), 'write_voxel_sizes')]), (normalize_func, smooth, [('normalized_files', 'in_files')]), ]) """ Set up analysis workflow ------------------------ """ l1analysis = pe.Workflow(name='analysis') """Generate SPM-specific design information using :class:`nipype.interfaces.spm.SpecifyModel`. """ modelspec = pe.Node(interface=model.SpecifySPMModel(), name= "modelspec") """Generate a first level SPM.mat file for analysis :class:`nipype.interfaces.spm.Level1Design`. """ level1design = pe.Node(interface=spm.Level1Design(), name= "level1design") """Use :class:`nipype.interfaces.spm.EstimateModel` to determine the parameters of the model. """ level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") level1estimate.inputs.estimation_method = {'Classical' : 1} threshold = pe.Node(interface=spm.Threshold(), name="threshold") """Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the first level contrasts specified in a few steps above. """ contrastestimate = pe.Node(interface = spm.EstimateContrast(), name="contrastestimate") def pickfirst(l): return l[0] l1analysis.connect([(modelspec,level1design,[('session_info','session_info')]), (level1design,level1estimate,[('spm_mat_file','spm_mat_file')]), (level1estimate,contrastestimate,[('spm_mat_file','spm_mat_file'), ('beta_images','beta_images'), ('residual_image','residual_image')]), (contrastestimate, threshold,[('spm_mat_file','spm_mat_file'), (('spmT_images', pickfirst), 'stat_image')]), ]) """ Preproc + Analysis pipeline --------------------------- """ l1pipeline = pe.Workflow(name='firstlevel') l1pipeline.connect([(preproc, l1analysis, [('realign.realignment_parameters', 'modelspec.realignment_parameters')])]) """Pluging in `functional_runs` is a bit more complicated, because model spec expects a list of `runs`. Every run can be a 4D file or a list of 3D files. 
Therefore for 3D analysis we need a list of lists and to make one we need a helper function. """ def makelist(item): return [item] l1pipeline.connect([(preproc, l1analysis, [(('smooth.smoothed_files',makelist), 'modelspec.functional_runs')])]) """ Data specific components ------------------------ In this tutorial there is only one subject `M03953`. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). These fields become the output fields of the ``datasource`` node in the pipeline. """ # Specify the location of the data downloaded from http://www.fil.ion.ucl.ac.uk/spm/data/face_rep/face_rep_SPM5.html data_dir = os.path.abspath('spm_face_data') # Specify the subject directories subject_list = ['M03953'] # Map field names to individual subject runs. info = dict(func=[['RawEPI', 'subject_id', 5, ["_%04d"%i for i in range(6,357)]]], struct=[['Structural', 'subject_id', 7, '']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataGrabber` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/s%s_%04d%s.img' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Experimental paradigm specific components ----------------------------------------- Here we create a structure that provides information about the experimental paradigm. This is used by the :class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary to generate an SPM design matrix. """ from nipype.interfaces.base import Bunch """We're importing the onset times from a mat file (found on http://www.fil.ion.ucl.ac.uk/spm/data/face_rep/face_rep_SPM5.html """ from scipy.io.matlab import loadmat mat = loadmat(os.path.join(data_dir, "sots.mat"), struct_as_record=False) sot = mat['sot'][0] itemlag = mat['itemlag'][0] subjectinfo = [Bunch(conditions=['N1', 'N2', 'F1', 'F2'], onsets=[sot[0], sot[1], sot[2], sot[3]], durations=[[0], [0], [0], [0]], amplitudes=None, tmod=None, pmod=None, regressor_names=None, regressors=None)] """Setup the contrast structure that needs to be evaluated. This is a list of lists. The inner list specifies the contrasts and has the following format - [Name,Stat,[list of condition names],[weights on those conditions]. The condition names must match the `names` listed in the `subjectinfo` function described above. 
""" cond1 = ('positive effect of condition','T', ['N1*bf(1)','N2*bf(1)','F1*bf(1)','F2*bf(1)'],[1,1,1,1]) cond2 = ('positive effect of condition_dtemo','T', ['N1*bf(2)','N2*bf(2)','F1*bf(2)','F2*bf(2)'],[1,1,1,1]) cond3 = ('positive effect of condition_ddisp','T', ['N1*bf(3)','N2*bf(3)','F1*bf(3)','F2*bf(3)'],[1,1,1,1]) # non-famous > famous fam1 = ('positive effect of Fame','T', ['N1*bf(1)','N2*bf(1)','F1*bf(1)','F2*bf(1)'],[1,1,-1,-1]) fam2 = ('positive effect of Fame_dtemp','T', ['N1*bf(2)','N2*bf(2)','F1*bf(2)','F2*bf(2)'],[1,1,-1,-1]) fam3 = ('positive effect of Fame_ddisp','T', ['N1*bf(3)','N2*bf(3)','F1*bf(3)','F2*bf(3)'],[1,1,-1,-1]) # rep1 > rep2 rep1 = ('positive effect of Rep','T', ['N1*bf(1)','N2*bf(1)','F1*bf(1)','F2*bf(1)'],[1,-1,1,-1]) rep2 = ('positive effect of Rep_dtemp','T', ['N1*bf(2)','N2*bf(2)','F1*bf(2)','F2*bf(2)'],[1,-1,1,-1]) rep3 = ('positive effect of Rep_ddisp','T', ['N1*bf(3)','N2*bf(3)','F1*bf(3)','F2*bf(3)'],[1,-1,1,-1]) int1 = ('positive interaction of Fame x Rep','T', ['N1*bf(1)','N2*bf(1)','F1*bf(1)','F2*bf(1)'],[-1,-1,-1,1]) int2 = ('positive interaction of Fame x Rep_dtemp','T', ['N1*bf(2)','N2*bf(2)','F1*bf(2)','F2*bf(2)'],[1,-1,-1,1]) int3 = ('positive interaction of Fame x Rep_ddisp','T', ['N1*bf(3)','N2*bf(3)','F1*bf(3)','F2*bf(3)'],[1,-1,-1,1]) contf1 = ['average effect condition','F', [cond1, cond2, cond3]] contf2 = ['main effect Fam', 'F', [fam1, fam2, fam3]] contf3 = ['main effect Rep', 'F', [rep1, rep2, rep3]] contf4 = ['interaction: Fam x Rep', 'F', [int1, int2, int3]] contrasts = [cond1, cond2, cond3, fam1, fam2, fam3, rep1, rep2, rep3, int1, int2, int3, contf1, contf2,contf3,contf4] """Setting up nodes inputs """ num_slices = 24 TR = 2. slice_timingref = l1pipeline.inputs.preproc.slice_timing slice_timingref.num_slices = num_slices slice_timingref.time_repetition = TR slice_timingref.time_acquisition = TR - TR/float(num_slices) slice_timingref.slice_order = range(num_slices,0,-1) slice_timingref.ref_slice = int(num_slices/2) l1pipeline.inputs.preproc.smooth.fwhm = [8, 8, 8] # set up node specific inputs modelspecref = l1pipeline.inputs.analysis.modelspec modelspecref.input_units = 'scans' modelspecref.output_units = 'scans' modelspecref.time_repetition = TR modelspecref.high_pass_filter_cutoff = 120 l1designref = l1pipeline.inputs.analysis.level1design l1designref.timing_units = modelspecref.output_units l1designref.interscan_interval = modelspecref.time_repetition l1designref.microtime_resolution = slice_timingref.num_slices l1designref.microtime_onset = slice_timingref.ref_slice l1designref.bases = {'hrf':{'derivs': [1,1]}} """ The following lines automatically inform SPM to create a default set of contrats for a factorial design. 
""" #l1designref.factor_info = [dict(name = 'Fame', levels = 2), # dict(name = 'Rep', levels = 2)] l1pipeline.inputs.analysis.modelspec.subject_info = subjectinfo l1pipeline.inputs.analysis.contrastestimate.contrasts = contrasts l1pipeline.inputs.analysis.threshold.contrast_index = 1 """ Use derivative estimates in the non-parametric model """ l1pipeline.inputs.analysis.contrastestimate.use_derivs = True """ Setting up parametricvariation of the model """ subjectinfo_param = [Bunch(conditions=['N1', 'N2', 'F1', 'F2'], onsets=[sot[0], sot[1], sot[2], sot[3]], durations=[[0], [0], [0], [0]], amplitudes=None, tmod=None, pmod=[None, Bunch(name=['Lag'], param=itemlag[1].tolist(), poly=[2]), None, Bunch(name=['Lag'], param=itemlag[3].tolist(), poly=[2])], regressor_names=None, regressors=None)] cont1 = ('Famous_lag1','T', ['F2xLag^1'],[1]) cont2 = ('Famous_lag2','T', ['F2xLag^2'],[1]) fcont1 = ('Famous Lag', 'F', [cont1, cont2]) paramcontrasts = [cont1, cont2, fcont1] paramanalysis = l1analysis.clone(name='paramanalysis') paramanalysis.inputs.level1design.bases = {'hrf':{'derivs': [0,0]}} paramanalysis.inputs.modelspec.subject_info = subjectinfo_param paramanalysis.inputs.contrastestimate.contrasts = paramcontrasts paramanalysis.inputs.contrastestimate.use_derivs = False l1pipeline.connect([(preproc, paramanalysis, [('realign.realignment_parameters', 'modelspec.realignment_parameters'), (('smooth.smoothed_files',makelist), 'modelspec.functional_runs')])]) """ Setup the pipeline ------------------ The nodes created above do not describe the flow of data. They merely describe the parameters used for each function. In this section we setup the connections between the nodes such that appropriate outputs from nodes are piped into appropriate inputs of other nodes. Use the :class:`nipype.pipeline.engine.Pipeline` to create a graph-based execution pipeline for first level analysis. The config options tells the pipeline engine to use `workdir` as the disk location to use when running the processes and keeping their outputs. The `use_parameterized_dirs` tells the engine to create sub-directories under `workdir` corresponding to the iterables in the pipeline. Thus for this pipeline there will be subject specific sub-directories. The ``nipype.pipeline.engine.Pipeline.connect`` function creates the links between the processes, i.e., how data should flow in and out of the processing nodes. """ level1 = pe.Workflow(name="level1") level1.base_dir = os.path.abspath('spm_face_tutorial/workingdir') level1.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (datasource,l1pipeline,[('struct', 'preproc.coregister.source'), ('func','preproc.realign.in_files')]) ]) """ Setup storage results --------------------- Use :class:`nipype.interfaces.io.DataSink` to store selected outputs from the pipeline in a specific location. This allows the user to selectively choose important output bits from the analysis and keep them. The first step is to create a datasink node and then to connect outputs from the modules above to storage locations. These take the following form directory_name[.[@]subdir] where parts between [] are optional. For example 'realign.@mean' below creates a directory called realign in 'l1output/subject_id/' and stores the mean image output from the Realign process in the realign directory. If the @ is left out, then a sub-directory with the name 'mean' would be created and the mean image would be copied to that directory. 
""" datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.abspath('spm_auditory_tutorial/l1output') def getstripdir(subject_id): import os return os.path.join(os.path.abspath('spm_auditory_tutorial/workingdir'),'_subject_id_%s' % subject_id) # store relevant outputs from various stages of the 1st level analysis level1.connect([(infosource, datasink,[('subject_id','container'), (('subject_id', getstripdir),'strip_dir')]), (l1pipeline, datasink,[('analysis.contrastestimate.con_images','contrasts.@con'), ('analysis.contrastestimate.spmT_images','contrasts.@T'), ('paramanalysis.contrastestimate.con_images','paramcontrasts.@con'), ('paramanalysis.contrastestimate.spmT_images','paramcontrasts.@T')]), ]) """ Execute the pipeline -------------------- The code discussed above sets up all the necessary data structures with appropriate parameters and the connectivity between the processes, but does not generate any output. To actually run the analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. """ if __name__ == '__main__': level1.run() level1.write_graph() nipype-0.9.2/examples/fmri_spm_nested.py000077500000000000000000000432201227300005300203700ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ========================== fMRI: SPM nested workflows ========================== The fmri_spm.py integrates several interfaces to perform a first and second level analysis on a two-subject data set. The tutorial can be found in the examples folder. Run the tutorial from inside the nipype tutorial directory: python fmri_spm_nested.py Import necessary modules from nipype.""" import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.spm as spm # spm import nipype.interfaces.matlab as mlab # how to run matlab import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.rapidart as ra # artifact detection import nipype.algorithms.modelgen as model # model specification import os # system functions """ Preliminaries ------------- Set any package specific configuration. The output file format for FSL routines is being set to uncompressed NIFTI and a specific version of matlab is being used. The uncompressed format is required because SPM does not handle compressed NIFTI. """ # Tell fsl to generate all output in uncompressed nifti format fsl.FSLCommand.set_default_output_type('NIFTI') # Set the way matlab should be called #mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") #mlab.MatlabCommand.set_default_paths('/software/spm8') """ Setting up workflows -------------------- In this tutorial we will be setting up a hierarchical workflow for spm analysis. This will demonstrate how pre-defined workflows can be setup and shared across users, projects and labs. Setup preprocessing workflow ---------------------------- This is a generic preprocessing workflow that can be used by different analyses """ preproc = pe.Workflow(name='preproc') """Use :class:`nipype.interfaces.spm.Realign` for motion correction and register all images to the mean image. 
""" realign = pe.Node(interface=spm.Realign(), name="realign") realign.inputs.register_to_mean = True """Use :class:`nipype.algorithms.rapidart` to determine which of the images in the functional series are outliers based on deviations in intensity or movement. """ art = pe.Node(interface=ra.ArtifactDetect(), name="art") art.inputs.use_differences = [True, False] art.inputs.use_norm = True art.inputs.norm_threshold = 1 art.inputs.zintensity_threshold = 3 art.inputs.mask_type = 'file' art.inputs.parameter_source = 'SPM' """Skull strip structural images using :class:`nipype.interfaces.fsl.BET`. """ skullstrip = pe.Node(interface=fsl.BET(), name="skullstrip") skullstrip.inputs.mask = True """Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid body registration of the functional data to the structural data. """ coregister = pe.Node(interface=spm.Coregister(), name="coregister") coregister.inputs.jobtype = 'estimate' """Warp functional and structural data to SPM's T1 template using :class:`nipype.interfaces.spm.Normalize`. The tutorial data set includes the template image, T1.nii. """ normalize = pe.Node(interface=spm.Normalize(), name = "normalize") normalize.inputs.template = os.path.abspath('data/T1.nii') """Smooth the functional data using :class:`nipype.interfaces.spm.Smooth`. """ smooth = pe.Node(interface=spm.Smooth(), name = "smooth") fwhmlist = [4] smooth.iterables = ('fwhm',fwhmlist) preproc.connect([(realign,coregister,[('mean_image', 'source'), ('realigned_files','apply_to_files')]), (coregister, normalize, [('coregistered_files','apply_to_files')]), (normalize, smooth, [('normalized_files', 'in_files')]), (normalize,skullstrip,[('normalized_source','in_file')]), (realign,art,[('realignment_parameters','realignment_parameters')]), (normalize,art,[('normalized_files','realigned_files')]), (skullstrip,art,[('mask_file','mask_file')]), ]) """ Set up analysis workflow ------------------------ """ l1analysis = pe.Workflow(name='analysis') """Generate SPM-specific design information using :class:`nipype.interfaces.spm.SpecifyModel`. """ modelspec = pe.Node(interface=model.SpecifySPMModel(), name= "modelspec") modelspec.inputs.concatenate_runs = True """Generate a first level SPM.mat file for analysis :class:`nipype.interfaces.spm.Level1Design`. """ level1design = pe.Node(interface=spm.Level1Design(), name= "level1design") level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} """Use :class:`nipype.interfaces.spm.EstimateModel` to determine the parameters of the model. """ level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") level1estimate.inputs.estimation_method = {'Classical' : 1} """Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the first level contrasts specified in a few steps above. """ contrastestimate = pe.Node(interface = spm.EstimateContrast(), name="contrastestimate") """Use :class: `nipype.interfaces.utility.Select` to select each contrast for reporting. """ selectcontrast = pe.Node(interface=util.Select(), name="selectcontrast") """Use :class:`nipype.interfaces.fsl.Overlay` to combine the statistical output of the contrast estimate and a background image into one volume. """ overlaystats = pe.Node(interface=fsl.Overlay(), name="overlaystats") overlaystats.inputs.stat_thresh = (3,10) overlaystats.inputs.show_negative_stats=True overlaystats.inputs.auto_thresh_bg=True """Use :class:`nipype.interfaces.fsl.Slicer` to create images of the overlaid statistical volumes for a report of the first-level results. 
""" slicestats = pe.Node(interface=fsl.Slicer(), name="slicestats") slicestats.inputs.all_axial = True slicestats.inputs.image_width = 750 l1analysis.connect([(modelspec,level1design,[('session_info','session_info')]), (level1design,level1estimate,[('spm_mat_file','spm_mat_file')]), (level1estimate,contrastestimate,[('spm_mat_file','spm_mat_file'), ('beta_images','beta_images'), ('residual_image','residual_image')]), (contrastestimate,selectcontrast,[('spmT_images','inlist')]), (selectcontrast,overlaystats,[('out','stat_image')]), (overlaystats,slicestats,[('out_file','in_file')]) ]) """ Preproc + Analysis pipeline --------------------------- """ l1pipeline = pe.Workflow(name='firstlevel') l1pipeline.connect([(preproc, l1analysis, [('realign.realignment_parameters', 'modelspec.realignment_parameters'), ('smooth.smoothed_files', 'modelspec.functional_runs'), ('art.outlier_files', 'modelspec.outlier_files'), ('skullstrip.mask_file', 'level1design.mask_image'), ('normalize.normalized_source', 'overlaystats.background_image')]), ]) """ Data specific components ------------------------ The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``s1`` and ``s2``. Each subject directory contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And one anatomical volume named struct.nii. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). These fields become the output fields of the ``datasource`` node in the pipeline. In the example below, run 'f3' is of type 'func' and gets mapped to a nifti filename through a template '%s.nii'. So 'f3' would become 'f3.nii'. """ # Specify the location of the data. data_dir = os.path.abspath('data') # Specify the subject directories subject_list = ['s1', 's3'] # Map field names to individual subject runs. info = dict(func=[['subject_id', ['f3','f5','f7','f10']]], struct=[['subject_id','struct']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Now we create a :class:`nipype.interfaces.io.DataGrabber` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Experimental paradigm specific components ----------------------------------------- Here we create a function that returns subject-specific information about the experimental paradigm. 
This is used by the :class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary to generate an SPM design matrix. In this tutorial, the same paradigm was used for every participant. """ def subjectinfo(subject_id): from nipype.interfaces.base import Bunch from copy import deepcopy print "Subject ID: %s\n"%str(subject_id) output = [] names = ['Task-Odd','Task-Even'] for r in range(4): onsets = [range(15,240,60),range(45,240,60)] output.insert(r, Bunch(conditions=names, onsets=deepcopy(onsets), durations=[[15] for s in names], amplitudes=None, tmod=None, pmod=None, regressor_names=None, regressors=None)) return output """Setup the contrast structure that needs to be evaluated. This is a list of lists. The inner list specifies the contrasts and has the following format - [Name,Stat,[list of condition names],[weights on those conditions]. The condition names must match the `names` listed in the `subjectinfo` function described above. """ cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]) cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]) contrasts = [cont1,cont2] # set up node specific inputs modelspecref = l1pipeline.inputs.analysis.modelspec modelspecref.input_units = 'secs' modelspecref.output_units = 'secs' modelspecref.time_repetition = 3. modelspecref.high_pass_filter_cutoff = 120 l1designref = l1pipeline.inputs.analysis.level1design l1designref.timing_units = modelspecref.output_units l1designref.interscan_interval = modelspecref.time_repetition l1pipeline.inputs.analysis.contrastestimate.contrasts = contrasts # Iterate over each contrast and create report images. selectcontrast.iterables = ('index',[[i] for i in range(len(contrasts))]) """ Setup the pipeline ------------------ The nodes created above do not describe the flow of data. They merely describe the parameters used for each function. In this section we setup the connections between the nodes such that appropriate outputs from nodes are piped into appropriate inputs of other nodes. Use the :class:`nipype.pipeline.engine.Pipeline` to create a graph-based execution pipeline for first level analysis. The config options tells the pipeline engine to use `workdir` as the disk location to use when running the processes and keeping their outputs. The `use_parameterized_dirs` tells the engine to create sub-directories under `workdir` corresponding to the iterables in the pipeline. Thus for this pipeline there will be subject specific sub-directories. The ``nipype.pipeline.engine.Pipeline.connect`` function creates the links between the processes, i.e., how data should flow in and out of the processing nodes. """ level1 = pe.Workflow(name="level1") level1.base_dir = os.path.abspath('spm_tutorial2/workingdir') level1.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (datasource,l1pipeline,[('func','preproc.realign.in_files'), ('struct', 'preproc.coregister.target'), ('struct', 'preproc.normalize.source')]), (infosource,l1pipeline,[(('subject_id', subjectinfo), 'analysis.modelspec.subject_info')]), ]) """ Setup storage results --------------------- Use :class:`nipype.interfaces.io.DataSink` to store selected outputs from the pipeline in a specific location. This allows the user to selectively choose important output bits from the analysis and keep them. The first step is to create a datasink node and then to connect outputs from the modules above to storage locations. These take the following form directory_name[.[@]subdir] where parts between [] are optional. 
For example 'realign.@mean' below creates a directory called realign in 'l1output/subject_id/' and stores the mean image output from the Realign process in the realign directory. If the @ is left out, then a sub-directory with the name 'mean' would be created and the mean image would be copied to that directory. """ datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.abspath('spm_tutorial2/l1output') report = pe.Node(interface=nio.DataSink(), name='report') report.inputs.base_directory = os.path.abspath('spm_tutorial2/report') report.inputs.parameterization = False def getstripdir(subject_id): import os return os.path.join(os.path.abspath('spm_tutorial2/workingdir'),'_subject_id_%s' % subject_id) # store relevant outputs from various stages of the 1st level analysis level1.connect([(infosource, datasink,[('subject_id','container'), (('subject_id', getstripdir),'strip_dir')]), (l1pipeline, datasink,[('analysis.contrastestimate.con_images','contrasts.@con'), ('analysis.contrastestimate.spmT_images','contrasts.@T')]), (infosource, report,[('subject_id', 'container'), (('subject_id', getstripdir),'strip_dir')]), (l1pipeline, report,[('analysis.slicestats.out_file', '@report')]), ]) """ Execute the pipeline -------------------- The code discussed above sets up all the necessary data structures with appropriate parameters and the connectivity between the processes, but does not generate any output. To actually run the analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. """ if __name__ == '__main__': level1.run() level1.write_graph() """ Setup level 2 pipeline ---------------------- Use :class:`nipype.interfaces.io.DataGrabber` to extract the contrast images across a group of first level subjects. Unlike the previous pipeline that iterated over subjects, this pipeline will iterate over contrasts. """ # collect all the con images for each contrast. contrast_ids = range(1,len(contrasts)+1) l2source = pe.Node(nio.DataGrabber(infields=['fwhm', 'con']), name="l2source") l2source.inputs.template=os.path.abspath('spm_tutorial2/l1output/*/con*/*/_fwhm_%d/con_%04d.img') # iterate over all contrast images l2source.iterables = [('fwhm',fwhmlist), ('con',contrast_ids)] l2source.inputs.sort_filelist = True """Use :class:`nipype.interfaces.spm.OneSampleTTestDesign` to perform a simple statistical analysis of the contrasts from the group of subjects (n=2 in this example). """ # setup a 1-sample t-test node onesamplettestdes = pe.Node(interface=spm.OneSampleTTestDesign(), name="onesampttestdes") l2estimate = pe.Node(interface=spm.EstimateModel(), name="level2estimate") l2estimate.inputs.estimation_method = {'Classical' : 1} l2conestimate = pe.Node(interface = spm.EstimateContrast(), name="level2conestimate") cont1 = ('Group','T', ['mean'],[1]) l2conestimate.inputs.contrasts = [cont1] l2conestimate.inputs.group_contrast = True """As before, we setup a pipeline to connect these two nodes (l2source -> onesamplettest). 
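If the number of contrasts or smoothing kernels grows, the group-level pipeline
defined below can be distributed just like the first-level one, for example
(illustrative only)::

    l2pipeline.run(plugin='MultiProc', plugin_args={'n_procs': 4})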
""" l2pipeline = pe.Workflow(name="level2") l2pipeline.base_dir = os.path.abspath('spm_tutorial2/l2output') l2pipeline.connect([(l2source,onesamplettestdes,[('outfiles','in_files')]), (onesamplettestdes,l2estimate,[('spm_mat_file','spm_mat_file')]), (l2estimate,l2conestimate,[('spm_mat_file','spm_mat_file'), ('beta_images','beta_images'), ('residual_image','residual_image')]), ]) """ Execute the second level pipeline --------------------------------- """ if __name__ == '__main__': l2pipeline.run() nipype-0.9.2/examples/frontiers_paper/000077500000000000000000000000001227300005300200365ustar00rootroot00000000000000nipype-0.9.2/examples/frontiers_paper/smoothing_comparison.py000066400000000000000000000200251227300005300246500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ =========================== Paper: Smoothing comparison =========================== """ import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.spm as spm # spm import nipype.interfaces.freesurfer as fs # freesurfer import nipype.interfaces.nipy as nipy import nipype.interfaces.utility as util import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.modelgen as model # model specification import nipype.workflows.fmri.fsl as fsl_wf from nipype.interfaces.base import Bunch import os # system functions preprocessing = pe.Workflow(name="preprocessing") iter_fwhm = pe.Node(interface=util.IdentityInterface(fields=["fwhm"]), name="iter_fwhm") iter_fwhm.iterables = [('fwhm', [4, 8])] iter_smoothing_method = pe.Node(interface=util.IdentityInterface(fields=["smoothing_method"]), name="iter_smoothing_method") iter_smoothing_method.iterables = [('smoothing_method',['isotropic_voxel', 'anisotropic_voxel', 'isotropic_surface'])] realign = pe.Node(interface=spm.Realign(), name="realign") realign.inputs.register_to_mean = True isotropic_voxel_smooth = pe.Node(interface=spm.Smooth(), name="isotropic_voxel_smooth") preprocessing.connect(realign, "realigned_files", isotropic_voxel_smooth, "in_files") preprocessing.connect(iter_fwhm, "fwhm", isotropic_voxel_smooth, "fwhm") compute_mask = pe.Node(interface=nipy.ComputeMask(), name="compute_mask") preprocessing.connect(realign, "mean_image", compute_mask, "mean_volume") anisotropic_voxel_smooth = fsl_wf.create_susan_smooth(name="anisotropic_voxel_smooth", separate_masks=False) anisotropic_voxel_smooth.inputs.smooth.output_type = 'NIFTI' preprocessing.connect(realign, "realigned_files", anisotropic_voxel_smooth, "inputnode.in_files") preprocessing.connect(iter_fwhm, "fwhm", anisotropic_voxel_smooth, "inputnode.fwhm") preprocessing.connect(compute_mask, "brain_mask", anisotropic_voxel_smooth, 'inputnode.mask_file') recon_all = pe.Node(interface=fs.ReconAll(), name = "recon_all") surfregister = pe.Node(interface=fs.BBRegister(),name='surfregister') surfregister.inputs.init = 'fsl' surfregister.inputs.contrast_type = 't2' preprocessing.connect(realign, 'mean_image', surfregister, 'source_file') preprocessing.connect(recon_all, 'subject_id', surfregister, 'subject_id') preprocessing.connect(recon_all, 'subjects_dir', surfregister, 'subjects_dir') isotropic_surface_smooth = pe.MapNode(interface=fs.Smooth(proj_frac_avg=(0,1,0.1)), iterfield=['in_file'], name="isotropic_surface_smooth") preprocessing.connect(surfregister, 'out_reg_file', isotropic_surface_smooth, 'reg_file') preprocessing.connect(realign, "realigned_files", isotropic_surface_smooth, "in_file") 
preprocessing.connect(iter_fwhm, "fwhm", isotropic_surface_smooth, "surface_fwhm")
preprocessing.connect(iter_fwhm, "fwhm", isotropic_surface_smooth, "vol_fwhm")
preprocessing.connect(recon_all, 'subjects_dir', isotropic_surface_smooth, 'subjects_dir')

merge_smoothed_files = pe.Node(interface=util.Merge(3), name='merge_smoothed_files')
preprocessing.connect(isotropic_voxel_smooth, 'smoothed_files', merge_smoothed_files, 'in1')
preprocessing.connect(anisotropic_voxel_smooth, 'outputnode.smoothed_files', merge_smoothed_files, 'in2')
preprocessing.connect(isotropic_surface_smooth, 'smoothed_file', merge_smoothed_files, 'in3')

select_smoothed_files = pe.Node(interface=util.Select(), name="select_smoothed_files")
preprocessing.connect(merge_smoothed_files, 'out', select_smoothed_files, 'inlist')

def chooseindex(roi):
    # Map each smoothing method to the slice of the merged file list (4 runs each)
    return {'isotropic_voxel': range(0, 4),
            'anisotropic_voxel': range(4, 8),
            'isotropic_surface': range(8, 12)}[roi]

preprocessing.connect(iter_smoothing_method, ("smoothing_method", chooseindex),
                      select_smoothed_files, 'index')

rename = pe.MapNode(util.Rename(format_string="%(orig)s"), name="rename", iterfield=['in_file'])
rename.inputs.parse_string = "(?P<orig>.*)"
preprocessing.connect(select_smoothed_files, 'out', rename, 'in_file')

specify_model = pe.Node(interface=model.SpecifyModel(), name="specify_model")
specify_model.inputs.input_units = 'secs'
specify_model.inputs.time_repetition = 3.
specify_model.inputs.high_pass_filter_cutoff = 120
specify_model.inputs.subject_info = [Bunch(conditions=['Task-Odd','Task-Even'],
                                           onsets=[range(15,240,60), range(45,240,60)],
                                           durations=[[15], [15]])]*4

level1design = pe.Node(interface=spm.Level1Design(), name="level1design")
level1design.inputs.bases = {'hrf':{'derivs': [0,0]}}
level1design.inputs.timing_units = 'secs'
level1design.inputs.interscan_interval = specify_model.inputs.time_repetition

level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate")
level1estimate.inputs.estimation_method = {'Classical' : 1}

contrastestimate = pe.Node(interface=spm.EstimateContrast(), name="contrastestimate")
contrastestimate.inputs.contrasts = [('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5])]

modelling = pe.Workflow(name="modelling")
modelling.connect(specify_model, 'session_info', level1design, 'session_info')
modelling.connect(level1design, 'spm_mat_file', level1estimate, 'spm_mat_file')
modelling.connect(level1estimate, 'spm_mat_file', contrastestimate, 'spm_mat_file')
modelling.connect(level1estimate, 'beta_images', contrastestimate, 'beta_images')
modelling.connect(level1estimate, 'residual_image', contrastestimate, 'residual_image')

main_workflow = pe.Workflow(name="main_workflow")
main_workflow.base_dir = "smoothing_comparison_workflow"
main_workflow.connect(preprocessing, "realign.realignment_parameters",
                      modelling, "specify_model.realignment_parameters")
main_workflow.connect(preprocessing, "select_smoothed_files.out",
                      modelling, "specify_model.functional_runs")
main_workflow.connect(preprocessing, "compute_mask.brain_mask",
                      modelling, "level1design.mask_image")

datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                               outfields=['func', 'struct']),
                     name='datasource')
datasource.inputs.base_directory = os.path.abspath('data')
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = info = dict(func=[['subject_id', ['f3','f5','f7','f10']]],
                                              struct=[['subject_id','struct']])
datasource.inputs.subject_id = 's1'
datasource.inputs.sort_filelist = True

main_workflow.connect(datasource, 'func',
                      preprocessing, 'realign.in_files')
main_workflow.connect(datasource, 'struct',
                      preprocessing, 'recon_all.T1_files')

datasink = pe.Node(interface=nio.DataSink(), name="datasink")
datasink.inputs.base_directory = os.path.abspath('smoothing_comparison_workflow/output')
datasink.inputs.regexp_substitutions = [("_rename[0-9]", "")]
main_workflow.connect(modelling, 'contrastestimate.spmT_images', datasink, 'contrasts')
main_workflow.connect(preprocessing, 'rename.out_file', datasink, 'smoothed_epi')

main_workflow.run()
main_workflow.write_graph()
nipype-0.9.2/examples/frontiers_paper/workflow_from_scratch.py000066400000000000000000000160721227300005300250210ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
=====================
Workflow from scratch
=====================

"""

import nipype.interfaces.io as nio           # Data i/o
import nipype.interfaces.spm as spm          # spm
import nipype.pipeline.engine as pe          # pypeline engine
import nipype.algorithms.modelgen as model   # model specification
from nipype.interfaces.base import Bunch
import os                                    # system functions

"""In the following section, to showcase NiPyPe, we will describe how to create
and extend a typical fMRI processing pipeline. We will begin with a basic
processing layout and follow with extending it by adding or exchanging
different components.

Most fMRI pipelines can be divided into two sections - preprocessing and
modelling. The first removes confounds and noise from the data, and the second
fits a model based on the experimental design. The preprocessing stage in our
first iteration of the pipeline will consist of only two steps: realignment and
smoothing. In NiPyPe, every processing step consists of an Interface (which
defines how to execute the corresponding software) encapsulated in a Node
(which defines, for example, a unique name). For realignment (motion correction
achieved by coregistering all volumes to the mean) and smoothing (convolution
with a 3D Gaussian kernel) we will use the SPM implementations. Definition of
the appropriate nodes can be found in Listing 1 (TODO). Node inputs (such as
register_to_mean from Listing 1) are accessible through the inputs property.
Upon setting any input, its type is verified to avoid errors during
execution."""

realign = pe.Node(interface=spm.Realign(), name="realign")
realign.inputs.register_to_mean = True

smooth = pe.Node(interface=spm.Smooth(), name="smooth")
smooth.inputs.fwhm = 4

"""To connect two nodes, a Workflow has to be created. The connect() method of
a Workflow specifies which outputs of which Nodes should be connected to which
inputs of which Nodes (see Listing 2). By connecting the realigned_files output
of realign to the in_files input of Smooth we have created a simple
preprocessing workflow (see Figure TODO)."""

preprocessing = pe.Workflow(name="preprocessing")
preprocessing.connect(realign, "realigned_files", smooth, "in_files")

"""Creating a modelling workflow, which will define the design and estimate the
model and contrasts, follows the same scheme. We will again use SPM
implementations. NiPyPe, however, adds an extra abstraction layer to model
definition, which allows the same definition to be used with many model
estimation implementations (for example those from FSL or NiPy). Therefore we
will need four nodes: SpecifyModel (the NiPyPe-specific abstraction layer),
Level1Design (SPM design definition), ModelEstimate, and ContrastEstimate. The
connected modelling Workflow can be seen in Figure TODO.
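The individual ``connect`` calls used below can equivalently be written as a
single call that takes a list of (source, destination, connections) tuples, for
example (an equivalent sketch, not additional code to run)::

    modelling.connect([(specify_model, level1design,
                        [('session_info', 'session_info')]),
                       (level1design, level1estimate,
                        [('spm_mat_file', 'spm_mat_file')])])
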
Model specification supports block, event and sparse designs. Contrasts provided to ContrastEstimate are defined using the same names of regressors as defined in the SpecifyModel.""" specify_model = pe.Node(interface=model.SpecifyModel(), name="specify_model") specify_model.inputs.input_units = 'secs' specify_model.inputs.time_repetition = 3. specify_model.inputs.high_pass_filter_cutoff = 120 specify_model.inputs.subject_info = [Bunch(conditions=['Task-Odd','Task-Even'], onsets=[range(15,240,60), range(45,240,60)], durations=[[15], [15]])]*4 level1design = pe.Node(interface=spm.Level1Design(), name= "level1design") level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} level1design.inputs.timing_units = 'secs' level1design.inputs.interscan_interval = specify_model.inputs.time_repetition level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") level1estimate.inputs.estimation_method = {'Classical' : 1} contrastestimate = pe.Node(interface = spm.EstimateContrast(), name="contrastestimate") cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]) cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]) contrastestimate.inputs.contrasts = [cont1, cont2] modelling = pe.Workflow(name="modelling") modelling.connect(specify_model, 'session_info', level1design, 'session_info') modelling.connect(level1design, 'spm_mat_file', level1estimate, 'spm_mat_file') modelling.connect(level1estimate,'spm_mat_file', contrastestimate,'spm_mat_file') modelling.connect(level1estimate,'beta_images', contrastestimate,'beta_images') modelling.connect(level1estimate,'residual_image', contrastestimate,'residual_image') """Having preprocessing and modelling workflows we need to connect them together, add data grabbing facility and save the results. For this we will create a master Workflow which will host preprocessing and model Workflows as well as DataGrabber and DataSink Nodes. NiPyPe allows connecting Nodes between Workflows. We will use this feature to connect realignment_parameters and smoothed_files to modelling workflow.""" main_workflow = pe.Workflow(name="main_workflow") main_workflow.base_dir = "workflow_from_scratch" main_workflow.connect(preprocessing, "realign.realignment_parameters", modelling, "specify_model.realignment_parameters") main_workflow.connect(preprocessing, "smooth.smoothed_files", modelling, "specify_model.functional_runs") """DataGrabber allows to define flexible search patterns which can be parameterized by user defined inputs (such as subject ID, session etc.). This allows to adapt to a wide range of file layouts. In our case we will parameterize it with subject ID. In this way we will be able to run it for different subjects. We can automate this by iterating over a list of subject Ids, by setting an iterables property on the subject_id input of DataGrabber. Its output will be connected to realignment node from preprocessing workflow.""" datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func']), name = 'datasource') datasource.inputs.base_directory = os.path.abspath('data') datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = dict(func=[['subject_id', ['f3','f5','f7','f10']]]) datasource.inputs.subject_id = 's1' datasource.inputs.sort_filelist = True main_workflow.connect(datasource, 'func', preprocessing, 'realign.in_files') """DataSink on the other side provides means to storing selected results to a specified location. 
It supports automatic creation of folder stricter and regular expression based substitutions. In this example we will store T maps.""" datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.abspath('workflow_from_scratch/output') main_workflow.connect(modelling, 'contrastestimate.spmT_images', datasink, 'contrasts.@T') main_workflow.run() main_workflow.write_graph() nipype-0.9.2/examples/howto_caching_example.py000066400000000000000000000027501227300005300215410ustar00rootroot00000000000000""" =========================================== HOWTO: Using caching without using Workflow =========================================== Using nipype in an imperative way: caching without workflow Note that in the following example, we are calling command-lines with disk I/O that persists across runs, but we never have to worry about the file names or the directories. The disk location of the persistence is encoded by hashes. To find out where an operation has been persisted, simply look in it's output variable:: out.runtime.cwd """ from nipype.interfaces import fsl fsl.FSLCommand.set_default_output_type('NIFTI') from nipype.caching import Memory import glob # First retrieve the list of files that we want to work upon in_files = glob.glob('data/*/f3.nii') # Create a memory context mem = Memory('.') # Apply an arbitrary (and pointless, here) threshold to the files) threshold = [mem.cache(fsl.Threshold)(in_file=f, thresh=i) for i, f in enumerate(in_files)] # Merge all these files along the time dimension out_merge = mem.cache(fsl.Merge)(dimension="t", in_files=[t.outputs.out_file for t in threshold], ) # And finally compute the mean out_mean = mem.cache(fsl.MeanImage)(in_file=out_merge.outputs.merged_file) # To avoid having increasing disk size we can keep only what was touched # in this run #mem.clear_previous_runs() # or what wasn't used since the start of 2011 #mem.clear_runs_since(year=2011) nipype-0.9.2/examples/nipype_tutorial.ipynb000066400000000000000000001330021227300005300211250ustar00rootroot00000000000000{ "metadata": { "_draft": { "nbviewer_url": "gisting : nipype.ipynb\r\n" }, "name": "nipype_tutorial" }, "nbformat": 3, "nbformat_minor": 0, "worksheets": [ { "cells": [ { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Dissecting Nipype Workflows\n", "\n", "
\n", "Nipype team | contact: satra@mit.edu | nipy.org/nipype\n", "
\n", "(Hit Esc to get an overview)\n", "
[Latest version][notebook] | [Latest slideshow][slideshow]\n", "\n", "[notebook]: http://nbviewer.ipython.org/urls/raw.github.com/nipy/nipype/master/examples/nipype_tutorial.ipynb\n", "[slideshow]: http://slideviewer.herokuapp.com/url/raw.github.com/nipy/nipype/master/examples/nipype_tutorial.ipynb" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "# Contributors\n", "\n", "http://nipy.org/nipype/about.html#code-contributors\n", "\n", "# Funding\n", "\n", "- 1R03EB008673-01 from NIBIB, Satrajit Ghosh, Susan Whitfield-Gabrieli\n", "- 5R01MH081909-02 from NIMH, Mark D'Esposito\n", "- INCF\n", "\n", "# Conflict of interest\n", "\n", "
\n", "Satrajit Ghosh: TankThink Labs, LLC\n", "
" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# What is Nipype?\n", "\n", "
\n", "\n", "
\n", "Figure designed and created by: Arno Klein (www.mindboggle.info)\n", "
\n" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "# Make life a little easier\n", "\n", "\n", "\n", "Poline _et al._ (2012)" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "# Many workflow systems out there\n", "\n", "- [BioImage Suite](http://www.bioimagesuite.org/)\n", "- [BIRN Tools](https://wiki.birncommunity.org/x/LgFrAQ)\n", "- [BrainVisa](http://brainvisa.info)\n", "- [CambaFX](http://www-bmu.psychiatry.cam.ac.uk/software/)\n", "- [JIST for MIPAV](http://www.nitrc.org/projects/jist/)\n", "- [LONI pipeline](http://pipeline.loni.ucla.edu)\n", "- [MEVIS Lab](http://www.mevislab.de)\n", "- [PSOM](http://code.google.com/p/psom/)\n" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "skip" } }, "source": [ "" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Solution requirements\n", "\n", "Coming at it from a developer's perspective, we needed something\n", "\n", "- lightweight\n", "- scriptable\n", "- provided formal, common semantics\n", "- allowed interactive exploration\n", "- supported efficient batch processing\n", "- enabled rapid algorithm prototyping\n", "- was flexible and adaptive\n", "- part of an ecosystem" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "# Python ecosystem\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "
\n" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "#Existing technologies\n", "\n", "**shell scripting**:\n", "\n", " Can be quick to do, and powerful, but only provides application specific \n", " scalability, and not easy to port across different architectures.\n", "\n", "**make/CMake**:\n", "\n", " Similar in concept to workflow execution in Nipype, but again limited by the\n", " need for command line tools and flexibility in terms of scaling across\n", " hardware architectures (although see [makeflow](http://nd.edu/~ccl/software/makeflow).\n", "\n", "**Octave/MATLAB**:\n", "\n", " Integration with other tools is *ad hoc* (i.e., system call) and dataflow is\n", " managed at a programmatic level. However, see [PSOM](http://code.google.com/p/psom/) which offers a nice\n", " alternative to some aspects of Nipype for Octave/Matlab users.\n", "\n", "**Graphical options**: (e.g., [LONI Pipeline](http://pipeline.loni.ucla.edu), [VisTrails](http://www.vistrails.org/))\n", "\n", " Are easy to use but reduces flexibility relative to scripting options." ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "#Nipype architecture\n", "\n", "" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "notes" } }, "source": [ "* **Interface**: Wraps a program or function\n", "\n", "- **Node/MapNode**: Wraps an `Interface` for use in a Workflow that provides\n", " caching and other goodies (e.g., pseudo-sandbox)\n", "- **Workflow**: A *graph* or *forest of graphs* whose nodes are of type `Node`,\n", " `MapNode` or `Workflow` and whose edges represent data flow\n", "\n", "* **Plugin**: A component that describes how a `Workflow` should be executed" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "#Software interfaces\n", "\n", "Currently supported (5-2-2013). [Click here for latest](http://www.mit.edu/~satra/nipype-nightly/documentation.html)\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "
\n", "\n", "\n", "\n", "
\n", "\n", "Most used/contributed policy!\n", "\n", "Not all components of these packages are available." ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "skip" } }, "source": [ "# Workflows\n", "\n", "- Properties:\n", "\n", " - processing pipeline is a directed acyclic graph (DAG)\n", " - nodes are processes\n", " - edges represent data flow\n", " - compact represenation for any process\n", " - code and data separation" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "#Execution Plugins\n", "\n", "Allows seamless execution across many architectures\n", "\n", " - Local\n", "\n", " - Serial\n", " - Multicore\n", "\n", " - Clusters\n", "\n", " - HTCondor\n", " - PBS/Torque/SGE/LSF (native and via IPython)\n", " - SSH (via IPython)\n", " - Soma Workflow" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Learn Nipype concepts in 10 easy steps\n", "\n", "\n", "1. Installing and testing the installation \n", "2. Working with interfaces\n", "3. Using Nipype caching\n", "4. Creating Nodes, MapNodes and Workflows\n", "5. Getting and saving data\n", "6. Using Iterables\n", "7. Function nodes\n", "8. Distributed computation\n", "9. Connecting to databases\n", "10. Execution configuration options" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Step 1. Installing Nipype\n", "\n", "## Scientific Python:\n", "\n", "* Debian/Ubuntu/Scientific Fedora\n", "* [Canopy from Enthought](https://www.enthought.com/products/canopy/)\n", "* [Anaconda from Contnuum Analytics](https://store.continuum.io/cshop/anaconda/)\n", "\n", "## Installing Nipype:\n", "\n", "* Available from [@NeuroDebian](http://neuro.debian.net/pkgs/python-nipype.html),\n", " [@PyPI](http://pypi.python.org/pypi/nipype/), and\n", " [@GitHub](http://github.com/nipy/nipype)\n", " \n", " - pip install nipype\n", " - easy_install nipype\n", " - sudo apt-get install python-nipype\n", "\n", "* Dependencies: networkx, nibabel, numpy, scipy, traits\n", "\n", "## Running Nipype ([Quickstart](http://nipy.org/nipype/quickstart.html)):\n", "\n", "* Ensure underlying tools are installed and accessible\n", "* Nipype **is a wrapper, not a substitute** for AFNI, ANTS, FreeSurfer, FSL, SPM,\n", " NiPy, etc.,." ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "# Step 1. Testing nipype" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "```\n", "$ ipython notebook\n", "```" ] }, { "cell_type": "code", "collapsed": false, "input": [ "import nipype\n", "\n", "# Comment the following section to increase verbosity of output\n", "nipype.config.set('logging', 'workflow_level', 'CRITICAL')\n", "nipype.config.set('logging', 'interface_level', 'CRITICAL')\n", "nipype.logging.update_logging(nipype.config)\n", "\n", "nipype.test(verbose=0) # Increase verbosity parameter for more info" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "If all goes well you will see an OK:\n", "\n", " ----------------------------------------------------------------------\n", " Ran 2497 tests in 68.486s\n", "\n", " OK (SKIP=13)\n", "\n", "The number of tests and time will vary depending on which interfaces you have installed on your system." 
] }, { "cell_type": "code", "collapsed": false, "input": [ "nipype.get_info()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "# Environment and data setup\n", "\n", "Setting up your Ipython notebook environment and download some data to play with" ] }, { "cell_type": "code", "collapsed": false, "input": [ "%pylab inline" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "# Some preliminaries\n", "import os\n", "cwd = os.getcwd()\n", "tutorial_dir = '/software/temp/nipype-tutorial/ohbm/'\n", "if not os.path.exists(tutorial_dir):\n", " os.mkdir(tutorial_dir)\n", "os.chdir(tutorial_dir)" ], "language": "python", "metadata": { "slideshow": { "slide_type": "-" } }, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "import urllib\n", "required_files = ['ds107/sub001/BOLD/task001_run001/bold.nii.gz',\n", " 'ds107/sub001/BOLD/task001_run002/bold.nii.gz',\n", " 'ds107/sub001/anatomy/highres001.nii.gz',\n", " 'ds107/sub044/BOLD/task001_run001/bold.nii.gz',\n", " 'ds107/sub044/BOLD/task001_run002/bold.nii.gz',\n", " 'ds107/sub044/anatomy/highres001.nii.gz'\n", " ]\n", "base_url = 'http://openfmri.aws.amazon.com.s3.amazonaws.com/'\n", "for filepath in required_files:\n", " file_location = os.path.join(tutorial_dir, filepath)\n", " if not os.path.exists(file_location):\n", " print('Retrieving: ' + file_location)\n", " os.makedirs(os.path.dirname(file_location))\n", " urllib.urlretrieve(base_url + filepath, file_location)" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Step 2. 
Working with interfaces" ] }, { "cell_type": "code", "collapsed": false, "input": [ "import nipype.algorithms" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.interfaces.fsl import DTIFit\n", "from nipype.interfaces.spm import Realign" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "### Finding interface inputs and outputs and examples" ] }, { "cell_type": "code", "collapsed": false, "input": [ "DTIFit.help()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "Realign.help()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "### Creating a directory for running interfaces" ] }, { "cell_type": "code", "collapsed": false, "input": [ "import os\n", "from shutil import copyfile\n", "library_dir = os.path.join(tutorial_dir, 'as_a_library')\n", "if not os.path.exists(library_dir):\n", " os.mkdir(library_dir)\n", "os.chdir(library_dir)" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "## Executing interfaces" ] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.interfaces.freesurfer import MRIConvert\n", "convert = MRIConvert(in_file='../ds107/sub001/BOLD/task001_run001/bold.nii.gz',\n", " out_file='ds107.nii')\n", "print(convert.cmdline)\n", "results = convert.run(terminal_output='none') # allatonce, stream (default), file" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "results.outputs" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "## Other ways" ] }, { "cell_type": "code", "collapsed": false, "input": [ "convert = MRIConvert()\n", "convert.inputs.in_file='../ds107/sub001/BOLD/task001_run001/bold.nii.gz'\n", "convert.inputs.out_file='ds107.nii'\n", "convert.run()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "convert = MRIConvert()\n", "convert.run(in_file='../ds107/sub001/BOLD/task001_run001/bold.nii.gz',\n", " out_file='ds107.nii')" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": true, "input": [ "convert.inputs" ], "language": "python", "metadata": { "slideshow": { "slide_type": "subslide" } }, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "#### Look at only the defined inputs" ] }, { "cell_type": "code", "collapsed": false, "input": [ "results.inputs" ], "language": "python", "metadata": { "slideshow": { "slide_type": "-" } }, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "### Experiment with other interfaces\n", "\n", "For example, run realignment with SPM" ] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.interfaces.spm import Realign\n", "results1 = Realign(in_files='ds107.nii',\n", " register_to_mean=False).run()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "And 
now use FSL" ] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.interfaces.fsl import MCFLIRT\n", "results2 = MCFLIRT(in_file='ds107.nii', ref_vol=0,\n", " save_plots=True).run()" ], "language": "python", "metadata": { "slideshow": { "slide_type": "-" } }, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "### Now we can look at some results" ] }, { "cell_type": "code", "collapsed": false, "input": [ "print results1.runtime.duration, results2.runtime.duration\n", "subplot(211);plot(genfromtxt('ds107_mcf.nii.gz.par')[:, 3:]);title('FSL')\n", "subplot(212);plot(genfromtxt('rp_ds107.txt')[:,:3]);title('SPM')" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### if i execute the MCFLIRT line again, well, it runs again!" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Step 3. Nipype caching" ] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.caching import Memory\n", "mem = Memory('.')" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Create `cacheable` objects" ] }, { "cell_type": "code", "collapsed": false, "input": [ "spm_realign = mem.cache(Realign)\n", "fsl_realign = mem.cache(MCFLIRT)" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "### Execute interfaces" ] }, { "cell_type": "code", "collapsed": false, "input": [ "spm_results = spm_realign(in_files='ds107.nii', register_to_mean=False)\n", "fsl_results = fsl_realign(in_file='ds107.nii', ref_vol=0, save_plots=True)" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "subplot(211);plot(genfromtxt(fsl_results.outputs.par_file)[:, 3:])\n", "subplot(212);plot(genfromtxt(spm_results.outputs.realignment_parameters)[:,:3])" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "spm_results = spm_realign(in_files='ds107.nii', register_to_mean=False)\n", "fsl_results = fsl_realign(in_file='ds107.nii', ref_vol=0, save_plots=True)" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "# More caching" ] }, { "cell_type": "code", "collapsed": false, "input": [ "from os.path import abspath as opap\n", "files = [opap('../ds107/sub001/BOLD/task001_run001/bold.nii.gz'),\n", " opap('../ds107/sub001/BOLD/task001_run002/bold.nii.gz')]\n", "converter = mem.cache(MRIConvert)\n", "newfiles = []\n", "for idx, fname in enumerate(files):\n", " newfiles.append(converter(in_file=fname,\n", " out_type='nii').outputs.out_file)" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "os.chdir(tutorial_dir)" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Step 4: Nodes, Mapnodes and workflows\n", "\n", "**Where:**" ] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.pipeline.engine import Node, MapNode, Workflow" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Node**:" ] }, { "cell_type": "code", 
"collapsed": false, "input": [ "realign_spm = Node(Realign(), name='motion_correct')" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Mapnode**:\n", "\n", "" ] }, { "cell_type": "code", "collapsed": false, "input": [ "convert2nii = MapNode(MRIConvert(out_type='nii'),\n", " iterfield=['in_file'],\n", " name='convert2nii')" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "# \"Hello World\" of Nipype workflows" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "### Connect them up:" ] }, { "cell_type": "code", "collapsed": false, "input": [ "realignflow = Workflow(name='realign_with_spm')\n", "realignflow.connect(convert2nii, 'out_file',\n", " realign_spm, 'in_files')" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "convert2nii.inputs.in_file = files\n", "realign_spm.inputs.register_to_mean = False\n", "\n", "realignflow.base_dir = opap('.')\n", "realignflow.run()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "#Visualize the workflow" ] }, { "cell_type": "code", "collapsed": false, "input": [ "realignflow.write_graph()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "from IPython.core.display import Image\n", "Image('realign_with_spm/graph.dot.png')" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "realignflow.write_graph(graph2use='orig')\n", "Image('realign_with_spm/graph_detailed.dot.png')" ], "language": "python", "metadata": { "slideshow": { "slide_type": "subslide" } }, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Step 5. 
Getting and saving data\n", "\n", "### Instead of assigning data ourselves, let's *glob* it" ] }, { "cell_type": "code", "collapsed": false, "input": [ "os.chdir(tutorial_dir)" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.interfaces.io import DataGrabber, DataFinder\n", "ds = Node(DataGrabber(infields=['subject_id'], outfields=['func']),\n", " name='datasource')\n", "ds.inputs.base_directory = opap('ds107')\n", "ds.inputs.template = '%s/BOLD/task001*/bold.nii.gz'\n", "ds.inputs.sort_filelist = True\n", "\n", "ds.inputs.subject_id = 'sub001'\n", "print ds.run().outputs" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "ds.inputs.subject_id = 'sub044'\n", "print ds.run().outputs" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "#Multiple files\n", "\n", "### A little more practical usage" ] }, { "cell_type": "code", "collapsed": false, "input": [ "ds = Node(DataGrabber(infields=['subject_id', 'task_id'],\n", " outfields=['func', 'anat']),\n", " name='datasource')\n", "ds.inputs.base_directory = opap('ds107')\n", "ds.inputs.template = '*'\n", "ds.inputs.template_args = {'func': [['subject_id', 'task_id']],\n", " 'anat': [['subject_id']]}\n", "ds.inputs.field_template = {'func': '%s/BOLD/task%03d*/bold.nii.gz',\n", " 'anat': '%s/anatomy/highres001.nii.gz'}\n", "ds.inputs.sort_filelist = True\n", "ds.inputs.subject_id = 'sub001'\n", "ds.inputs.task_id = 1\n", "print ds.run().outputs" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "# Connecting to computation" ] }, { "cell_type": "code", "collapsed": false, "input": [ "convert2nii = MapNode(MRIConvert(out_type='nii'),\n", " iterfield=['in_file'],\n", " name='convert2nii')\n", "\n", "realign_spm = Node(Realign(), name='motion_correct')\n", "realign_spm.inputs.register_to_mean = False\n", "\n", "connectedworkflow = Workflow(name='connectedtogether')\n", "connectedworkflow.base_dir = opap('working_dir')\n", "connectedworkflow.connect(ds, 'func', convert2nii, 'in_file')\n", "connectedworkflow.connect(convert2nii, 'out_file', realign_spm, 'in_files')" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "#Data sinking\n", "\n", "###Take output computed in a workflow out of it." ] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.interfaces.io import DataSink\n", "sinker = Node(DataSink(), name='sinker')\n", "sinker.inputs.base_directory = opap('output')\n", "connectedworkflow.connect(realign_spm, 'realigned_files',\n", " sinker, 'realigned')\n", "connectedworkflow.connect(realign_spm, 'realignment_parameters',\n", " sinker, 'realigned.@parameters')\n", "connectedworkflow.run()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### How to determine output location\n", "\n", " 'base_directory/container/parameterization/destloc/filename'\n", " \n", " destloc = [@]string[[.[@]]string[[.[@]]string]...] 
and\n", " destloc = realigned.@parameters --> 'realigned'\n", " destloc = realigned.parameters.@1 --> 'realigned/parameters'\n", " destloc = realigned.parameters.@2 --> 'realigned/parameters'\n", " filename comes from the input to the connect statement." ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "#Step 6: *iterables* - parametric execution\n", "\n", "**Workflow + iterables**: runs subgraph several times, attribute not input" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "" ] }, { "cell_type": "code", "collapsed": false, "input": [ "ds.iterables = ('subject_id', ['sub001', 'sub044'])\n", "connectedworkflow.run()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "#Putting it all together\n", "\n", "### iterables + MapNode + Node + Workflow + DataGrabber + DataSink" ] }, { "cell_type": "code", "collapsed": false, "input": [ "connectedworkflow.write_graph()\n", "Image('working_dir/connectedtogether/graph.dot.png')" ], "language": "python", "metadata": { "slideshow": { "slide_type": "-" } }, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Step 7: The Function interface\n", "\n", "### The do anything you want card" ] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.interfaces.utility import Function\n", "\n", "def myfunc(input1, input2):\n", " \"\"\"Add and subtract two inputs\n", " \"\"\"\n", " return input1 + input2, input1 - input2\n", "\n", "calcfunc = Node(Function(input_names=['input1', 'input2'],\n", " output_names = ['sum', 'difference'],\n", " function=myfunc),\n", " name='mycalc')\n", "calcfunc.inputs.input1 = 1\n", "calcfunc.inputs.input2 = 2\n", "res = calcfunc.run()\n", "print res.outputs" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "#Step 8: Distributed computing\n", "\n", "### Normally calling run executes the workflow in series" ] }, { "cell_type": "code", "collapsed": false, "input": [ "connectedworkflow.run()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### but you can scale very easily\n", "\n", "For example, to use multiple cores on your local machine" ] }, { "cell_type": "code", "collapsed": false, "input": [ "connectedworkflow.run('MultiProc', plugin_args={'n_procs': 4})" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "### Or to other job managers\n", "\n", "- connectedworkflow.run('PBS', plugin_args={'qsub_args': '-q many'})\n", "- connectedworkflow.run('SGE', plugin_args={'qsub_args': '-q many'})\n", "- connectedworkflow.run('LSF', plugin_args={'qsub_args': '-q many'})\n", "- connectedworkflow.run('Condor')\n", "- connectedworkflow.run('IPython')\n", "\n", "### or submit graphs as a whole\n", "\n", "\n", "- connectedworkflow.run('PBSGraph', plugin_args={'qsub_args': '-q many'})\n", "- connectedworkflow.run('SGEGraph', plugin_args={'qsub_args': '-q many'})\n", "- connectedworkflow.run('CondorDAGMan')\n", "\n", "### Current Requirement: **SHARED FILESYSTEM**" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "### You can also set node specific plugin arguments" ] }, { 
"cell_type": "markdown", "metadata": {}, "source": [ "\n", "- node.plugin_args = {'qsub_args': '-l nodes=1:ppn=3', 'overwrite': True}\n" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "#Step 9: Connecting to Databases" ] }, { "cell_type": "code", "collapsed": false, "input": [ "from os.path import abspath as opap\n", "\n", "from nipype.interfaces.io import XNATSource\n", "from nipype.pipeline.engine import Node, Workflow\n", "from nipype.interfaces.fsl import BET\n", "\n", "subject_id = 'xnat_S00001'\n", "\n", "dg = Node(XNATSource(infields=['subject_id'],\n", " outfields=['struct'],\n", " config='/Users/satra/xnat_configs/nitrc_ir_config'),\n", " name='xnatsource')\n", "dg.inputs.query_template = ('/projects/fcon_1000/subjects/%s/experiments/xnat_E00001'\n", " '/scans/%s/resources/NIfTI/files')\n", "dg.inputs.query_template_args['struct'] = [['subject_id', 'anat_mprage_anonymized']]\n", "dg.inputs.subject_id = subject_id\n", "\n", "bet = Node(BET(), name='skull_stripper')\n", "\n", "wf = Workflow(name='testxnat')\n", "wf.base_dir = opap('xnattest')\n", "wf.connect(dg, 'struct', bet, 'in_file')" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.interfaces.io import XNATSink\n", "\n", "ds = Node(XNATSink(config='/Users/satra/xnat_configs/central_config'),\n", " name='xnatsink')\n", "ds.inputs.project_id = 'NPTEST'\n", "ds.inputs.subject_id = 'NPTEST_xnat_S00001'\n", "ds.inputs.experiment_id = 'test_xnat'\n", "ds.inputs.reconstruction_id = 'bet'\n", "ds.inputs.share = True\n", "wf.connect(bet, 'out_file', ds, 'brain')" ], "language": "python", "metadata": { "slideshow": { "slide_type": "skip" } }, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "wf.run()" ], "language": "python", "metadata": { "slideshow": { "slide_type": "subslide" } }, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "#Step 10: Configuration options\n", "\n", "[Configurable options](http://nipy.org/nipype/users/config_file.html) control workflow and node execution options\n", "\n", "At the global level:" ] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype import config, logging\n", "\n", "config.enable_debug_mode()\n", "logging.update_logging(config)\n", "\n", "config.set('execution', 'stop_on_first_crash', 'true')" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "At the workflow level:" ] }, { "cell_type": "code", "collapsed": false, "input": [ "wf.config['execution']['hash_method'] = 'content'" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "Configurations can also be set at the node level." 
] }, { "cell_type": "code", "collapsed": false, "input": [ "bet.config = {'execution': {'keep_unnecessary_outputs': 'true'}}" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "wf.run()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Reusable workflows" ] }, { "cell_type": "code", "collapsed": false, "input": [ "config.set_default_config()\n", "logging.update_logging(config)" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.workflows.fmri.fsl.preprocess import create_susan_smooth\n", "\n", "smooth = create_susan_smooth()\n", "smooth.inputs.inputnode.in_files = opap('output/realigned/_subject_id_sub044/rbold_out.nii')\n", "smooth.inputs.inputnode.fwhm = 5\n", "smooth.inputs.inputnode.mask_file = 'mask.nii'\n", "\n", "smooth.run() # Will error because mask.nii does not exist" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "from nipype.interfaces.fsl import BET, MeanImage, ImageMaths\n", "from nipype.pipeline.engine import Node\n", "\n", "\n", "remove_nan = Node(ImageMaths(op_string= '-nan'), name='nanremove')\n", "remove_nan.inputs.in_file = opap('output/realigned/_subject_id_sub044/rbold_out.nii')\n", "\n", "mi = Node(MeanImage(), name='mean')\n", "\n", "mask = Node(BET(mask=True), name='mask')\n", "\n", "wf = Workflow('reuse')\n", "wf.base_dir = opap('.')\n", "wf.connect(remove_nan, 'out_file', mi, 'in_file')\n", "wf.connect(mi, 'out_file', mask, 'in_file')\n", "wf.connect(mask, 'out_file', smooth, 'inputnode.mask_file')\n", "wf.connect(remove_nan, 'out_file', smooth, 'inputnode.in_files')\n", "\n", "wf.run()" ], "language": "python", "metadata": { "slideshow": { "slide_type": "subslide" } }, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "subslide" } }, "source": [ "## Setting internal parameters of workflows" ] }, { "cell_type": "code", "collapsed": false, "input": [ "print(smooth.list_node_names())\n", "\n", "median = smooth.get_node('median')\n", "median.inputs.op_string = '-k %s -p 60'" ], "language": "python", "metadata": { "slideshow": { "slide_type": "-" } }, "outputs": [] }, { "cell_type": "code", "collapsed": false, "input": [ "wf.run()" ], "language": "python", "metadata": {}, "outputs": [] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Summary\n", "\n", "\n", "- This tutorial covers the concepts of Nipype\n", "\n", " 1. Installing and testing the installation \n", " 2. Working with interfaces\n", " 3. Using Nipype caching\n", " 4. Creating Nodes, MapNodes and Workflows\n", " 5. Getting and saving data\n", " 6. Using Iterables\n", " 7. Function nodes\n", " 8. Distributed computation\n", " 9. Connecting to databases\n", " 10. Execution configuration options\n", "\n", "- It will allow you to reuse and debug the various workflows available in Nipype, BIPS and CPAC\n", "- Please contribute new interfaces and workflows!" 
] }, { "cell_type": "code", "collapsed": false, "input": [ "import os\n", "basedir = '/Users/satra/Dropbox/WORK/notebooks/'\n", "if os.path.exists(basedir):\n", " os.chdir(basedir)" ], "language": "python", "metadata": { "slideshow": { "slide_type": "skip" } }, "outputs": [] } ], "metadata": {} } ] }nipype-0.9.2/examples/rsfmri_fsl.py000077500000000000000000000240461227300005300173650ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ =========================== rsfMRI: FSL - CSF regressed =========================== A pipeline example that uses intergrates several interfaces to perform a first and second level analysis on a two-subject data set. 1. Tell python where to find the appropriate functions. """ import numpy as np import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.modelgen as model # model generation import os # system functions ##################################################################### # Preliminaries """ 2. Setup any package specific configuration. The output file format for FSL routines is being set to uncompressed NIFTI and a specific version of matlab is being used. The uncompressed format is required because SPM does not handle compressed NIFTI. """ # Tell fsl to generate all output in compressed nifti format print fsl.Info.version() fsl.FSLCommand.set_default_output_type('NIFTI_GZ') extract_ref = pe.Node(interface=fsl.ExtractROI(t_min=42, t_size=1), name = 'extractref') # run FSL's bet # bet my_structural my_betted_structural """ in the provided data set, the nose is behind the head and causes problems for segmentation routines """ nosestrip = pe.Node(interface=fsl.BET(frac=0.3), name = 'nosestrip') skullstrip = pe.Node(interface=fsl.BET(mask = True), name = 'stripstruct') refskullstrip = pe.Node(interface=fsl.BET(mask = True), name = 'stripref') coregister = pe.Node(interface=fsl.FLIRT(dof=6), name = 'coregister') # Preprocess functionals motion_correct = pe.Node(interface=fsl.MCFLIRT(save_plots = True), name='realign') #iterfield = ['in_file']) """ skull strip functional data """ func_skullstrip = pe.Node(interface=fsl.BET(functional = True), name='stripfunc') #iterfield = ['in_file']) """ Run FAST on T1 anatomical image to obtain CSF mask. Create mask for three tissue types. """ getCSFmasks = pe.Node(interface=fsl.FAST(no_pve=True,segments=True), name = 'segment') """ Apply registration matrix to CSF segmentation mask. 
""" applyReg2CSFmask = pe.Node(interface=fsl.ApplyXfm(apply_xfm=True), name = 'applyreg2csfmask') """ Threshold CSF segmentation mask from .90 to 1 """ threshCSFseg = pe.Node(interface = fsl.ImageMaths(op_string = ' -thr .90 -uthr 1 -bin '), name = 'threshcsfsegmask') """ Extract CSF timeseries """ avgCSF = pe.Node(interface = fsl.ImageMeants(), name='extractcsfts') def pickfirst(files): return files[0] """ Create the workflow """ csffilter = pe.Workflow(name='csffilter') csffilter.connect([(extract_ref, motion_correct,[('roi_file', 'ref_file')]), (extract_ref, refskullstrip,[('roi_file', 'in_file')]), (nosestrip, skullstrip, [('out_file','in_file')]), (skullstrip, getCSFmasks,[('out_file','in_files')]), (skullstrip, coregister,[('mask_file','in_file')]), (refskullstrip, coregister,[('out_file','reference')]), (motion_correct, func_skullstrip, [('out_file', 'in_file')]), (getCSFmasks, applyReg2CSFmask,[(('tissue_class_files',pickfirst),'in_file')]), (refskullstrip, applyReg2CSFmask,[('out_file','reference')]), (coregister, applyReg2CSFmask,[('out_matrix_file','in_matrix_file')]), (applyReg2CSFmask,threshCSFseg,[('out_file','in_file')]), (func_skullstrip,avgCSF,[('out_file','in_file')]), (threshCSFseg,avgCSF,[('out_file','mask')]), ]) modelfit = pe.Workflow(name='modelfit') """ c. Use :class:`nipype.interfaces.spm.SpecifyModel` to generate SPM-specific design information. """ modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec") """ d. Use :class:`nipype.interfaces.fsl.Level1Design` to generate a run specific fsf file for analysis """ level1design = pe.Node(interface=fsl.Level1Design(), name="fsfdesign") """ e. Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat file for use by FILMGLS """ modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen') """ f. Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a mat file and a functional run """ modelestimate = pe.Node(interface=fsl.FILMGLS(), name='modelestimate') #iterfield = ['design_file','in_file']) modelfit.connect([(modelspec,level1design,[('session_info','session_info')]), (level1design,modelgen,[('fsf_files','fsf_file'), ('ev_files', 'ev_files')]), (modelgen,modelestimate,[('design_file','design_file')]), ]) """ The nipype tutorial contains data for two subjects. Subject data is in two subdirectories, ``s1`` and ``s2``. Each subject directory contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And one anatomical volume named struct.nii. Below we set some variables to inform the ``datasource`` about the layout of our data. We specify the location of the data, the subject sub-directories and a dictionary that maps each run to a mnemonic (or field) for the run type (``struct`` or ``func``). These fields become the output fields of the ``datasource`` node in the pipeline. In the example below, run 'f3' is of type 'func' and gets mapped to a nifti filename through a template '%s.nii'. So 'f3' would become 'f3.nii'. """ # Specify the location of the data. data_dir = os.path.abspath('data') # Specify the subject directories subject_list = ['s1'] # Map field names to individual subject runs. info = dict(func=[['subject_id', ['f3',]]], #'f5','f7','f10']]], struct=[['subject_id','struct']]) infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """ Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. 
The ``datasource`` attribute ``iterables`` tells the pipeline engine that it should repeat the analysis on each of the items in the ``subject_list``. In the current example, the entire first level preprocessing and estimation will be repeated for each subject contained in subject_list. """ infosource.iterables = ('subject_id', subject_list) """ Preprocessing pipeline nodes ---------------------------- Now we create a :class:`nipype.interfaces.io.DataSource` object and fill in the information from above about the layout of our data. The :class:`nipype.pipeline.NodeWrapper` module wraps the interface object and provides additional housekeeping and pipeline specific functionality. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ a. Setup a function that returns subject-specific information about the experimental paradigm. This is used by the :class:`nipype.modelgen.SpecifyModel` to create the information necessary to generate an SPM design matrix. In this tutorial, the same paradigm was used for every participant. Other examples of this function are available in the `doc/examples` folder. Note: Python knowledge required here. """ def subjectinfo(meantsfile): import numpy as np from nipype.interfaces.base import Bunch ts = np.loadtxt(meantsfile) output = [Bunch(regressor_names=['MeanIntensity'], regressors=[ts.tolist()])] return output hpcutoff = np.inf TR = 3. modelfit.inputs.modelspec.input_units = 'secs' modelfit.inputs.modelspec.time_repetition = TR modelfit.inputs.modelspec.high_pass_filter_cutoff = hpcutoff modelfit.inputs.fsfdesign.interscan_interval = TR modelfit.inputs.fsfdesign.bases = {'none': None} modelfit.inputs.fsfdesign.model_serial_correlations = False modelfit.inputs.modelestimate.autocorr_noestimate = True """ Band pass filter the data to remove frequencies below .1 Hz """ bandPassFilterData = pe.Node(interface=fsl.ImageMaths(op_string = ' -bptf 128 12.5 '), name='bandpassfiltermcdata_fslmaths') """ Set up complete workflow ======================== """ l1pipeline = pe.Workflow(name= "resting") l1pipeline.base_dir = os.path.abspath('./fslresting/workingdir') l1pipeline.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (datasource, csffilter, [('struct','nosestrip.in_file'), ('func', 'realign.in_file'), #(('func', pickfirst), 'extractref.in_file'), ('func', 'extractref.in_file'), ]), (csffilter, modelfit, [('stripfunc.out_file', 'modelspec.functional_runs'), ('realign.par_file', 'modelspec.realignment_parameters'), (('extractcsfts.out_file', subjectinfo),'modelspec.subject_info'), ('stripfunc.out_file', 'modelestimate.in_file') ]), (modelfit, bandPassFilterData, [('modelestimate.residual4d', 'in_file')]), ]) if __name__ == '__main__': l1pipeline.run() l1pipeline.write_graph() nipype-0.9.2/examples/rsfmri_fsl_compcorr.py000077500000000000000000000067251227300005300212750ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ============================== rsfMRI: FSL, Nipype, tCompCorr ============================== Performs preprocessing for resting state data based on the tCompCorr method described in Behzadi et al. (2007). Tell python where to find the appropriate functions. 
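At its core, tCompCor treats the voxels with the highest temporal standard deviation as a
physiological-noise compartment and removes the leading principal components of their time
series from the data. A schematic sketch of that idea, using an illustrative percentile
threshold and not the exact code run by ``create_resting_preproc`` below, looks like::

    import numpy as np

    def tcompcor_regressors(data, n_components=6, percentile=98.):
        # data: voxels x timepoints array from a motion-corrected run
        stddev = data.std(axis=1)
        # keep only the noisiest voxels (highest temporal standard deviation)
        noisy = data[stddev > np.percentile(stddev, percentile)]
        noisy = noisy - noisy.mean(axis=1)[:, None]
        # the leading right singular vectors give one nuisance regressor per column
        _, _, v = np.linalg.svd(noisy, full_matrices=False)
        return v[:n_components].T

``create_resting_preproc`` builds regressors of this kind internally and removes them from
the data for you.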
""" import os # system functions import nipype.interfaces.io as nio # Data i/o import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.utility as util ##################################################################### # Preliminaries from nipype.workflows.fmri.fsl import create_resting_preproc """ Set up parameters for the resting state preprocessing workflow. """ TR = 3.0 restingflow = create_resting_preproc() restingflow.inputs.inputspec.num_noise_components = 6 restingflow.inputs.inputspec.highpass_sigma = 100/(2*TR) restingflow.inputs.inputspec.lowpass_sigma = 12.5/(2*TR) # Specify the location of the data. data_dir = os.path.abspath('data') # Specify the subject directories subject_list = ['s1'] infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") """Here we set up iteration over all the subjects. """ infosource.iterables = ('subject_id', subject_list) """ Preprocessing pipeline nodes ---------------------------- Now we create a :class:`nipype.interfaces.io.DataSource` object and fill in the information from above about the layout of our data. """ datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name = 'datasource') datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' # Map field names to individual subject runs. info = dict(func=[['subject_id', ['f3',]]]) datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Store significant result-files in a special directory """ datasink = pe.Node(interface=nio.DataSink(parameterization=False), name='datasink') datasink.inputs.base_directory = os.path.abspath('./fslresting/compcorred') """ Set up complete workflow ------------------------ """ def get_substitutions(subject_id): '''Replace output names of files with more meaningful ones ''' return [('vol0000_warp_merged_detrended_regfilt_filt', '%s_filtered'%subject_id), ('vol0000_warp_merged_tsnr_stddev_thresh', '%s_noisyvoxels'%subject_id)] l1pipeline = pe.Workflow(name= "resting") l1pipeline.base_dir = os.path.abspath('./fslresting/workingdir') l1pipeline.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (datasource, restingflow, [('func', 'inputspec.func')]), (infosource, datasink, [('subject_id', 'container'), (('subject_id', get_substitutions), 'substitutions')]), (restingflow, datasink, [('outputspec.noise_mask_file', '@noisefile'), ('outputspec.filtered_file', '@filteredfile')]) ]) if __name__ == '__main__': l1pipeline.run() l1pipeline.write_graph() nipype-0.9.2/examples/rsfmri_preprocessing.py000066400000000000000000001021511227300005300214530ustar00rootroot00000000000000#!/usr/bin/env python """ ================================================================ rsfMRI: AFNI, ANTS, DicomStack, FreeSurfer, FSL, Nipy, aCompCorr ================================================================ A preprocessing workflow for Siemens resting state data. This workflow makes use of: - AFNI - ANTS - C3D_Affine_Tool - DicomStack - FreeSurfer - FSL - NiPy For example:: python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii -s subj001 -n 2 --despike -o output -p PBS --plugin_args "dict(qsub_args='-q many')" This workflow takes resting timeseries and a Siemens dicom file corresponding to it and preprocesses it to produce timeseries coordinates or grayordinates. 
This workflow also requires 2mm subcortical atlas and templates that are available from: http://mindboggle.info/data.html specifically the 2mm versions of: - `Joint Fusion Atlas `_ - `MNI template `_ The 2mm version was generated with:: >>> from nipype import freesurfer as fs >>> rs = fs.Resample() >>> rs.inputs.in_file = 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152.nii.gz' >>> rs.inputs.resampled_file = 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm.nii.gz' >>> rs.inputs.voxel_size = (2., 2., 2.) >>> rs.inputs.args = '-rt nearest -ns 1' >>> res = rs.run() """ import os from nipype.interfaces.base import CommandLine CommandLine.set_default_terminal_output('file') from nipype import config config.enable_provenance() from nipype import (ants, afni, fsl, freesurfer, nipy, Function, DataSink) from nipype import Workflow, Node, MapNode from nipype.algorithms.rapidart import ArtifactDetect from nipype.algorithms.misc import TSNR from nipype.interfaces.fsl import EPIDeWarp from nipype.interfaces.io import FreeSurferSource from nipype.interfaces.c3 import C3dAffineTool from nipype.interfaces.utility import Merge, IdentityInterface from nipype.utils.filemanip import filename_to_list import numpy as np import scipy as sp import nibabel as nb from dcmstack.extract import default_extractor from dicom import read_file imports = ['import os', 'import nibabel as nb', 'import numpy as np', 'import scipy as sp', 'from nipype.utils.filemanip import filename_to_list' ] def get_info(dicom_files): """Given a Siemens dicom file return metadata Returns ------- RepetitionTime Slice Acquisition Times Spacing between slices """ meta = default_extractor(read_file(filename_to_list(dicom_files)[0], stop_before_pixels=True, force=True)) return (meta['RepetitionTime']/1000., meta['CsaImage.MosaicRefAcqTimes'], meta['SpacingBetweenSlices']) def median(in_files): """Computes an average of the median of each realigned timeseries Parameters ---------- in_files: one or more realigned Nifti 4D time series Returns ------- out_file: a 3D Nifti file """ average = None for idx, filename in enumerate(filename_to_list(in_files)): img = nb.load(filename) data = np.median(img.get_data(), axis=3) if not average: average = data else: average = average + data median_img = nb.Nifti1Image(average/float(idx + 1), img.get_affine(), img.get_header()) filename = os.path.join(os.getcwd(), 'median.nii.gz') median_img.to_filename(filename) return filename def get_aparc_aseg(files): """Return the aparc+aseg.mgz file""" for name in files: if 'aparc+aseg.mgz' in name: return name raise ValueError('aparc+aseg.mgz not found') def motion_regressors(motion_params, order=2, derivatives=2): """Compute motion regressors upto given order and derivative motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic) """ out_files = [] for idx, filename in enumerate(filename_to_list(motion_params)): params = np.genfromtxt(filename) out_params = params for d in range(1, derivatives + 1): cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0), params)) out_params = np.hstack((out_params, np.diff(cparams, d, axis=0))) out_params2 = out_params for i in range(2, order + 1): out_params2 = np.hstack((out_params2, np.power(out_params, i))) filename = os.path.join(os.getcwd(), "motion_regressor%02d.txt" % idx) np.savetxt(filename, out_params2, fmt="%.10f") out_files.append(filename) return out_files def build_filter1(motion_params, comp_norm, outliers): """Builds a regressor set comprisong motion parameters, composite norm and outliers The 
outliers are added as a single time point column for each outlier Parameters ---------- motion_params: a text file containing motion parameters and its derivatives comp_norm: a text file containing the composite norm outliers: a text file containing 0-based outlier indices Returns ------- components_file: a text file containing all the regressors """ out_files = [] for idx, filename in enumerate(filename_to_list(motion_params)): params = np.genfromtxt(filename) norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx]) out_params = np.hstack((params, norm_val[:, None])) try: outlier_val = np.genfromtxt(filename_to_list(outliers)[idx]) except IOError: outlier_val = np.empty((0)) for index in np.atleast_1d(outlier_val): outlier_vector = np.zeros((out_params.shape[0], 1)) outlier_vector[index] = 1 out_params = np.hstack((out_params, outlier_vector)) filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx) np.savetxt(filename, out_params, fmt="%.10f") out_files.append(filename) return out_files def extract_noise_components(realigned_file, mask_file, num_components=6): """Derive components most reflective of physiological noise Parameters ---------- realigned_file: a 4D Nifti file containing realigned volumes mask_file: a 3D Nifti file containing white matter + ventricular masks num_components: number of components to use for noise decomposition Returns ------- components_file: a text file containing the noise components """ imgseries = nb.load(realigned_file) noise_mask = nb.load(mask_file) voxel_timecourses = imgseries.get_data()[np.nonzero(noise_mask.get_data())] voxel_timecourses = voxel_timecourses.byteswap().newbyteorder() voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0 _, _, v = sp.linalg.svd(voxel_timecourses, full_matrices=False) components_file = os.path.join(os.getcwd(), 'noise_components.txt') np.savetxt(components_file, v[:num_components, :].T) return components_file def extract_subrois(timeseries_file, label_file, indices): """Extract voxel time courses for each subcortical roi index Parameters ---------- timeseries_file: a 4D Nifti file label_file: a 3D file containing rois in the same space/size of the 4D file indices: a list of indices for ROIs to extract. 
Returns ------- out_file: a text file containing time courses for each voxel of each roi The first four columns are: freesurfer index, i, j, k positions in the label file """ img = nb.load(timeseries_file) data = img.get_data() roiimg = nb.load(label_file) rois = roiimg.get_data() out_ts_file = os.path.join(os.getcwd(), 'subcortical_timeseries.txt') with open(out_ts_file, 'wt') as fp: for fsindex in indices: ijk = np.nonzero(rois == fsindex) ts = data[ijk] for i0, row in enumerate(ts): fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0], ijk[1][i0], ijk[2][i0]) + ','.join(['%.10f' % val for val in row]) + '\n') return out_ts_file def combine_hemi(left, right): """Combine left and right hemisphere time series into a single text file """ lh_data = nb.load(left).get_data() rh_data = nb.load(right).get_data() indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None], 2000000 + np.arange(0, rh_data.shape[0])[:, None])) all_data = np.hstack((indices, np.vstack((lh_data.squeeze(), rh_data.squeeze())))) filename = 'combined_surf.txt' np.savetxt(filename, all_data, fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1))) return os.path.abspath(filename) """ Creates the main preprocessing workflow """ def create_workflow(files, subject_id, n_vol=0, despike=True, TR=None, slice_times=None, slice_thickness=None, fieldmap_images=[], norm_threshold=1, num_components=6, vol_fwhm=None, surf_fwhm=None, lowpass_freq=-1, highpass_freq=-1, sink_directory=os.getcwd(), FM_TEdiff=2.46, FM_sigma=2, FM_echo_spacing=.7, target_subject=['fsaverage3', 'fsaverage4'], name='resting'): wf = Workflow(name=name) # Skip starting volumes remove_vol = MapNode(fsl.ExtractROI(t_min=n_vol, t_size=-1), iterfield=['in_file'], name="remove_volumes") remove_vol.inputs.in_file = files # Run AFNI's despike. 
This is always run, however, whether this is fed to # realign depends on the input configuration despiker = MapNode(afni.Despike(outputtype='NIFTI_GZ'), iterfield=['in_file'], name='despike') #despiker.plugin_args = {'qsub_args': '-l nodes=1:ppn='} wf.connect(remove_vol, 'roi_file', despiker, 'in_file') # Run Nipy joint slice timing and realignment algorithm realign = Node(nipy.SpaceTimeRealigner(), name='realign') realign.inputs.tr = TR realign.inputs.slice_times = slice_times realign.inputs.slice_info = 2 if despike: wf.connect(despiker, 'out_file', realign, 'in_file') else: wf.connect(remove_vol, 'roi_file', realign, 'in_file') # Comute TSNR on realigned data regressing polynomials upto order 2 tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr') wf.connect(realign, 'out_file', tsnr, 'in_file') # Compute the median image across runs calc_median = Node(Function(input_names=['in_files'], output_names=['median_file'], function=median, imports=imports), name='median') wf.connect(tsnr, 'detrended_file', calc_median, 'in_files') # Coregister the median to the surface register = Node(freesurfer.BBRegister(), name='bbregister') register.inputs.subject_id = subject_id register.inputs.init = 'fsl' register.inputs.contrast_type = 't2' register.inputs.out_fsl_file = True register.inputs.epi_mask = True # Compute fieldmaps and unwarp using them if fieldmap_images: fieldmap = Node(interface=EPIDeWarp(), name='fieldmap_unwarp') fieldmap.inputs.tediff = FM_TEdiff fieldmap.inputs.esp = FM_echo_spacing fieldmap.inputs.sigma = FM_sigma fieldmap.inputs.mag_file = fieldmap_images[0] fieldmap.inputs.dph_file = fieldmap_images[1] wf.connect(calc_median, 'median_file', fieldmap, 'exf_file') dewarper = MapNode(interface=fsl.FUGUE(), iterfield=['in_file'], name='dewarper') wf.connect(tsnr, 'detrended_file', dewarper, 'in_file') wf.connect(fieldmap, 'exf_mask', dewarper, 'mask_file') wf.connect(fieldmap, 'vsm_file', dewarper, 'shift_in_file') wf.connect(fieldmap, 'exfdw', register, 'source_file') else: wf.connect(calc_median, 'median_file', register, 'source_file') # Get the subject's freesurfer source directory fssource = Node(FreeSurferSource(), name='fssource') fssource.inputs.subject_id = subject_id fssource.inputs.subjects_dir = os.environ['SUBJECTS_DIR'] # Extract wm+csf, brain masks by eroding freesurfer lables and then # transform the masks into the space of the median wmcsf = Node(freesurfer.Binarize(), name='wmcsfmask') mask = wmcsf.clone('anatmask') wmcsftransform = Node(freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), name='wmcsftransform') wmcsftransform.inputs.subjects_dir = os.environ['SUBJECTS_DIR'] wmcsf.inputs.wm_ven_csf = True wmcsf.inputs.match = [4, 5, 14, 15, 24, 31, 43, 44, 63] wmcsf.inputs.binary_file = 'wmcsf.nii.gz' wmcsf.inputs.erode = int(np.ceil(slice_thickness)) wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), wmcsf, 'in_file') if fieldmap_images: wf.connect(fieldmap, 'exf_mask', wmcsftransform, 'source_file') else: wf.connect(calc_median, 'median_file', wmcsftransform, 'source_file') wf.connect(register, 'out_reg_file', wmcsftransform, 'reg_file') wf.connect(wmcsf, 'binary_file', wmcsftransform, 'target_file') mask.inputs.binary_file = 'mask.nii.gz' mask.inputs.dilate = int(np.ceil(slice_thickness)) + 1 mask.inputs.erode = int(np.ceil(slice_thickness)) mask.inputs.min = 0.5 wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), mask, 'in_file') masktransform = wmcsftransform.clone("masktransform") if fieldmap_images: wf.connect(fieldmap, 
'exf_mask', masktransform, 'source_file') else: wf.connect(calc_median, 'median_file', masktransform, 'source_file') wf.connect(register, 'out_reg_file', masktransform, 'reg_file') wf.connect(mask, 'binary_file', masktransform, 'target_file') # Compute Art outliers art = Node(interface=ArtifactDetect(use_differences=[True, False], use_norm=True, norm_threshold=norm_threshold, zintensity_threshold=3, parameter_source='NiPy', bound_by_brainmask=True, save_plot=False, mask_type='file'), name="art") if fieldmap_images: wf.connect(dewarper, 'unwarped_file', art, 'realigned_files') else: wf.connect(tsnr, 'detrended_file', art, 'realigned_files') wf.connect(realign, 'par_file', art, 'realignment_parameters') wf.connect(masktransform, 'transformed_file', art, 'mask_file') # Compute motion regressors motreg = Node(Function(input_names=['motion_params', 'order', 'derivatives'], output_names=['out_files'], function=motion_regressors, imports=imports), name='getmotionregress') wf.connect(realign, 'par_file', motreg, 'motion_params') # Create a filter to remove motion and art confounds createfilter1 = Node(Function(input_names=['motion_params', 'comp_norm', 'outliers'], output_names=['out_files'], function=build_filter1, imports=imports), name='makemotionbasedfilter') wf.connect(motreg, 'out_files', createfilter1, 'motion_params') wf.connect(art, 'norm_files', createfilter1, 'comp_norm') wf.connect(art, 'outlier_files', createfilter1, 'outliers') # Filter the motion and art confounds filter1 = MapNode(fsl.GLM(out_res_name='timeseries.nii.gz', demean=True), iterfield=['in_file', 'design'], name='filtermotion') if fieldmap_images: wf.connect(dewarper, 'unwarped_file', filter1, 'in_file') else: wf.connect(tsnr, 'detrended_file', filter1, 'in_file') wf.connect(createfilter1, 'out_files', filter1, 'design') wf.connect(masktransform, 'transformed_file', filter1, 'mask') # Create a filter to remove noise components based on white matter and CSF createfilter2 = MapNode(Function(input_names=['realigned_file', 'mask_file', 'num_components'], output_names=['out_files'], function=extract_noise_components, imports=imports), iterfield=['realigned_file'], name='makecompcorrfilter') createfilter2.inputs.num_components = num_components wf.connect(filter1, 'out_res', createfilter2, 'realigned_file') wf.connect(masktransform, 'transformed_file', createfilter2, 'mask_file') # Filter noise components filter2 = MapNode(fsl.GLM(out_res_name='timeseries_cleaned.nii.gz', demean=True), iterfield=['in_file', 'design'], name='filtercompcorr') wf.connect(filter1, 'out_res', filter2, 'in_file') wf.connect(createfilter2, 'out_files', filter2, 'design') wf.connect(masktransform, 'transformed_file', filter2, 'mask') # Smoothing using surface and volume smoothing smooth = MapNode(freesurfer.Smooth(), iterfield=['in_file'], name='smooth') smooth.inputs.proj_frac_avg = (0.1, 0.9, 0.1) if surf_fwhm is None: surf_fwhm = 5 * slice_thickness smooth.inputs.surface_fwhm = surf_fwhm if vol_fwhm is None: vol_fwhm = 2 * slice_thickness smooth.inputs.vol_fwhm = vol_fwhm wf.connect(filter2, 'out_res', smooth, 'in_file') wf.connect(register, 'out_reg_file', smooth, 'reg_file') # Bandpass filter the data bandpass = MapNode(fsl.TemporalFilter(), iterfield=['in_file'], name='bandpassfilter') if highpass_freq < 0: bandpass.inputs.highpass_sigma = -1 else: bandpass.inputs.highpass_sigma = 1. / (2 * TR * highpass_freq) if lowpass_freq < 0: bandpass.inputs.lowpass_sigma = -1 else: bandpass.inputs.lowpass_sigma = 1. 
/ (2 * TR * lowpass_freq) wf.connect(smooth, 'smoothed_file', bandpass, 'in_file') # Convert aparc to subject functional space aparctransform = wmcsftransform.clone("aparctransform") if fieldmap_images: wf.connect(fieldmap, 'exf_mask', aparctransform, 'source_file') else: wf.connect(calc_median, 'median_file', aparctransform, 'source_file') wf.connect(register, 'out_reg_file', aparctransform, 'reg_file') wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparctransform, 'target_file') # Sample the average time series in aparc ROIs sampleaparc = MapNode(freesurfer.SegStats(avgwf_txt_file=True, default_color_table=True), iterfield=['in_file'], name='aparc_ts') sampleaparc.inputs.segment_id = ([8] + range(10, 14) + [17, 18, 26, 47] + range(49, 55) + [58] + range(1001, 1036) + range(2001, 2036)) wf.connect(aparctransform, 'transformed_file', sampleaparc, 'segmentation_file') wf.connect(bandpass, 'out_file', sampleaparc, 'in_file') # Sample the time series onto the surface of the target surface. Performs # sampling into left and right hemisphere target = Node(IdentityInterface(fields=['target_subject']), name='target') target.iterables = ('target_subject', filename_to_list(target_subject)) samplerlh = MapNode(freesurfer.SampleToSurface(), iterfield=['source_file'], name='sampler_lh') samplerlh.inputs.sampling_method = "average" samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1) samplerlh.inputs.sampling_units = "frac" samplerlh.inputs.interp_method = "trilinear" #samplerlh.inputs.cortex_mask = True samplerlh.inputs.out_type = 'niigz' samplerlh.inputs.subjects_dir = os.environ['SUBJECTS_DIR'] samplerrh = samplerlh.clone('sampler_rh') samplerlh.inputs.hemi = 'lh' wf.connect(bandpass, 'out_file', samplerlh, 'source_file') wf.connect(register, 'out_reg_file', samplerlh, 'reg_file') wf.connect(target, 'target_subject', samplerlh, 'target_subject') samplerrh.set_input('hemi', 'rh') wf.connect(bandpass, 'out_file', samplerrh, 'source_file') wf.connect(register, 'out_reg_file', samplerrh, 'reg_file') wf.connect(target, 'target_subject', samplerrh, 'target_subject') # Combine left and right hemisphere to text file combiner = MapNode(Function(input_names=['left', 'right'], output_names=['out_file'], function=combine_hemi, imports=imports), iterfield=['left', 'right'], name="combiner") wf.connect(samplerlh, 'out_file', combiner, 'left') wf.connect(samplerrh, 'out_file', combiner, 'right') # Compute registration between the subject's structural and MNI template # This is currently set to perform a very quick registration. 
However, the # registration can be made significantly more accurate for cortical # structures by increasing the number of iterations # All parameters are set using the example from: # https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh reg = Node(ants.Registration(), name='antsRegister') reg.inputs.output_transform_prefix = "output_" reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN'] reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.2, 3.0, 0.0)] # reg.inputs.number_of_iterations = ([[10000, 111110, 11110]]*3 + # [[100, 50, 30]]) reg.inputs.number_of_iterations = [[100, 100, 100]] * 3 + [[100, 20, 10]] reg.inputs.dimension = 3 reg.inputs.write_composite_transform = True reg.inputs.collapse_output_transforms = False reg.inputs.metric = ['Mattes'] * 3 + [['Mattes', 'CC']] reg.inputs.metric_weight = [1] * 3 + [[0.5, 0.5]] reg.inputs.radius_or_number_of_bins = [32] * 3 + [[32, 4]] reg.inputs.sampling_strategy = ['Regular'] * 3 + [[None, None]] reg.inputs.sampling_percentage = [0.3] * 3 + [[None, None]] reg.inputs.convergence_threshold = [1.e-8] * 3 + [-0.01] reg.inputs.convergence_window_size = [20] * 3 + [5] reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3 + [[1, 0.5, 0]] reg.inputs.sigma_units = ['vox'] * 4 reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]]*2 + [[4, 2, 1]] reg.inputs.use_estimate_learning_rate_once = [True] * 4 reg.inputs.use_histogram_matching = [False] * 3 + [True] reg.inputs.output_warped_image = 'output_warped_image.nii.gz' reg.inputs.fixed_image = \ os.path.abspath('OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz') reg.inputs.num_threads = 4 reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'} # Convert T1.mgz to nifti for using with ANTS convert = Node(freesurfer.MRIConvert(out_type='niigz'), name='convert2nii') wf.connect(fssource, 'T1', convert, 'in_file') # Mask the T1.mgz file with the brain mask computed earlier maskT1 = Node(fsl.BinaryMaths(operation='mul'), name='maskT1') wf.connect(mask, 'binary_file', maskT1, 'operand_file') wf.connect(convert, 'out_file', maskT1, 'in_file') wf.connect(maskT1, 'out_file', reg, 'moving_image') # Convert the BBRegister transformation to ANTS ITK format convert2itk = MapNode(C3dAffineTool(), iterfield=['transform_file', 'source_file'], name='convert2itk') convert2itk.inputs.fsl2ras = True convert2itk.inputs.itk_transform = True wf.connect(register, 'out_fsl_file', convert2itk, 'transform_file') if fieldmap_images: wf.connect(fieldmap, 'exf_mask', convert2itk, 'source_file') else: wf.connect(calc_median, 'median_file', convert2itk, 'source_file') wf.connect(convert, 'out_file', convert2itk, 'reference_file') # Concatenate the affine and ants transforms into a list pickfirst = lambda x: x[0] merge = MapNode(Merge(2), iterfield=['in2'], name='mergexfm') wf.connect(convert2itk, 'itk_transform', merge, 'in2') wf.connect(reg, ('composite_transform', pickfirst), merge, 'in1') # Apply the combined transform to the time series file sample2mni = MapNode(ants.ApplyTransforms(), iterfield=['input_image', 'transforms'], name='sample2mni') sample2mni.inputs.input_image_type = 3 sample2mni.inputs.interpolation = 'BSpline' sample2mni.inputs.invert_transform_flags = [False, False] sample2mni.inputs.reference_image = \ os.path.abspath('OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz') sample2mni.inputs.terminal_output = 'file' wf.connect(bandpass, 'out_file', sample2mni, 'input_image') wf.connect(merge, 'out', sample2mni, 'transforms') # Sample the time series file for each subcortical roi ts2txt = 
MapNode(Function(input_names=['timeseries_file', 'label_file', 'indices'], output_names=['out_file'], function=extract_subrois, imports=imports), iterfield=['timeseries_file'], name='getsubcortts') ts2txt.inputs.indices = [8] + range(10, 14) + [17, 18, 26, 47] +\ range(49, 55) + [58] ts2txt.inputs.label_file = \ os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_' '2mm.nii.gz')) wf.connect(sample2mni, 'output_image', ts2txt, 'timeseries_file') # Save the relevant data into an output directory datasink = Node(interface=DataSink(), name="datasink") datasink.inputs.base_directory = sink_directory datasink.inputs.container = subject_id datasink.inputs.substitutions = [('_target_subject_', '')] datasink.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2') wf.connect(despiker, 'out_file', datasink, 'resting.qa.despike') wf.connect(realign, 'par_file', datasink, 'resting.qa.motion') wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr') wf.connect(tsnr, 'mean_file', datasink, 'resting.qa.tsnr.@mean') wf.connect(tsnr, 'stddev_file', datasink, 'resting.qa.@tsnr_stddev') if fieldmap_images: wf.connect(fieldmap, 'exf_mask', datasink, 'resting.reference') else: wf.connect(calc_median, 'median_file', datasink, 'resting.reference') wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm') wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity') wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files') wf.connect(mask, 'binary_file', datasink, 'resting.mask') wf.connect(masktransform, 'transformed_file', datasink, 'resting.mask.@transformed_file') wf.connect(register, 'out_reg_file', datasink, 'resting.registration.bbreg') wf.connect(reg, ('composite_transform', pickfirst), datasink, 'resting.registration.ants') wf.connect(register, 'min_cost_file', datasink, 'resting.qa.bbreg.@mincost') wf.connect(smooth, 'smoothed_file', datasink, 'resting.timeseries.fullpass') wf.connect(bandpass, 'out_file', datasink, 'resting.timeseries.bandpassed') wf.connect(sample2mni, 'output_image', datasink, 'resting.timeseries.mni') wf.connect(createfilter1, 'out_files', datasink, 'resting.regress.@regressors') wf.connect(createfilter2, 'out_files', datasink, 'resting.regress.@compcorr') wf.connect(sampleaparc, 'summary_file', datasink, 'resting.parcellations.aparc') wf.connect(sampleaparc, 'avgwf_txt_file', datasink, 'resting.parcellations.aparc.@avgwf') wf.connect(ts2txt, 'out_file', datasink, 'resting.parcellations.grayo.@subcortical') datasink2 = Node(interface=DataSink(), name="datasink2") datasink2.inputs.base_directory = sink_directory datasink2.inputs.container = subject_id datasink2.inputs.substitutions = [('_target_subject_', '')] datasink2.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2') wf.connect(combiner, 'out_file', datasink2, 'resting.parcellations.grayo.@surface') return wf """ Creates the full workflow including getting information from dicom files """ def create_resting_workflow(args, name='resting'): TR = args.TR slice_times = args.slice_times slice_thickness = None if args.dicom_file: TR, slice_times, slice_thickness = get_info(args.dicom_file) slice_times = (np.array(slice_times)/1000.).tolist() if slice_thickness is None: from nibabel import load img = load(args.files[0]) slice_thickness = max(img.get_header().get_zooms()[:3]) kwargs = dict(files=[os.path.abspath(filename) for filename in args.files], subject_id=args.subject_id, n_vol=args.n_vol, despike=args.despike, TR=TR, slice_times=slice_times, 
slice_thickness=slice_thickness, lowpass_freq=args.lowpass_freq, highpass_freq=args.highpass_freq, sink_directory=os.path.abspath(args.sink), name=name) if args.field_maps: kwargs.update(**dict(fieldmap_images=args.field_maps, FM_TEdiff=args.TE_diff, FM_echo_spacing=args.echo_spacing, FM_sigma=args.sigma)) wf = create_workflow(**kwargs) return wf if __name__ == "__main__": from argparse import ArgumentParser parser = ArgumentParser(description=__doc__) parser.add_argument("-d", "--dicom_file", dest="dicom_file", help="an example dicom file from the resting series") parser.add_argument("-f", "--files", dest="files", nargs="+", help="4d nifti files for resting state", required=True) parser.add_argument("-s", "--subject_id", dest="subject_id", help="FreeSurfer subject id", required=True) parser.add_argument("-n", "--n_vol", dest="n_vol", default=0, type=int, help="Volumes to skip at the beginning") parser.add_argument("--despike", dest="despike", default=False, action="store_true", help="Use despiked data") parser.add_argument("--TR", dest="TR", default=None, help="TR if dicom not provided in seconds") parser.add_argument("--slice_times", dest="slice_times", nargs="+", type=float, help="Slice times in seconds") parser.add_argument("-l", "--lowpass_freq", dest="lowpass_freq", default=-1, help="Low pass frequency (Hz)") parser.add_argument("-u", "--highpass_freq", dest="highpass_freq", default=-1, help="High pass frequency (Hz)") parser.add_argument("-o", "--output_dir", dest="sink", help="Output directory base") parser.add_argument("-w", "--work_dir", dest="work_dir", help="Output directory base") parser.add_argument("-p", "--plugin", dest="plugin", default='Linear', help="Plugin to use") parser.add_argument("--plugin_args", dest="plugin_args", help="Plugin arguments") parser.add_argument("--field_maps", dest="field_maps", nargs="+", help="field map niftis") parser.add_argument("--fm_echospacing", dest="echo_spacing", type=float, help="field map echo spacing") parser.add_argument("--fm_TE_diff", dest='TE_diff', type=float, help="field map echo time difference") parser.add_argument("--fm_sigma", dest='sigma', type=float, help="field map sigma value") args = parser.parse_args() wf = create_resting_workflow(args) if args.work_dir: work_dir = os.path.abspath(args.work_dir) else: work_dir = os.getcwd() wf.base_dir = work_dir if args.plugin_args: wf.run(args.plugin, plugin_args=eval(args.plugin_args)) else: wf.run(args.plugin) nipype-0.9.2/examples/smri_ants_build_template.py000066400000000000000000000110671227300005300222640ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ =============================================== sMRI: Using new ANTS for creating a T1 template =============================================== In this tutorial we will use ANTS (old version aka "ANTS") based workflow to create a template out of multiple T1 volumes. 1. Tell python where to find the appropriate functions. """ import os import nipype.interfaces.utility as util import nipype.interfaces.ants as ants import nipype.interfaces.io as io import nipype.pipeline.engine as pe # pypeline engine from nipype.workflows.smri.ants import ANTSTemplateBuildSingleIterationWF """ 2. 
Download T1 volumes into home directory """ import urllib2 homeDir=os.getenv("HOME") requestedPath=os.path.join(homeDir,'nipypeTestPath') mydatadir=os.path.realpath(requestedPath) if not os.path.exists(mydatadir): os.makedirs(mydatadir) print mydatadir MyFileURLs=[ ('http://slicer.kitware.com/midas3/download?bitstream=13121','01_T1_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13122','02_T1_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13124','03_T1_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13128','01_T1_inv_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13123','02_T1_inv_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13125','03_T1_inv_half.nii.gz'), ] for tt in MyFileURLs: myURL=tt[0] localFilename=os.path.join(mydatadir,tt[1]) if not os.path.exists(localFilename): remotefile = urllib2.urlopen(myURL) localFile = open(localFilename, 'wb') localFile.write(remotefile.read()) localFile.close() print("Downloaded file: {0}".format(localFilename)) else: print("File previously downloaded {0}".format(localFilename)) input_images=[ os.path.join(mydatadir,'01_T1_half.nii.gz'), os.path.join(mydatadir,'02_T1_half.nii.gz'), os.path.join(mydatadir,'03_T1_half.nii.gz') ] input_passive_images=[ {'INV_T1':os.path.join(mydatadir,'01_T1_inv_half.nii.gz')}, {'INV_T1':os.path.join(mydatadir,'02_T1_inv_half.nii.gz')}, {'INV_T1':os.path.join(mydatadir,'03_T1_inv_half.nii.gz')} ] """ 3. Define the workflow and its working directory """ tbuilder=pe.Workflow(name="ANTSTemplateBuilder") tbuilder.base_dir=requestedPath """ 4. Define data sources. In real life these would be replace by DataGrabbers """ datasource = pe.Node(interface=util.IdentityInterface(fields= ['imageList', 'passiveImagesDictionariesList']), run_without_submitting=True, name='InputImages' ) datasource.inputs.imageList=input_images datasource.inputs.passiveImagesDictionariesList=input_passive_images datasource.inputs.sort_filelist = True """ 5. Template is initialized by a simple average """ initAvg = pe.Node(interface=ants.AverageImages(), name ='initAvg') initAvg.inputs.dimension = 3 initAvg.inputs.normalize = True tbuilder.connect(datasource, "imageList", initAvg, "images") """ 6. Define the first iteration of template building """ buildTemplateIteration1=ANTSTemplateBuildSingleIterationWF('iteration01') tbuilder.connect(initAvg, 'output_average_image', buildTemplateIteration1, 'inputspec.fixed_image') tbuilder.connect(datasource, 'imageList', buildTemplateIteration1, 'inputspec.images') tbuilder.connect(datasource, 'passiveImagesDictionariesList', buildTemplateIteration1, 'inputspec.ListOfPassiveImagesDictionaries') """ 7. Define the second iteration of template building """ buildTemplateIteration2 = ANTSTemplateBuildSingleIterationWF('iteration02') tbuilder.connect(buildTemplateIteration1, 'outputspec.template', buildTemplateIteration2, 'inputspec.fixed_image') tbuilder.connect(datasource, 'imageList', buildTemplateIteration2, 'inputspec.images') tbuilder.connect(datasource, 'passiveImagesDictionariesList', buildTemplateIteration2, 'inputspec.ListOfPassiveImagesDictionaries') """ 8. 
Move selected files to a designated results folder """ datasink = pe.Node(io.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.join(requestedPath, "results") tbuilder.connect(buildTemplateIteration2, 'outputspec.template',datasink,'PrimaryTemplate') tbuilder.connect(buildTemplateIteration2, 'outputspec.passive_deformed_templates',datasink,'PassiveTemplate') tbuilder.connect(initAvg, 'output_average_image', datasink,'PreRegisterAverage') """ 8. Run the workflow """ tbuilder.run() nipype-0.9.2/examples/smri_ants_registration.py000066400000000000000000000056311227300005300220040ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ================================== sMRI: Using ANTS for registration ================================== In this simple tutorial we will use the Registration interface from ANTS to coregister two T1 volumes. 1. Tell python where to find the appropriate functions. """ import os import urllib2 from nipype.interfaces.ants import Registration """ 2. Download T1 volumes into home directory """ homeDir=os.getenv("HOME") requestedPath=os.path.join(homeDir,'nipypeTestPath') mydatadir=os.path.realpath(requestedPath) if not os.path.exists(mydatadir): os.makedirs(mydatadir) print mydatadir MyFileURLs=[ ('http://slicer.kitware.com/midas3/download?bitstream=13121','01_T1_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13122','02_T1_half.nii.gz'), ] for tt in MyFileURLs: myURL=tt[0] localFilename=os.path.join(mydatadir,tt[1]) if not os.path.exists(localFilename): remotefile = urllib2.urlopen(myURL) localFile = open(localFilename, 'wb') localFile.write(remotefile.read()) localFile.close() print("Downloaded file: {0}".format(localFilename)) else: print("File previously downloaded {0}".format(localFilename)) input_images=[ os.path.join(mydatadir,'01_T1_half.nii.gz'), os.path.join(mydatadir,'02_T1_half.nii.gz'), ] """ 3. Define the parameters of the registration """ reg = Registration() reg.inputs.fixed_image = input_images[0] reg.inputs.moving_image = input_images[1] reg.inputs.output_transform_prefix = 'thisTransform' reg.inputs.output_warped_image = 'INTERNAL_WARPED.nii.gz' reg.inputs.output_transform_prefix = "output_" reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN'] reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.2, 3.0, 0.0)] reg.inputs.number_of_iterations = ([[10000, 111110, 11110]]*3 + [[100, 50, 30]]) reg.inputs.dimension = 3 reg.inputs.write_composite_transform = True reg.inputs.collapse_output_transforms = False reg.inputs.metric = ['Mattes'] * 3 + [['Mattes', 'CC']] reg.inputs.metric_weight = [1] * 3 + [[0.5, 0.5]] reg.inputs.radius_or_number_of_bins = [32] * 3 + [[32, 4]] reg.inputs.sampling_strategy = ['Regular'] * 3 + [[None, None]] reg.inputs.sampling_percentage = [0.3] * 3 + [[None, None]] reg.inputs.convergence_threshold = [1.e-8] * 3 + [-0.01] reg.inputs.convergence_window_size = [20] * 3 + [5] reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3 + [[1, 0.5, 0]] reg.inputs.sigma_units = ['vox'] * 4 reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]]*2 + [[4, 2, 1]] reg.inputs.use_estimate_learning_rate_once = [True] * 4 reg.inputs.use_histogram_matching = [False] * 3 + [True] reg.inputs.initial_moving_transform_com = True print reg.cmdline """ 3. 
Run the registration """ reg.run() nipype-0.9.2/examples/smri_antsregistration_build_template.py000066400000000000000000000157251227300005300247240ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ ====================================================== sMRI: Using new ANTS for creating a T1 template (ITK4) ====================================================== In this tutorial we will use ANTS (new ITK4 version aka "antsRegistration") based workflow to create a template out of multiple T1 volumes. We will also showcase how to fine tune SGE jobs requirements. 1. Tell python where to find the appropriate functions. """ import os import nipype.interfaces.utility as util import nipype.interfaces.ants as ants import nipype.interfaces.io as io import nipype.pipeline.engine as pe # pypeline engine from nipype.workflows.smri.ants import antsRegistrationTemplateBuildSingleIterationWF """ 2. Download T1 volumes into home directory """ import urllib2 homeDir=os.getenv("HOME") requestedPath=os.path.join(homeDir,'nipypeTestPath') mydatadir=os.path.realpath(requestedPath) if not os.path.exists(mydatadir): os.makedirs(mydatadir) print mydatadir MyFileURLs=[ ('http://slicer.kitware.com/midas3/download?bitstream=13121','01_T1_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13122','02_T1_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13124','03_T1_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13128','01_T1_inv_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13123','02_T1_inv_half.nii.gz'), ('http://slicer.kitware.com/midas3/download?bitstream=13125','03_T1_inv_half.nii.gz'), ] for tt in MyFileURLs: myURL=tt[0] localFilename=os.path.join(mydatadir,tt[1]) if not os.path.exists(localFilename): remotefile = urllib2.urlopen(myURL) localFile = open(localFilename, 'wb') localFile.write(remotefile.read()) localFile.close() print("Downloaded file: {0}".format(localFilename)) else: print("File previously downloaded {0}".format(localFilename)) """ ListOfImagesDictionaries - a list of dictionaries where each dictionary is for one scan session, and the mappings in the dictionary are for all the co-aligned images for that one scan session """ ListOfImagesDictionaries=[ {'T1':os.path.join(mydatadir,'01_T1_half.nii.gz'),'INV_T1':os.path.join(mydatadir,'01_T1_inv_half.nii.gz'),'LABEL_MAP':os.path.join(mydatadir,'01_T1_inv_half.nii.gz')}, {'T1':os.path.join(mydatadir,'02_T1_half.nii.gz'),'INV_T1':os.path.join(mydatadir,'02_T1_inv_half.nii.gz'),'LABEL_MAP':os.path.join(mydatadir,'02_T1_inv_half.nii.gz')}, {'T1':os.path.join(mydatadir,'03_T1_half.nii.gz'),'INV_T1':os.path.join(mydatadir,'03_T1_inv_half.nii.gz'),'LABEL_MAP':os.path.join(mydatadir,'03_T1_inv_half.nii.gz')} ] input_passive_images=[ {'INV_T1':os.path.join(mydatadir,'01_T1_inv_half.nii.gz')}, {'INV_T1':os.path.join(mydatadir,'02_T1_inv_half.nii.gz')}, {'INV_T1':os.path.join(mydatadir,'03_T1_inv_half.nii.gz')} ] """ registrationImageTypes - A list of the image types to be used actively during the estimation process of registration, any image type not in this list will be passively resampled with the estimated transforms. ['T1','T2'] """ registrationImageTypes=['T1'] """ interpolationMap - A map of image types to interpolation modes. If an image type is not listed, it will be linearly interpolated. 
{ 'labelmap':'NearestNeighbor', 'FLAIR':'WindowedSinc' } """ interpolationMapping={'INV_T1':'LanczosWindowedSinc','LABEL_MAP':'NearestNeighbor','T1':'Linear'} """ 3. Define the workflow and its working directory """ tbuilder=pe.Workflow(name="antsRegistrationTemplateBuilder") tbuilder.base_dir=requestedPath """ 4. Define data sources. In real life these would be replace by DataGrabbers """ InitialTemplateInputs=[ mdict['T1'] for mdict in ListOfImagesDictionaries ] datasource = pe.Node(interface=util.IdentityInterface(fields= ['InitialTemplateInputs', 'ListOfImagesDictionaries', 'registrationImageTypes','interpolationMapping']), run_without_submitting=True, name='InputImages' ) datasource.inputs.InitialTemplateInputs=InitialTemplateInputs datasource.inputs.ListOfImagesDictionaries=ListOfImagesDictionaries datasource.inputs.registrationImageTypes=registrationImageTypes datasource.inputs.interpolationMapping=interpolationMapping datasource.inputs.sort_filelist = True """ 5. Template is initialized by a simple average in this simple example, any reference image could be used (i.e. a previously created template) """ initAvg = pe.Node(interface=ants.AverageImages(), name ='initAvg') initAvg.inputs.dimension = 3 initAvg.inputs.normalize = True tbuilder.connect(datasource, "InitialTemplateInputs", initAvg, "images") """ 6. Define the first iteration of template building """ buildTemplateIteration1=antsRegistrationTemplateBuildSingleIterationWF('iteration01') """ Here we are fine tuning parameters of the SGE job (memory limit, numebr of cores etc.) """ BeginANTS = buildTemplateIteration1.get_node("BeginANTS") BeginANTS.plugin_args={'qsub_args': '-S /bin/bash -pe smp1 8-12 -l mem_free=6000M -o /dev/null -e /dev/null queue_name', 'overwrite': True} tbuilder.connect(initAvg, 'output_average_image', buildTemplateIteration1, 'inputspec.fixed_image') tbuilder.connect(datasource, 'ListOfImagesDictionaries', buildTemplateIteration1, 'inputspec.ListOfImagesDictionaries') tbuilder.connect(datasource, 'registrationImageTypes', buildTemplateIteration1, 'inputspec.registrationImageTypes') tbuilder.connect(datasource, 'interpolationMapping', buildTemplateIteration1, 'inputspec.interpolationMapping') """ 7. Define the second iteration of template building """ buildTemplateIteration2 = antsRegistrationTemplateBuildSingleIterationWF('iteration02') BeginANTS = buildTemplateIteration2.get_node("BeginANTS") BeginANTS.plugin_args={'qsub_args': '-S /bin/bash -pe smp1 8-12 -l mem_free=6000M -o /dev/null -e /dev/null queue_name', 'overwrite': True} tbuilder.connect(buildTemplateIteration1, 'outputspec.template', buildTemplateIteration2, 'inputspec.fixed_image') tbuilder.connect(datasource, 'ListOfImagesDictionaries', buildTemplateIteration2, 'inputspec.ListOfImagesDictionaries') tbuilder.connect(datasource, 'registrationImageTypes', buildTemplateIteration2, 'inputspec.registrationImageTypes') tbuilder.connect(datasource, 'interpolationMapping', buildTemplateIteration2, 'inputspec.interpolationMapping') """ 8. Move selected files to a designated results folder """ datasink = pe.Node(io.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.join(requestedPath, "results") tbuilder.connect(buildTemplateIteration2, 'outputspec.template',datasink,'PrimaryTemplate') tbuilder.connect(buildTemplateIteration2, 'outputspec.passive_deformed_templates',datasink,'PassiveTemplate') tbuilder.connect(initAvg, 'output_average_image', datasink,'PreRegisterAverage') """ 9. 
Run the workflow """ tbuilder.run(plugin="SGE") nipype-0.9.2/examples/smri_freesurfer.py000066400000000000000000000034621227300005300204150ustar00rootroot00000000000000#!/usr/bin/env python """ ================ sMRI: FreeSurfer ================ This script, smri_freesurfer.py, demonstrates the ability to call reconall on a set of subjects and then make an average subject. python smri_freesurfer.py Import necessary modules from nipype. """ import os import nipype.pipeline.engine as pe import nipype.interfaces.io as nio from nipype.interfaces.freesurfer.preprocess import ReconAll from nipype.interfaces.freesurfer.utils import MakeAverageSubject subject_list = ['s1', 's3'] data_dir = os.path.abspath('data') subjects_dir = os.path.abspath('amri_freesurfer_tutorial/subjects_dir') wf = pe.Workflow(name="l1workflow") wf.base_dir = os.path.abspath('amri_freesurfer_tutorial/workdir') """ Grab data """ datasource = pe.MapNode(interface=nio.DataGrabber(infields=['subject_id'], outfields=['struct']), name='datasource', iterfield=['subject_id']) datasource.inputs.base_directory = data_dir datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = dict(struct=[['subject_id', 'struct']]) datasource.inputs.subject_id = subject_list datasource.inputs.sort_filelist = True """ Run recon-all """ recon_all = pe.MapNode(interface=ReconAll(), name='recon_all', iterfield=['subject_id', 'T1_files']) recon_all.inputs.subject_id = subject_list if not os.path.exists(subjects_dir): os.mkdir(subjects_dir) recon_all.inputs.subjects_dir = subjects_dir wf.connect(datasource, 'struct', recon_all, 'T1_files') """ Make average subject """ average = pe.Node(interface=MakeAverageSubject(), name="average") average.inputs.subjects_dir = subjects_dir wf.connect(recon_all, 'subject_id', average, 'subjects_ids') wf.run("MultiProc", plugin_args={'n_procs': 4}) nipype-0.9.2/examples/tessellation_tutorial.py000066400000000000000000000067001227300005300216420ustar00rootroot00000000000000#!/usr/bin/env python """ ================================================= sMRI: Regional Tessellation and Surface Smoothing ================================================= Introduction ============ This script, tessellation_tutorial.py, demonstrates the use of create_tessellation_flow from nipype.workflows.smri.freesurfer, and it can be run with: python tessellation_tutorial.py This example requires that the user has Freesurfer installed, and that the Freesurfer directory for 'fsaverage' is present. .. seealso:: ConnectomeViewer The Connectome Viewer connects Multi-Modal Multi-Scale Neuroimaging and Network Datasets For Analysis and Visualization in Python. http://www.geuz.org/gmsh/ Gmsh: a three-dimensional finite element mesh generator with built-in pre- and post-processing facilities http://www.blender.org/ Blender is the free open source 3D content creation suite, available for all major operating systems under the GNU General Public License. .. warning:: This workflow will take several hours to finish entirely, since smoothing the larger cortical surfaces is very time consuming. Packages and Data Setup ======================= Import the necessary modules and workflow from nipype. 
""" import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.cmtk as cmtk import nipype.interfaces.io as nio # Data i/o import os, os.path as op from nipype.workflows.smri.freesurfer import create_tessellation_flow """ Directories =========== Set the default directory and lookup table (LUT) paths """ fs_dir = os.environ['FREESURFER_HOME'] lookup_file = op.join(fs_dir,'FreeSurferColorLUT.txt') subjects_dir = op.join(fs_dir, 'subjects/') output_dir = './tessellate_tutorial' """ Inputs ====== Create the tessellation workflow and set inputs Here we will choose Gifti (gii) as the output format, because we want to able to view the surface in ConnectomeViewer. In you intend to view the meshes in gmsh or Blender, you should change the workflow creation to use stereolithographic (stl) format. """ tessflow = create_tessellation_flow(name='tessflow', out_format='gii') tessflow.inputs.inputspec.subject_id = 'fsaverage' tessflow.inputs.inputspec.subjects_dir = subjects_dir tessflow.inputs.inputspec.lookup_file = lookup_file """ We also create a conditional node to package the surfaces for ConnectomeViewer. Simply set cff to "False" to ignore this step. """ cff = True if cff: cff = pe.Node(interface=cmtk.CFFConverter(), name='cff') cff.inputs.out_file = 'Meshes.cff' """ Outputs ======= Create a datasink to organize the smoothed meshes Using regular-expression substitutions we can remove the extraneous folders generated by the mapnode. """ datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = 'meshes' datasink.inputs.regexp_substitutions = [('_smoother[\d]*/', '')] """ Execution ========= Finally, create and run another pipeline that connects the workflow and datasink """ tesspipe = pe.Workflow(name='tessellate_tutorial') tesspipe.base_dir = output_dir tesspipe.connect([(tessflow, datasink,[('outputspec.meshes', '@meshes.all')])]) """ If the surfaces are to be packaged, this will connect the CFFConverter node to the tessellation and smoothing workflow, as well as to the datasink. """ if cff: tesspipe.connect([(tessflow, cff,[('outputspec.meshes', 'gifti_surfaces')])]) tesspipe.connect([(cff, datasink,[('connectome_file', '@cff')])]) tesspipe.run() nipype-0.9.2/examples/workshop_dartmouth_2010.py000066400000000000000000000231621227300005300216170ustar00rootroot00000000000000""" ================================ Workshop: Dartmouth College 2010 ================================ First lets go to the directory with the data we'll be working on and start the interactive python interpreter (with some nipype specific configuration). Note that nipype does not need to be run through ipython - it is just much nicer to do interactive work in it. .. sourcecode:: bash cd $TDPATH ipython -p nipype For every neuroimaging procedure supported by nipype there exists a wrapper - a small piece of code managing the underlying software (FSL, SPM, AFNI etc.). We call those interfaces. They are standarised so we can hook them up together. Lets have a look at some of them. .. 
sourcecode:: ipython In [1]: import nipype.interfaces.fsl as fsl In [2]: fsl.BET.help() Inputs ------ Mandatory: in_file: input file to skull strip Optional: args: Additional parameters to the command center: center of gravity in voxels environ: Environment variables (default={}) frac: fractional intensity threshold functional: apply to 4D fMRI data mutually exclusive: functional, reduce_bias mask: create binary mask image mesh: generate a vtk mesh brain surface no_output: Don't generate segmented output out_file: name of output skull stripped image outline: create surface outline image output_type: FSL output type radius: head radius reduce_bias: bias field and neck cleanup mutually exclusive: functional, reduce_bias skull: create skull image threshold: apply thresholding to segmented brain image and mask vertical_gradient: vertical gradient in fractional intensity threshold (-1, 1) Outputs ------- mask_file: path/name of binary brain mask (if generated) meshfile: path/name of vtk mesh file (if generated) out_file: path/name of skullstripped file outline_file: path/name of outline file (if generated) In [3]: import nipype.interfaces.freesurfer as fs In [4]: fs.Smooth.help() Inputs ------ Mandatory: in_file: source volume num_iters: number of iterations instead of fwhm mutually exclusive: surface_fwhm reg_file: registers volume to surface anatomical surface_fwhm: surface FWHM in mm mutually exclusive: num_iters requires: reg_file Optional: args: Additional parameters to the command environ: Environment variables (default={}) proj_frac: project frac of thickness a long surface normal mutually exclusive: proj_frac_avg proj_frac_avg: average a long normal min max delta mutually exclusive: proj_frac smoothed_file: output volume subjects_dir: subjects directory vol_fwhm: volumesmoothing outside of surface Outputs ------- args: Additional parameters to the command environ: Environment variables smoothed_file: smoothed input volume subjects_dir: subjects directory You can read about all of the interfaces implemented in nipype at our online documentation at http://nipy.sourceforge.net/nipype/documentation.html#documentation . Check it out now. Using interfaces ---------------- Having interfaces allows us to use third party software (like FSL BET) as function. Look how simple it is. """ import nipype.interfaces.fsl as fsl result = fsl.BET(in_file='data/s1/struct.nii').run() print result """ Running a single program is not much of a breakthrough. Lets run motion correction followed by smoothing (isotropic - in other words not using SUSAN). Notice that in the first line we are setting the output data type for all FSL interfaces. """ fsl.FSLCommand.set_default_output_type('NIFTI_GZ') result1 = fsl.MCFLIRT(in_file='data/s1/f3.nii').run() result2 = fsl.Smooth(in_file='f3_mcf.nii.gz', fwhm=6).run() """ Simple workflow --------------- In the previous example we knew that fsl.MCFLIRT will produce a file called f3_mcf.nii.gz and we have hard coded this as an input to fsl.Smooth. This is quite limited, but luckily nipype supports joining interfaces in pipelines. This way output of one interface will be used as an input of another without having to hard code anything. Before connecting Interfaces we need to put them into (separate) Nodes and give them unique names. This way every interface will process data in a separate folder. 
""" import nipype.pipeline.engine as pe import os motion_correct = pe.Node(interface=fsl.MCFLIRT(in_file=os.path.abspath('data/s1/f3.nii')), name="motion_correct") smooth = pe.Node(interface=fsl.Smooth(fwhm=6), name="smooth") motion_correct_and_smooth = pe.Workflow(name="motion_correct_and_smooth") motion_correct_and_smooth.base_dir = os.path.abspath('.') # define where will be the root folder for the workflow motion_correct_and_smooth.connect([ (motion_correct, smooth, [('out_file', 'in_file')]) ]) # we are connecting 'out_file' output of motion_correct to 'in_file' input of smooth motion_correct_and_smooth.run() """ Another workflow ---------------- Another example of a simple workflow (calculate the mean of fMRI signal and subtract it). This time we'll be assigning inputs after defining the workflow. """ calc_mean = pe.Node(interface=fsl.ImageMaths(), name="calc_mean") calc_mean.inputs.op_string = "-Tmean" subtract = pe.Node(interface=fsl.ImageMaths(), name="subtract") subtract.inputs.op_string = "-sub" demean = pe.Workflow(name="demean") demean.base_dir = os.path.abspath('.') demean.connect([ (calc_mean, subtract, [('out_file', 'in_file2')]) ]) demean.inputs.calc_mean.in_file = os.path.abspath('data/s1/f3.nii') demean.inputs.subtract.in_file = os.path.abspath('data/s1/f3.nii') demean.run() """ Reusing workflows ----------------- The beauty of the workflows is that they are reusable. We can just import a workflow made by someone else and feed it with our data. """ from fmri_fsl import preproc preproc.base_dir = os.path.abspath('.') preproc.inputs.inputspec.func = os.path.abspath('data/s1/f3.nii') preproc.inputs.inputspec.struct = os.path.abspath('data/s1/struct.nii') preproc.run() """ ... and we can run it again and it won't actually rerun anything because none of the parameters have changed. """ preproc.run() """ ... and we can change a parameter and run it again. Only the dependent nodes are rerun and that too only if the input state has changed. """ preproc.inputs.meanfuncmask.frac = 0.5 preproc.run() """ Visualizing workflows 1 ----------------------- So what did we run in this precanned workflow """ preproc.write_graph() """ Datasink -------- Datasink is a special interface for copying and arranging results. """ import nipype.interfaces.io as nio preproc.inputs.inputspec.func = os.path.abspath('data/s1/f3.nii') preproc.inputs.inputspec.struct = os.path.abspath('data/s1/struct.nii') datasink = pe.Node(interface=nio.DataSink(),name='sinker') preprocess = pe.Workflow(name='preprocout') preprocess.base_dir = os.path.abspath('.') preprocess.connect([ (preproc, datasink, [('meanfunc2.out_file', 'meanfunc'), ('maskfunc3.out_file', 'funcruns')]) ]) preprocess.run() """ Datagrabber ----------- Datagrabber is (surprise, surprise) an interface for collecting files from hard drive. It is very flexible and supports almost any file organisation of your data you can imagine. 
""" datasource1 = nio.DataGrabber() datasource1.inputs.template = 'data/s1/f3.nii' datasource1.inputs.sort_filelist = True results = datasource1.run() print results.outputs datasource2 = nio.DataGrabber() datasource2.inputs.template = 'data/s*/f*.nii' datasource2.inputs.sort_filelist = True results = datasource2.run() print results.outputs datasource3 = nio.DataGrabber(infields=['run']) datasource3.inputs.template = 'data/s1/f%d.nii' datasource3.inputs.sort_filelist = True datasource3.inputs.run = [3, 7] results = datasource3.run() print results.outputs datasource4 = nio.DataGrabber(infields=['subject_id', 'run']) datasource4.inputs.template = 'data/%s/f%d.nii' datasource4.inputs.sort_filelist = True datasource4.inputs.run = [3, 7] datasource4.inputs.subject_id = ['s1', 's3'] results = datasource4.run() print results.outputs """ Iterables --------- Iterables is a special field of the Node class that enables to iterate all workfloes/nodes connected to it over some parameters. Here we'll use it to iterate over two subjects. """ import nipype.interfaces.utility as util infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource") infosource.iterables = ('subject_id', ['s1', 's3']) datasource = pe.Node(nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name="datasource") datasource.inputs.template = '%s/%s.nii' datasource.inputs.base_directory = os.path.abspath('data') datasource.inputs.template_args = dict(func=[['subject_id','f3']], struct=[['subject_id','struct']]) datasource.inputs.sort_filelist = True my_workflow = pe.Workflow(name="my_workflow") my_workflow.base_dir = os.path.abspath('.') my_workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]), (datasource, preproc, [('func', 'inputspec.func'), ('struct', 'inputspec.struct')])]) my_workflow.run() """ and we can change a node attribute and run it again """ smoothnode = my_workflow.get_node('preproc.smooth') assert(str(smoothnode)=='preproc.smooth') smoothnode.iterables = ('fwhm', [5.,10.]) my_workflow.run() """ Visualizing workflows 2 ----------------------- In the case of nested workflows, we might want to look at expanded forms of the workflow. """ nipype-0.9.2/matlabscripts/000077500000000000000000000000001227300005300156665ustar00rootroot00000000000000nipype-0.9.2/matlabscripts/README.txt000066400000000000000000000000601227300005300173600ustar00rootroot00000000000000This directory contains useful matlab scripts. 
nipype-0.9.2/matlabscripts/parse_spm_config.m000066400000000000000000000024571227300005300213720ustar00rootroot00000000000000function parse_spm_config conf = spm_config; sub_parse_config(spm_config) function sub_parse_config(conf,level) if nargin<2, level = 1; end if ~isfield(conf,'tag') level = level - 1; else if ~strcmp(conf.type,'entry') fprintf('|%s-%s:%s\n',char(repmat(' |',1,level-1)),conf.tag,conf.type); else fprintf('|%s-%s:%s\n',char(repmat(' |',1,level-1)),conf.tag,conf.type); end end if isfield(conf,'values'), for i0=1:numel(conf.values), if isstruct(conf.values{i0}), sub_parse_config(conf.values{i0},level+1) else if ischar(conf.values{i0}) fprintf('|%s-[%s:%s]\n',char(repmat(' |',1,level)),conf.labels{i0},conf.values{i0}); else fprintf('|%s-[%s:%s]\n',char(repmat(' |',1,level)),conf.labels{i0},num2str(conf.values{i0})); end end end end if isfield(conf,'val'), for i0=1:numel(conf.val), if isstruct(conf.val{i0}), sub_parse_config(conf.val{i0},level+1) else if ischar(conf.val{i0}) fprintf('|%s-[default:%s]\n',char(repmat(' |',1,level)),conf.val{i0}); else fprintf('|%s-[default:%s]\n',char(repmat(' |',1,level)),num2str(conf.val{i0})); end end end end nipype-0.9.2/matlabscripts/parse_spm_config2.m000066400000000000000000000032341227300005300214460ustar00rootroot00000000000000function parse_spm_config2 conf = spm_config; sub_parse_config(spm_config) function sub_parse_config(conf,level) if nargin<2, level = 1; end if ~isfield(conf,'tag') level = level - 1; else if ~strcmp(conf.type,'entry') fprintf(' %s- %s[%s] :\n\n',char(repmat(' ',1,level-1)),conf.tag,conf.type); else fprintf(' %s- %s[%s] :\n\n',char(repmat(' ',1,level-1)),conf.tag,conf.type); end end if isfield(conf,'values'), if ~isempty(strmatch('help',fieldnames(conf))) if isstr(conf.help), fprintf('\n %s ::\n %s\n',char(repmat(' ',1,level-1)),strrep(conf.help,'_','\_')); else for h0=1:numel(conf.help), fprintf('\n %s ::\n %s\n',char(repmat(' ',1,level-1)),strrep(conf.help{h0},'_','\_')); end end end for i0=1:numel(conf.values), if isstruct(conf.values{i0}), sub_parse_config(conf.values{i0},level+1) else if ischar(conf.values{i0}) fprintf(' %s- [ %s : %s]\n\n',char(repmat(' ',1,level)),conf.labels{i0},conf.values{i0}); else fprintf(' %s- [ %s : %s]\n\n',char(repmat(' ',1,level)),conf.labels{i0},num2str(conf.values{i0})); end end end end if isfield(conf,'val'), for i0=1:numel(conf.val), if isstruct(conf.val{i0}), sub_parse_config(conf.val{i0},level+1) else if ischar(conf.val{i0}) fprintf(' %s- [default : %s]\n\n',char(repmat(' ',1,level)),conf.val{i0}); else fprintf(' %s- [default : %s]\n\n',char(repmat(' ',1,level)),num2str(conf.val{i0})); end end end end nipype-0.9.2/nipype/000077500000000000000000000000001227300005300143225ustar00rootroot00000000000000nipype-0.9.2/nipype/COMMIT_INFO.txt000066400000000000000000000004141227300005300166650ustar00rootroot00000000000000# This is an ini file that may contain information about the code state [commit hash] # The line below may contain a valid hash if it has been substituted during 'git archive' archive_subst_hash=162f65d # This line may be modified by the install process install_hash= nipype-0.9.2/nipype/__init__.py000066400000000000000000000047571227300005300164500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from info import (LONG_DESCRIPTION as __doc__, URL as __url__, STATUS as __status__, __version__) from utils.config import NipypeConfig config = NipypeConfig() from 
utils.logger import Logging logging = Logging(config) from distutils.version import LooseVersion from .fixes.numpy.testing import nosetester class _NoseTester(nosetester.NoseTester): """ Subclass numpy's NoseTester to add doctests by default """ def _get_custom_doctester(self): return None def test(self, label='fast', verbose=1, extra_argv=['--exe'], doctests=True, coverage=False): """Run the full test suite Examples -------- This will run the test suite and stop at the first failing example >>> from nipype import test >>> test(extra_argv=['--exe', '-sx']) #doctest: +SKIP """ return super(_NoseTester, self).test(label=label, verbose=verbose, extra_argv=extra_argv, doctests=doctests, coverage=coverage) try: test = _NoseTester(raise_warnings="release").test except TypeError: # Older versions of numpy do not have a raise_warnings argument test = _NoseTester().test del nosetester def _test_local_install(): """ Warn the user that running with nipy being imported locally is a bad idea. """ if os.getcwd() == os.sep.join( os.path.abspath(__file__).split(os.sep)[:-2]): import warnings warnings.warn('Running the tests from the install directory may ' 'trigger some failures') _test_local_install() # Set up package information function from pkg_info import get_pkg_info as _get_pkg_info get_info = lambda: _get_pkg_info(os.path.dirname(__file__)) # Cleanup namespace del _test_local_install # If this file is exec after being imported, the following lines will # fail try: del Tester except: pass from pipeline import Node, MapNode, JoinNode, Workflow from interfaces import (fsl, spm, freesurfer, afni, ants, slicer, dipy, nipy, mrtrix, camino, DataGrabber, DataSink, SelectFiles, IdentityInterface, Rename, Function, Select, Merge) nipype-0.9.2/nipype/algorithms/000077500000000000000000000000001227300005300164735ustar00rootroot00000000000000nipype-0.9.2/nipype/algorithms/__init__.py000066400000000000000000000003561227300005300206100ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package contains pure python neuroimaging algorithms Exaples: artifactdetect """ __docformat__ = 'restructuredtext' nipype-0.9.2/nipype/algorithms/icc.py000066400000000000000000000114121227300005300176020ustar00rootroot00000000000000from numpy import ones, kron, mean, eye, hstack, dot, tile from scipy.linalg import pinv from ..interfaces.base import BaseInterfaceInputSpec, TraitedSpec, \ BaseInterface, traits, File import nibabel as nb import numpy as np import os class ICCInputSpec(BaseInterfaceInputSpec): subjects_sessions = traits.List(traits.List(File(exists=True)), desc="n subjects m sessions 3D stat files", mandatory=True) mask = File(exists=True, mandatory=True) class ICCOutputSpec(TraitedSpec): icc_map = File(exists=True) session_var_map = File(exists=True, desc="variance between sessions") subject_var_map = File(exists=True, desc="variance between subjects") class ICC(BaseInterface): ''' Calculates Interclass Correlation Coefficient (3,1) as defined in P. E. Shrout & Joseph L. Fleiss (1979). "Intraclass Correlations: Uses in Assessing Rater Reliability". Psychological Bulletin 86 (2): 420-428. This particular implementation is aimed at relaibility (test-retest) studies. 
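    Example
    -------
    A minimal sketch with hypothetical file names (each inner list holds the
    per-session statistical maps of one subject)::

        icc = ICC()
        icc.inputs.subjects_sessions = [['sub01_run1.nii', 'sub01_run2.nii'],
                                        ['sub02_run1.nii', 'sub02_run2.nii']]
        icc.inputs.mask = 'brain_mask.nii'
        res = icc.run()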
''' input_spec = ICCInputSpec output_spec = ICCOutputSpec def _run_interface(self, runtime): maskdata = nb.load(self.inputs.mask).get_data() maskdata = np.logical_not(np.logical_or(maskdata == 0, np.isnan(maskdata))) session_datas = [[nb.load(fname).get_data()[maskdata].reshape(-1, 1) for fname in sessions] for sessions in self.inputs.subjects_sessions] list_of_sessions = [np.dstack(session_data) for session_data in session_datas] all_data = np.hstack(list_of_sessions) icc = np.zeros(session_datas[0][0].shape) session_F = np.zeros(session_datas[0][0].shape) session_var = np.zeros(session_datas[0][0].shape) subject_var = np.zeros(session_datas[0][0].shape) for x in range(icc.shape[0]): Y = all_data[x, :, :] icc[x], subject_var[x], session_var[x], session_F[x], _, _ = ICC_rep_anova(Y) nim = nb.load(self.inputs.subjects_sessions[0][0]) new_data = np.zeros(nim.get_shape()) new_data[maskdata] = icc.reshape(-1,) new_img = nb.Nifti1Image(new_data, nim.get_affine(), nim.get_header()) nb.save(new_img, 'icc_map.nii') new_data = np.zeros(nim.get_shape()) new_data[maskdata] = session_var.reshape(-1,) new_img = nb.Nifti1Image(new_data, nim.get_affine(), nim.get_header()) nb.save(new_img, 'session_var_map.nii') new_data = np.zeros(nim.get_shape()) new_data[maskdata] = subject_var.reshape(-1,) new_img = nb.Nifti1Image(new_data, nim.get_affine(), nim.get_header()) nb.save(new_img, 'subject_var_map.nii') return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['icc_map'] = os.path.abspath('icc_map.nii') outputs['sessions_F_map'] = os.path.abspath('sessions_F_map.nii') outputs['session_var_map'] = os.path.abspath('session_var_map.nii') outputs['subject_var_map'] = os.path.abspath('subject_var_map.nii') return outputs def ICC_rep_anova(Y): ''' the data Y are entered as a 'table' ie subjects are in rows and repeated measures in columns -------------------------------------------------------------------------- One Sample Repeated measure ANOVA Y = XB + E with X = [FaTor / Subjects] -------------------------------------------------------------------------- ''' [nb_subjects, nb_conditions] = Y.shape dfc = nb_conditions - 1 dfe = (nb_subjects - 1) * dfc dfr = nb_subjects - 1 # Compute the repeated measure effect # ------------------------------------ # Sum Square Total mean_Y = mean(Y) SST = ((Y - mean_Y) ** 2).sum() # create the design matrix for the different levels x = kron(eye(nb_conditions), ones((nb_subjects, 1))) # sessions x0 = tile(eye(nb_subjects), (nb_conditions, 1)) # subjects X = hstack([x, x0]) # Sum Square Error predicted_Y = dot(dot(dot(X, pinv(dot(X.T, X))), X.T), Y.flatten('F')) residuals = Y.flatten('F') - predicted_Y SSE = (residuals ** 2).sum() residuals.shape = Y.shape MSE = SSE / dfe # Sum square session effect - between colums/sessions SSC = ((mean(Y, 0) - mean_Y) ** 2).sum() * nb_subjects MSC = SSC / dfc / nb_subjects session_effect_F = MSC / MSE # Sum Square subject effect - between rows/subjects SSR = SST - SSC - SSE MSR = SSR / dfr # ICC(3,1) = (mean square subjeT - mean square error) / (mean square subjeT + (k-1)*-mean square error) ICC = (MSR - MSE) / (MSR + dfc * MSE) e_var = MSE #variance of error r_var = (MSR - MSE)/nb_conditions #variance between subjects return ICC, r_var, e_var, session_effect_F, dfc, dfe nipype-0.9.2/nipype/algorithms/mesh.py000066400000000000000000000070151227300005300200040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Miscellaneous 
algorithms for 2D contours and 3D triangularized meshes handling Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) ''' import numpy as np from scipy.spatial.distance import euclidean from .. import logging from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File, BaseInterfaceInputSpec) iflogger = logging.getLogger('interface') class P2PDistanceInputSpec(BaseInterfaceInputSpec): surface1 = File(exists=True, mandatory=True, desc=("Reference surface (vtk format) to which compute " "distance.")) surface2 = File(exists=True, mandatory=True, desc=("Test surface (vtk format) from which compute " "distance.")) weighting = traits.Enum("none", "surface", usedefault=True, desc=('"none": no weighting is performed, ' '"surface": edge distance is weighted by the ' 'corresponding surface area')) class P2PDistanceOutputSpec(TraitedSpec): distance = traits.Float(desc="computed distance") class P2PDistance(BaseInterface): """Calculates a point-to-point (p2p) distance between two corresponding VTK-readable meshes or contours. A point-to-point correspondence between nodes is required Example ------- >>> import nipype.algorithms.mesh as mesh >>> dist = mesh.P2PDistance() >>> dist.inputs.surface1 = 'surf1.vtk' >>> dist.inputs.surface2 = 'surf2.vtk' >>> res = dist.run() # doctest: +SKIP """ input_spec = P2PDistanceInputSpec output_spec = P2PDistanceOutputSpec def _triangle_area(self, A, B, C): ABxAC = euclidean(A,B) * euclidean(A,C) prod = np.dot(np.array(B)-np.array(A),np.array(C)-np.array(A)) angle = np.arccos( prod / ABxAC ) area = 0.5 * ABxAC * np.sin( angle ) return area def _run_interface(self, runtime): from tvtk.api import tvtk r1 = tvtk.PolyDataReader( file_name=self.inputs.surface1 ) r2 = tvtk.PolyDataReader( file_name=self.inputs.surface2 ) vtk1 = r1.output vtk2 = r2.output r1.update() r2.update() assert( len(vtk1.points) == len(vtk2.points) ) d = 0.0 totalWeight = 0.0 points = vtk1.points faces = vtk1.polys.to_array().reshape(-1,4).astype(int)[:,1:] for p1,p2 in zip( points, vtk2.points ): weight = 1.0 if (self.inputs.weighting == 'surface'): #compute surfaces, set in weight weight = 0.0 point_faces = faces[ (faces[:,:]==0).any(axis=1) ] for idset in point_faces: p1 = points[ int(idset[0]) ] p2 = points[ int(idset[1]) ] p3 = points[ int(idset[2]) ] weight = weight + self._triangle_area(p1, p2, p3) d+= weight*euclidean( p1, p2 ) totalWeight = totalWeight + weight self._distance = d / totalWeight return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['distance'] = self._distance return outputs nipype-0.9.2/nipype/algorithms/misc.py000066400000000000000000001246271227300005300200140ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Miscellaneous algorithms Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) ''' import os import os.path as op import nibabel as nb import numpy as np from math import floor, ceil from scipy.ndimage.morphology import grey_dilation from scipy.ndimage.morphology import binary_erosion from scipy.spatial.distance import cdist, euclidean, dice, jaccard from scipy.ndimage.measurements import 
center_of_mass, label from scipy.special import legendre import scipy.io as sio import itertools import scipy.stats as stats from .. import logging from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File, InputMultiPath, OutputMultiPath, BaseInterfaceInputSpec, isdefined) from ..utils.filemanip import fname_presuffix, split_filename iflogger = logging.getLogger('interface') class PickAtlasInputSpec(BaseInterfaceInputSpec): atlas = File(exists=True, desc="Location of the atlas that will be used.", mandatory=True) labels = traits.Either( traits.Int, traits.List(traits.Int), desc=("Labels of regions that will be included in the mask. Must be\ compatible with the atlas used."), mandatory=True ) hemi = traits.Enum( 'both', 'left', 'right', desc="Restrict the mask to only one hemisphere: left or right", usedefault=True ) dilation_size = traits.Int( usedefault=True, desc="Defines how much the mask will be dilated (expanded in 3D)." ) output_file = File(desc="Where to store the output mask.") class PickAtlasOutputSpec(TraitedSpec): mask_file = File(exists=True, desc="output mask file") class PickAtlas(BaseInterface): ''' Returns ROI masks given an atlas and a list of labels. Supports dilation and left right masking (assuming the atlas is properly aligned). ''' input_spec = PickAtlasInputSpec output_spec = PickAtlasOutputSpec def _run_interface(self, runtime): nim = self._get_brodmann_area() nb.save(nim, self._gen_output_filename()) return runtime def _gen_output_filename(self): if not isdefined(self.inputs.output_file): output = fname_presuffix(fname=self.inputs.atlas, suffix="_mask", newpath=os.getcwd(), use_ext=True) else: output = os.path.realpath(self.inputs.output_file) return output def _get_brodmann_area(self): nii = nb.load(self.inputs.atlas) origdata = nii.get_data() newdata = np.zeros(origdata.shape) if not isinstance(self.inputs.labels, list): labels = [self.inputs.labels] else: labels = self.inputs.labels for lab in labels: newdata[origdata == lab] = 1 if self.inputs.hemi == 'right': newdata[floor(float(origdata.shape[0]) / 2):, :, :] = 0 elif self.inputs.hemi == 'left': newdata[:ceil(float(origdata.shape[0]) / 2), :, :] = 0 if self.inputs.dilation_size != 0: newdata = grey_dilation( newdata, (2 * self.inputs.dilation_size + 1, 2 * self.inputs.dilation_size + 1, 2 * self.inputs.dilation_size + 1)) return nb.Nifti1Image(newdata, nii.get_affine(), nii.get_header()) def _list_outputs(self): outputs = self._outputs().get() outputs['mask_file'] = self._gen_output_filename() return outputs class SimpleThresholdInputSpec(BaseInterfaceInputSpec): volumes = InputMultiPath( File(exists=True), desc='volumes to be thresholded', mandatory=True) threshold = traits.Float( desc='volumes to be thresholdedeverything below this value will be set\ to zero', mandatory=True ) class SimpleThresholdOutputSpec(TraitedSpec): thresholded_volumes = OutputMultiPath( File(exists=True), desc="thresholded volumes") class SimpleThreshold(BaseInterface): input_spec = SimpleThresholdInputSpec output_spec = SimpleThresholdOutputSpec def _run_interface(self, runtime): for fname in self.inputs.volumes: img = nb.load(fname) data = np.array(img.get_data()) active_map = data > self.inputs.threshold thresholded_map = np.zeros(data.shape) thresholded_map[active_map] = data[active_map] new_img = nb.Nifti1Image( thresholded_map, img.get_affine(), img.get_header()) _, base, _ = split_filename(fname) nb.save(new_img, base + '_thresholded.nii') return runtime def _list_outputs(self): outputs = self._outputs().get() 
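        # one '<base>_thresholded.nii' path per input volume, matching the
        # file names written out by _run_interface above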
outputs["thresholded_volumes"] = [] for fname in self.inputs.volumes: _, base, _ = split_filename(fname) outputs["thresholded_volumes"].append( os.path.abspath(base + '_thresholded.nii')) return outputs class ModifyAffineInputSpec(BaseInterfaceInputSpec): volumes = InputMultiPath( File(exists=True), desc='volumes which affine matrices will be modified', mandatory=True ) transformation_matrix = traits.Array( value=np.eye(4), shape=(4, 4), desc="transformation matrix that will be left multiplied by the\ affine matrix", usedefault=True ) class ModifyAffineOutputSpec(TraitedSpec): transformed_volumes = OutputMultiPath(File(exist=True)) class ModifyAffine(BaseInterface): ''' Left multiplies the affine matrix with a specified values. Saves the volume as a nifti file. ''' input_spec = ModifyAffineInputSpec output_spec = ModifyAffineOutputSpec def _gen_output_filename(self, name): _, base, _ = split_filename(name) return os.path.abspath(base + "_transformed.nii") def _run_interface(self, runtime): for fname in self.inputs.volumes: img = nb.load(fname) affine = img.get_affine() affine = np.dot(self.inputs.transformation_matrix, affine) nb.save(nb.Nifti1Image(img.get_data(), affine, img.get_header()), self._gen_output_filename(fname)) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['transformed_volumes'] = [] for fname in self.inputs.volumes: outputs['transformed_volumes'].append( self._gen_output_filename(fname)) return outputs class DistanceInputSpec(BaseInterfaceInputSpec): volume1 = File(exists=True, mandatory=True, desc="Has to have the same dimensions as volume2.") volume2 = File( exists=True, mandatory=True, desc="Has to have the same dimensions as volume1." ) method = traits.Enum( "eucl_min", "eucl_cog", "eucl_mean", "eucl_wmean", "eucl_max", desc='""eucl_min": Euclidean distance between two closest points\ "eucl_cog": mean Euclidian distance between the Center of Gravity\ of volume1 and CoGs of volume2\ "eucl_mean": mean Euclidian minimum distance of all volume2 voxels\ to volume1\ "eucl_wmean": mean Euclidian minimum distance of all volume2 voxels\ to volume1 weighted by their values\ "eucl_max": maximum over minimum Euclidian distances of all volume2\ voxels to volume1 (also known as the Hausdorff distance)', usedefault=True ) mask_volume = File( exists=True, desc="calculate overlap only within this mask.") class DistanceOutputSpec(TraitedSpec): distance = traits.Float() point1 = traits.Array(shape=(3,)) point2 = traits.Array(shape=(3,)) histogram = File() class Distance(BaseInterface): ''' Calculates distance between two volumes. 
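    Example
    -------
    A minimal sketch reusing the packaged test volumes (mirroring the Overlap
    example below); any two volumes with matching dimensions will do:

    >>> dist = Distance()
    >>> dist.inputs.volume1 = 'cont1.nii'
    >>> dist.inputs.volume2 = 'cont2.nii'
    >>> dist.inputs.method = 'eucl_min'
    >>> res = dist.run()  # doctest: +SKIP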
''' input_spec = DistanceInputSpec output_spec = DistanceOutputSpec _hist_filename = "hist.pdf" def _find_border(self, data): eroded = binary_erosion(data) border = np.logical_and(data, np.logical_not(eroded)) return border def _get_coordinates(self, data, affine): if len(data.shape) == 4: data = data[:, :, :, 0] indices = np.vstack(np.nonzero(data)) indices = np.vstack((indices, np.ones(indices.shape[1]))) coordinates = np.dot(affine, indices) return coordinates[:3, :] def _eucl_min(self, nii1, nii2): origdata1 = nii1.get_data().astype(np.bool) border1 = self._find_border(origdata1) origdata2 = nii2.get_data().astype(np.bool) border2 = self._find_border(origdata2) set1_coordinates = self._get_coordinates(border1, nii1.get_affine()) set2_coordinates = self._get_coordinates(border2, nii2.get_affine()) dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T) (point1, point2) = np.unravel_index( np.argmin(dist_matrix), dist_matrix.shape) return (euclidean(set1_coordinates.T[point1, :], set2_coordinates.T[point2, :]), set1_coordinates.T[point1, :], set2_coordinates.T[point2, :]) def _eucl_cog(self, nii1, nii2): origdata1 = nii1.get_data().astype(np.bool) cog_t = np.array(center_of_mass(origdata1)).reshape(-1, 1) cog_t = np.vstack((cog_t, np.array([1]))) cog_t_coor = np.dot(nii1.get_affine(), cog_t)[:3, :] origdata2 = nii2.get_data().astype(np.bool) (labeled_data, n_labels) = label(origdata2) cogs = np.ones((4, n_labels)) for i in range(n_labels): cogs[:3, i] = np.array(center_of_mass(origdata2, labeled_data, i + 1)) cogs_coor = np.dot(nii2.get_affine(), cogs)[:3, :] dist_matrix = cdist(cog_t_coor.T, cogs_coor.T) return np.mean(dist_matrix) def _eucl_mean(self, nii1, nii2, weighted=False): origdata1 = nii1.get_data().astype(np.bool) border1 = self._find_border(origdata1) origdata2 = nii2.get_data().astype(np.bool) set1_coordinates = self._get_coordinates(border1, nii1.get_affine()) set2_coordinates = self._get_coordinates(origdata2, nii2.get_affine()) dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T) min_dist_matrix = np.amin(dist_matrix, axis=0) import matplotlib.pyplot as plt plt.figure() plt.hist(min_dist_matrix, 50, normed=1, facecolor='green') plt.savefig(self._hist_filename) plt.clf() plt.close() if weighted: return np.average( min_dist_matrix, weights=nii2.get_data()[origdata2].flat ) else: return np.mean(min_dist_matrix) def _eucl_max(self, nii1, nii2): origdata1 = nii1.get_data() origdata1 = np.logical_not( np.logical_or(origdata1 == 0, np.isnan(origdata1))) origdata2 = nii2.get_data() origdata2 = np.logical_not( np.logical_or(origdata2 == 0, np.isnan(origdata2))) if isdefined(self.inputs.mask_volume): maskdata = nb.load(self.inputs.mask_volume).get_data() maskdata = np.logical_not( np.logical_or(maskdata == 0, np.isnan(maskdata))) origdata1 = np.logical_and(maskdata, origdata1) origdata2 = np.logical_and(maskdata, origdata2) if origdata1.max() == 0 or origdata2.max() == 0: return np.NaN border1 = self._find_border(origdata1) border2 = self._find_border(origdata2) set1_coordinates = self._get_coordinates(border1, nii1.get_affine()) set2_coordinates = self._get_coordinates(border2, nii2.get_affine()) distances = cdist(set1_coordinates.T, set2_coordinates.T) mins = np.concatenate( (np.amin(distances, axis=0), np.amin(distances, axis=1))) return np.max(mins) def _run_interface(self, runtime): nii1 = nb.load(self.inputs.volume1) nii2 = nb.load(self.inputs.volume2) if self.inputs.method == "eucl_min": self._distance, self._point1, self._point2 = self._eucl_min( nii1, nii2) elif 
self.inputs.method == "eucl_cog": self._distance = self._eucl_cog(nii1, nii2) elif self.inputs.method == "eucl_mean": self._distance = self._eucl_mean(nii1, nii2) elif self.inputs.method == "eucl_wmean": self._distance = self._eucl_mean(nii1, nii2, weighted=True) elif self.inputs.method == "eucl_max": self._distance = self._eucl_max(nii1, nii2) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['distance'] = self._distance if self.inputs.method == "eucl_min": outputs['point1'] = self._point1 outputs['point2'] = self._point2 elif self.inputs.method in ["eucl_mean", "eucl_wmean"]: outputs['histogram'] = os.path.abspath(self._hist_filename) return outputs class OverlapInputSpec(BaseInterfaceInputSpec): volume1 = File(exists=True, mandatory=True, desc="Has to have the same dimensions as volume2.") volume2 = File(exists=True, mandatory=True, desc="Has to have the same dimensions as volume1.") mask_volume = File( exists=True, desc="calculate overlap only within this mask.") out_file = File("diff.nii", usedefault=True) class OverlapOutputSpec(TraitedSpec): jaccard = traits.Float() dice = traits.Float() volume_difference = traits.Int() diff_file = File(exists=True) class Overlap(BaseInterface): """ Calculates various overlap measures between two maps. Example ------- >>> overlap = Overlap() >>> overlap.inputs.volume1 = 'cont1.nii' >>> overlap.inputs.volume1 = 'cont2.nii' >>> res = overlap.run() # doctest: +SKIP """ input_spec = OverlapInputSpec output_spec = OverlapOutputSpec def _bool_vec_dissimilarity(self, booldata1, booldata2, method): methods = {"dice": dice, "jaccard": jaccard} if not (np.any(booldata1) or np.any(booldata2)): return 0 return 1 - methods[method](booldata1.flat, booldata2.flat) def _run_interface(self, runtime): nii1 = nb.load(self.inputs.volume1) nii2 = nb.load(self.inputs.volume2) origdata1 = np.logical_not( np.logical_or(nii1.get_data() == 0, np.isnan(nii1.get_data()))) origdata2 = np.logical_not( np.logical_or(nii2.get_data() == 0, np.isnan(nii2.get_data()))) if isdefined(self.inputs.mask_volume): maskdata = nb.load(self.inputs.mask_volume).get_data() maskdata = np.logical_not( np.logical_or(maskdata == 0, np.isnan(maskdata))) origdata1 = np.logical_and(maskdata, origdata1) origdata2 = np.logical_and(maskdata, origdata2) for method in ("dice", "jaccard"): setattr(self, '_' + method, self._bool_vec_dissimilarity( origdata1, origdata2, method=method)) self._volume = int(origdata1.sum() - origdata2.sum()) both_data = np.zeros(origdata1.shape) both_data[origdata1] = 1 both_data[origdata2] += 2 nb.save(nb.Nifti1Image(both_data, nii1.get_affine(), nii1.get_header()), self.inputs.out_file) return runtime def _list_outputs(self): outputs = self._outputs().get() for method in ("dice", "jaccard"): outputs[method] = getattr(self, '_' + method) outputs['volume_difference'] = self._volume outputs['diff_file'] = os.path.abspath(self.inputs.out_file) return outputs class FuzzyOverlapInputSpec(BaseInterfaceInputSpec): in_ref = InputMultiPath( File(exists=True), mandatory=True, desc="Reference image. Requires the same dimensions as in_tst.") in_tst = InputMultiPath( File(exists=True), mandatory=True, desc="Test image. 
Requires the same dimensions as in_ref.") weighting = traits.Enum("none", "volume", "squared_vol", desc='""none": no class-overlap weighting is performed\ "volume": computed class-overlaps are weighted by class volume\ "squared_vol": computed class-overlaps are weighted by the squared volume of the class',usedefault=True) out_file = File("diff.nii", desc="alternative name for resulting difference-map", usedefault=True) class FuzzyOverlapOutputSpec(TraitedSpec): jaccard = traits.Float( desc="Fuzzy Jaccard Index (fJI), all the classes" ) dice = traits.Float( desc="Fuzzy Dice Index (fDI), all the classes" ) diff_file = File(exists=True, desc="resulting difference-map of all classes, using the chosen weighting" ) class_fji = traits.List( traits.Float(), desc="Array containing the fJIs of each computed class" ) class_fdi = traits.List( traits.Float(), desc="Array containing the fDIs of each computed class" ) class FuzzyOverlap(BaseInterface): """ Calculates various overlap measures between two maps, using the fuzzy definition proposed in: Crum et al., Generalized Overlap Measures for Evaluation and Validation in Medical Image Analysis, IEEE Trans. Med. Ima. 25(11),pp 1451-1461, Nov. 2006. in_ref and in_tst are lists of 2/3D images, each element on the list containing one volume fraction map of a class in a fuzzy partition of the domain. Example ------- >>> overlap = FuzzyOverlap() >>> overlap.inputs.in_ref = [ 'ref_class0.nii', 'ref_class1.nii' ] >>> overlap.inputs.in_tst = [ 'tst_class0.nii', 'tst_class1.nii' ] >>> overlap.inputs.weighting = 'volume' >>> res = overlap.run() # doctest: +SKIP """ input_spec = FuzzyOverlapInputSpec output_spec = FuzzyOverlapOutputSpec def _run_interface(self, runtime): ncomp = len(self.inputs.in_ref) assert( ncomp == len(self.inputs.in_tst) ) weights = np.ones( shape=ncomp ) img_ref = np.array( [ nb.load( fname ).get_data() for fname in self.inputs.in_ref ] ) img_tst = np.array( [ nb.load( fname ).get_data() for fname in self.inputs.in_tst ] ) msk = np.sum(img_ref, axis=0) msk[msk>0] = 1.0 tst_msk = np.sum(img_tst, axis=0) tst_msk[tst_msk>0] = 1.0 #check that volumes are normalized #img_ref[:][msk>0] = img_ref[:][msk>0] / (np.sum( img_ref, axis=0 ))[msk>0] #img_tst[tst_msk>0] = img_tst[tst_msk>0] / np.sum( img_tst, axis=0 )[tst_msk>0] self._jaccards = [] volumes = [] diff_im = np.zeros( img_ref.shape ) for ref_comp, tst_comp, diff_comp in zip( img_ref, img_tst, diff_im ): num = np.minimum( ref_comp, tst_comp ) ddr = np.maximum( ref_comp, tst_comp ) diff_comp[ddr>0]+= 1.0-(num[ddr>0]/ddr[ddr>0]) self._jaccards.append( np.sum( num ) / np.sum( ddr ) ) volumes.append( np.sum( ref_comp ) ) self._dices = 2.0*np.array(self._jaccards) / (np.array(self._jaccards) +1.0 ) if self.inputs.weighting != "none": weights = 1.0 / np.array(volumes) if self.inputs.weighting == "squared_vol": weights = weights**2 weights = weights / np.sum( weights ) setattr( self, '_jaccard', np.sum( weights * self._jaccards ) ) setattr( self, '_dice', np.sum( weights * self._dices ) ) diff = np.zeros( diff_im[0].shape ) for w,ch in zip(weights,diff_im): ch[msk==0] = 0 diff+= w* ch nb.save(nb.Nifti1Image(diff, nb.load( self.inputs.in_ref[0]).get_affine(), nb.load( self.inputs.in_ref[0]).get_header()), self.inputs.out_file ) return runtime def _list_outputs(self): outputs = self._outputs().get() for method in ("dice", "jaccard"): outputs[method] = getattr(self, '_' + method) #outputs['volume_difference'] = self._volume outputs['diff_file'] = os.path.abspath(self.inputs.out_file) outputs['class_fji'] 
= np.array(self._jaccards).astype(float).tolist(); outputs['class_fdi']= self._dices.astype(float).tolist(); return outputs class CreateNiftiInputSpec(BaseInterfaceInputSpec): data_file = File(exists=True, mandatory=True, desc="ANALYZE img file") header_file = File( exists=True, mandatory=True, desc="corresponding ANALYZE hdr file") affine = traits.Array(desc="affine transformation array") class CreateNiftiOutputSpec(TraitedSpec): nifti_file = File(exists=True) class CreateNifti(BaseInterface): input_spec = CreateNiftiInputSpec output_spec = CreateNiftiOutputSpec def _gen_output_file_name(self): _, base, _ = split_filename(self.inputs.data_file) return os.path.abspath(base + ".nii") def _run_interface(self, runtime): hdr = nb.AnalyzeHeader.from_fileobj( open(self.inputs.header_file, 'rb')) if isdefined(self.inputs.affine): affine = self.inputs.affine else: affine = None data = hdr.data_from_fileobj(open(self.inputs.data_file, 'rb')) img = nb.Nifti1Image(data, affine, hdr) nb.save(img, self._gen_output_file_name()) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['nifti_file'] = self._gen_output_file_name() return outputs class TSNRInputSpec(BaseInterfaceInputSpec): in_file = InputMultiPath(File(exists=True), mandatory=True, desc='realigned 4D file or a list of 3D files') regress_poly = traits.Range(low=1, desc='Remove polynomials') class TSNROutputSpec(TraitedSpec): tsnr_file = File(exists=True, desc='tsnr image file') mean_file = File(exists=True, desc='mean image file') stddev_file = File(exists=True, desc='std dev image file') detrended_file = File(desc='detrended input file') class TSNR(BaseInterface): """Computes the time-course SNR for a time series Typically you want to run this on a realigned time-series. Example ------- >>> tsnr = TSNR() >>> tsnr.inputs.in_file = 'functional.nii' >>> res = tsnr.run() # doctest: +SKIP """ input_spec = TSNRInputSpec output_spec = TSNROutputSpec def _gen_output_file_name(self, suffix=None): _, base, ext = split_filename(self.inputs.in_file[0]) if suffix in ['mean', 'stddev']: return os.path.abspath(base + "_tsnr_" + suffix + ext) elif suffix in ['detrended']: return os.path.abspath(base + "_" + suffix + ext) else: return os.path.abspath(base + "_tsnr" + ext) def _run_interface(self, runtime): img = nb.load(self.inputs.in_file[0]) header = img.get_header().copy() vollist = [nb.load(filename) for filename in self.inputs.in_file] data = np.concatenate([vol.get_data().reshape( vol.get_shape()[:3] + (-1,)) for vol in vollist], axis=3) if data.dtype.kind == 'i': header.set_data_dtype(np.float32) data = data.astype(np.float32) if isdefined(self.inputs.regress_poly): timepoints = img.get_shape()[-1] X = np.ones((timepoints, 1)) for i in range(self.inputs.regress_poly): X = np.hstack((X, legendre( i + 1)(np.linspace(-1, 1, timepoints))[:, None])) betas = np.dot(np.linalg.pinv(X), np.rollaxis(data, 3, 2)) datahat = np.rollaxis(np.dot(X[:, 1:], np.rollaxis( betas[1:, :, :, :], 0, 3)), 0, 4) data = data - datahat img = nb.Nifti1Image(data, img.get_affine(), header) nb.save(img, self._gen_output_file_name('detrended')) meanimg = np.mean(data, axis=3) stddevimg = np.std(data, axis=3) tsnr = meanimg / stddevimg img = nb.Nifti1Image(tsnr, img.get_affine(), header) nb.save(img, self._gen_output_file_name()) img = nb.Nifti1Image(meanimg, img.get_affine(), header) nb.save(img, self._gen_output_file_name('mean')) img = nb.Nifti1Image(stddevimg, img.get_affine(), header) nb.save(img, self._gen_output_file_name('stddev')) return runtime def 
_list_outputs(self): outputs = self._outputs().get() outputs['tsnr_file'] = self._gen_output_file_name() outputs['mean_file'] = self._gen_output_file_name('mean') outputs['stddev_file'] = self._gen_output_file_name('stddev') if isdefined(self.inputs.regress_poly): outputs['detrended_file'] = self._gen_output_file_name('detrended') return outputs class GunzipInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True) class GunzipOutputSpec(TraitedSpec): out_file = File(exists=True) class Gunzip(BaseInterface): """ """ input_spec = GunzipInputSpec output_spec = GunzipOutputSpec def _gen_output_file_name(self): _, base, ext = split_filename(self.inputs.in_file) if ext[-2:].lower() == ".gz": ext = ext[:-3] return os.path.abspath(base + ext[:-3]) def _run_interface(self, runtime): import gzip in_file = gzip.open(self.inputs.in_file, 'rb') out_file = open(self._gen_output_file_name(), 'wb') out_file.write(in_file.read()) out_file.close() in_file.close() return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = self._gen_output_file_name() return outputs def replaceext(in_list, ext): out_list = list() for filename in in_list: path, name, _ = split_filename(op.abspath(filename)) out_name = op.join(path, name) + ext out_list.append(out_name) return out_list def matlab2csv(in_array, name, reshape): output_array = np.asarray(in_array) if reshape: if len(np.shape(output_array)) > 1: output_array = np.reshape(output_array, ( np.shape(output_array)[0]*np.shape(output_array)[1], 1)) iflogger.info(np.shape(output_array)) output_name = op.abspath(name + '.csv') np.savetxt(output_name, output_array, delimiter=',') return output_name class Matlab2CSVInputSpec(TraitedSpec): in_file = File(exists=True, mandatory=True, desc='Input MATLAB .mat file') reshape_matrix = traits.Bool( True, usedefault=True, desc='The output of this interface is meant for R, so matrices will be\ reshaped to vectors by default.' ) class Matlab2CSVOutputSpec(TraitedSpec): csv_files = OutputMultiPath( File(desc='Output CSV files for each variable saved in the input .mat\ file') ) class Matlab2CSV(BaseInterface): """ Simple interface to save the components of a MATLAB .mat file as a text file with comma-separated values (CSVs). CSV files are easily loaded in R, for use in statistical processing. For further information, see cran.r-project.org/doc/manuals/R-data.pdf Example ------- >>> import nipype.algorithms.misc as misc >>> mat2csv = misc.Matlab2CSV() >>> mat2csv.inputs.in_file = 'cmatrix.mat' >>> mat2csv.run() # doctest: +SKIP """ input_spec = Matlab2CSVInputSpec output_spec = Matlab2CSVOutputSpec def _run_interface(self, runtime): in_dict = sio.loadmat(op.abspath(self.inputs.in_file)) # Check if the file has multiple variables in it. If it does, loop # through them and save them as individual CSV files. # If not, save the variable as a single CSV file using the input file # name and a .csv extension. 
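        # (scipy.io.loadmat also returns metadata entries such as
        # '__header__', '__version__' and '__globals__'; keys starting with
        # '__' are skipped below so that only real variables are converted.)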
saved_variables = list() for key in in_dict.keys(): if not key.startswith('__'): if isinstance(in_dict[key][0], np.ndarray): saved_variables.append(key) else: iflogger.info('One of the keys in the input file, {k}, is not a Numpy array'.format(k=key)) if len(saved_variables) > 1: iflogger.info( '{N} variables found:'.format(N=len(saved_variables))) iflogger.info(saved_variables) for variable in saved_variables: iflogger.info( '...Converting {var} - type {ty} - to\ CSV'.format(var=variable, ty=type(in_dict[variable])) ) matlab2csv( in_dict[variable], variable, self.inputs.reshape_matrix) elif len(saved_variables) == 1: _, name, _ = split_filename(self.inputs.in_file) variable = saved_variables[0] iflogger.info('Single variable found {var}, type {ty}:'.format( var=variable, ty=type(in_dict[variable]))) iflogger.info('...Converting {var} to CSV from {f}'.format( var=variable, f=self.inputs.in_file)) matlab2csv(in_dict[variable], name, self.inputs.reshape_matrix) else: iflogger.error('No values in the MATLAB file?!') return runtime def _list_outputs(self): outputs = self.output_spec().get() in_dict = sio.loadmat(op.abspath(self.inputs.in_file)) saved_variables = list() for key in in_dict.keys(): if not key.startswith('__'): if isinstance(in_dict[key][0], np.ndarray): saved_variables.append(key) else: iflogger.error('One of the keys in the input file, {k}, is\ not a Numpy array'.format(k=key)) if len(saved_variables) > 1: outputs['csv_files'] = replaceext(saved_variables, '.csv') elif len(saved_variables) == 1: _, name, ext = split_filename(self.inputs.in_file) outputs['csv_files'] = op.abspath(name + '.csv') else: iflogger.error('No values in the MATLAB file?!') return outputs def merge_csvs(in_list): for idx, in_file in enumerate(in_list): try: in_array = np.loadtxt(in_file, delimiter=',') except ValueError, ex: try: in_array = np.loadtxt(in_file, delimiter=',', skiprows=1) except ValueError, ex: first = open(in_file, 'r') header_line = first.readline() header_list = header_line.split(',') n_cols = len(header_list) try: in_array = np.loadtxt( in_file, delimiter=',', skiprows=1, usecols=range(1, n_cols) ) except ValueError, ex: in_array = np.loadtxt( in_file, delimiter=',', skiprows=1, usecols=range(1, n_cols-1)) if idx == 0: out_array = in_array else: out_array = np.dstack((out_array, in_array)) out_array = np.squeeze(out_array) iflogger.info('Final output array shape:') iflogger.info(np.shape(out_array)) return out_array def remove_identical_paths(in_files): import os.path as op from nipype.utils.filemanip import split_filename if len(in_files) > 1: out_names = list() commonprefix = op.commonprefix(in_files) lastslash = commonprefix.rfind('/') commonpath = commonprefix[0:(lastslash+1)] for fileidx, in_file in enumerate(in_files): path, name, ext = split_filename(in_file) in_file = op.join(path, name) name = in_file.replace(commonpath, '') name = name.replace('_subject_id_', '') out_names.append(name) else: path, name, ext = split_filename(in_files[0]) out_names = [name] return out_names def maketypelist(rowheadings, shape, extraheadingBool, extraheading): typelist = [] if rowheadings: typelist.append(('heading', 'a40')) if len(shape) > 1: for idx in range(1, (min(shape)+1)): typelist.append((str(idx), float)) else: for idx in range(1, (shape[0]+1)): typelist.append((str(idx), float)) if extraheadingBool: typelist.append((extraheading, 'a40')) iflogger.info(typelist) return typelist def makefmtlist(output_array, typelist, rowheadingsBool, shape, extraheadingBool): fmtlist = [] if 
rowheadingsBool: fmtlist.append('%s') if len(shape) > 1: output = np.zeros(max(shape), typelist) for idx in range(1, min(shape)+1): output[str(idx)] = output_array[:, idx-1] fmtlist.append('%f') else: output = np.zeros(1, typelist) for idx in range(1, len(output_array)+1): output[str(idx)] = output_array[idx-1] fmtlist.append('%f') if extraheadingBool: fmtlist.append('%s') fmt = ','.join(fmtlist) return fmt, output class MergeCSVFilesInputSpec(TraitedSpec): in_files = InputMultiPath(File(exists=True), mandatory=True, desc='Input comma-separated value (CSV) files') out_file = File('merged.csv', usedefault=True, desc='Output filename for merged CSV file') column_headings = traits.List( traits.Str, desc='List of column headings to save in merged CSV file\ (must be equal to number of input files). If left undefined, these\ will be pulled from the input filenames.') row_headings = traits.List( traits.Str, desc='List of row headings to save in merged CSV file\ (must be equal to number of rows in the input files).') row_heading_title = traits.Str( 'label', usedefault=True, desc='Column heading for the row headings\ added') extra_column_heading = traits.Str( desc='New heading to add for the added field.') extra_field = traits.Str( desc='New field to add to each row. This is useful for saving the\ group or subject ID in the file.') class MergeCSVFilesOutputSpec(TraitedSpec): csv_file = File(desc='Output CSV file containing columns ') class MergeCSVFiles(BaseInterface): """ This interface is designed to facilitate data loading in the R environment. It takes input CSV files and merges them into a single CSV file. If provided, it will also incorporate column heading names into the resulting CSV file. CSV files are easily loaded in R, for use in statistical processing. For further information, see cran.r-project.org/doc/manuals/R-data.pdf Example ------- >>> import nipype.algorithms.misc as misc >>> mat2csv = misc.MergeCSVFiles() >>> mat2csv.inputs.in_files = ['degree.mat','clustering.mat'] >>> mat2csv.inputs.column_headings = ['degree','clustering'] >>> mat2csv.run() # doctest: +SKIP """ input_spec = MergeCSVFilesInputSpec output_spec = MergeCSVFilesOutputSpec def _run_interface(self, runtime): extraheadingBool = False extraheading = '' rowheadingsBool = False """ This block defines the column headings. """ if isdefined(self.inputs.column_headings): iflogger.info('Column headings have been provided:') headings = self.inputs.column_headings else: iflogger.info( 'Column headings not provided! Pulled from input filenames:') headings = remove_identical_paths(self.inputs.in_files) if isdefined(self.inputs.extra_field): if isdefined(self.inputs.extra_column_heading): extraheading = self.inputs.extra_column_heading iflogger.info('Extra column heading provided: {col}'.format( col=extraheading)) else: extraheading = 'type' iflogger.info( 'Extra column heading was not defined. Using "type"') headings.append(extraheading) extraheadingBool = True if len(self.inputs.in_files) == 1: iflogger.warn('Only one file input!') if isdefined(self.inputs.row_headings): iflogger.info('Row headings have been provided. 
Adding "labels"\ column header.') prefix = '"{p}","'.format(p=self.inputs.row_heading_title) csv_headings = prefix + '","'.join(itertools.chain( headings)) + '"\n' rowheadingsBool = True else: iflogger.info('Row headings have not been provided.') csv_headings = '"' + '","'.join(itertools.chain(headings)) + '"\n' iflogger.info('Final Headings:') iflogger.info(csv_headings) """ Next we merge the arrays and define the output text file """ output_array = merge_csvs(self.inputs.in_files) _, name, ext = split_filename(self.inputs.out_file) if not ext == '.csv': ext = '.csv' out_file = op.abspath(name + ext) file_handle = open(out_file, 'w') file_handle.write(csv_headings) shape = np.shape(output_array) typelist = maketypelist( rowheadingsBool, shape, extraheadingBool, extraheading) fmt, output = makefmtlist( output_array, typelist, rowheadingsBool, shape, extraheadingBool) if rowheadingsBool: row_heading_list = self.inputs.row_headings row_heading_list_with_quotes = [] for row_heading in row_heading_list: row_heading_with_quotes = '"' + row_heading + '"' row_heading_list_with_quotes.append(row_heading_with_quotes) row_headings = np.array(row_heading_list_with_quotes, dtype='|S40') output['heading'] = row_headings if isdefined(self.inputs.extra_field): extrafieldlist = [] if len(shape) > 1: mx = shape[0] else: mx = 1 for idx in range(0, mx): extrafieldlist.append(self.inputs.extra_field) iflogger.info(len(extrafieldlist)) output[extraheading] = extrafieldlist iflogger.info(output) iflogger.info(fmt) np.savetxt(file_handle, output, fmt, delimiter=',') file_handle.close() return runtime def _list_outputs(self): outputs = self.output_spec().get() _, name, ext = split_filename(self.inputs.out_file) if not ext == '.csv': ext = '.csv' out_file = op.abspath(name + ext) outputs['csv_file'] = out_file return outputs class AddCSVColumnInputSpec(TraitedSpec): in_file = File(exists=True, mandatory=True, desc='Input comma-separated value (CSV) files') out_file = File('extra_heading.csv', usedefault=True, desc='Output filename for merged CSV file') extra_column_heading = traits.Str( desc='New heading to add for the added field.') extra_field = traits.Str( desc='New field to add to each row. 
This is useful for saving the\ group or subject ID in the file.') class AddCSVColumnOutputSpec(TraitedSpec): csv_file = File(desc='Output CSV file containing columns ') class AddCSVColumn(BaseInterface): """ Short interface to add an extra column and field to a text file Example ------- >>> import nipype.algorithms.misc as misc >>> addcol = misc.AddCSVColumn() >>> addcol.inputs.in_file = 'degree.csv' >>> addcol.inputs.extra_column_heading = 'group' >>> addcol.inputs.extra_field = 'male' >>> addcol.run() # doctest: +SKIP """ input_spec = AddCSVColumnInputSpec output_spec = AddCSVColumnOutputSpec def _run_interface(self, runtime): in_file = open(self.inputs.in_file, 'r') _, name, ext = split_filename(self.inputs.out_file) if not ext == '.csv': ext = '.csv' out_file = op.abspath(name + ext) out_file = open(out_file, 'w') firstline = in_file.readline() firstline = firstline.replace('\n', '') new_firstline = firstline + ',"' + \ self.inputs.extra_column_heading + '"\n' out_file.write(new_firstline) for line in in_file: new_line = line.replace('\n', '') new_line = new_line + ',' + self.inputs.extra_field + '\n' out_file.write(new_line) return runtime def _list_outputs(self): outputs = self.output_spec().get() _, name, ext = split_filename(self.inputs.out_file) if not ext == '.csv': ext = '.csv' out_file = op.abspath(name + ext) outputs['csv_file'] = out_file return outputs class CalculateNormalizedMomentsInputSpec(TraitedSpec): timeseries_file = File( exists=True, mandatory=True, desc='Text file with timeseries in columns and timepoints in rows,\ whitespace separated') moment = traits.Int( mandatory=True, desc="Define which moment should be calculated, 3 for skewness, 4 for\ kurtosis.") class CalculateNormalizedMomentsOutputSpec(TraitedSpec): moments = traits.List(traits.Float(), desc='Moments') class CalculateNormalizedMoments(BaseInterface): """ Calculates moments of timeseries. Example ------- >>> import nipype.algorithms.misc as misc >>> skew = misc.CalculateNormalizedMoments() >>> skew.inputs.moment = 3 >>> skew.inputs.timeseries_file = 'timeseries.txt' >>> skew.run() # doctest: +SKIP """ input_spec = CalculateNormalizedMomentsInputSpec output_spec = CalculateNormalizedMomentsOutputSpec def _run_interface(self, runtime): self._moments = calc_moments( self.inputs.timeseries_file, self.inputs.moment) return runtime def _list_outputs(self): outputs = self.output_spec().get() outputs['skewness'] = self._moments return outputs def calc_moments(timeseries_file, moment): """ Returns nth moment (3 for skewness, 4 for kurtosis) of timeseries (list of values; one per timeseries). Keyword arguments: timeseries_file -- text file with white space separated timepoints in rows """ timeseries = np.genfromtxt(timeseries_file) m2 = stats.moment(timeseries, 2, axis=0) m3 = stats.moment(timeseries, moment, axis=0) zero = (m2 == 0) return np.where(zero, 0, m3 / m2**(moment/2.0)) nipype-0.9.2/nipype/algorithms/modelgen.py000066400000000000000000001011171227300005300206400ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The modelgen module provides classes for specifying designs for individual subject analysis of task-based fMRI experiments. In particular it also includes algorithms for generating regressors for sparse and sparse-clustered acquisition experiments. 
These functions include: * SpecifyModel: allows specification of sparse and non-sparse models Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ from copy import deepcopy import os from nibabel import load import numpy as np from scipy.special import gammaln from nipype.interfaces.base import (BaseInterface, TraitedSpec, InputMultiPath, traits, File, Bunch, BaseInterfaceInputSpec, isdefined) from nipype.utils.filemanip import filename_to_list from .. import config, logging iflogger = logging.getLogger('interface') def gcd(a, b): """Returns the greatest common divisor of two integers uses Euclid's algorithm >>> gcd(4, 5) 1 >>> gcd(4, 8) 4 >>> gcd(22, 55) 11 """ while b > 0: a, b = b, a % b return a def spm_hrf(RT, P=None, fMRI_T=16): """ python implementation of spm_hrf see spm_hrf for implementation details % RT - scan repeat time % p - parameters of the response function (two gamma % functions) % defaults (seconds) % p(0) - delay of response (relative to onset) 6 % p(1) - delay of undershoot (relative to onset) 16 % p(2) - dispersion of response 1 % p(3) - dispersion of undershoot 1 % p(4) - ratio of response to undershoot 6 % p(5) - onset (seconds) 0 % p(6) - length of kernel (seconds) 32 % % hrf - hemodynamic response function % p - parameters of the response function the following code using scipy.stats.distributions.gamma doesn't return the same result as the spm_Gpdf function hrf = gamma.pdf(u, p[0]/p[2], scale=dt/p[2]) - gamma.pdf(u, p[1]/p[3], scale=dt/p[3])/p[4] >>> print spm_hrf(2) [ 0.00000000e+00 8.65660810e-02 3.74888236e-01 3.84923382e-01 2.16117316e-01 7.68695653e-02 1.62017720e-03 -3.06078117e-02 -3.73060781e-02 -3.08373716e-02 -2.05161334e-02 -1.16441637e-02 -5.82063147e-03 -2.61854250e-03 -1.07732374e-03 -4.10443522e-04 -1.46257507e-04] """ p = np.array([6, 16, 1, 1, 6, 0, 32], dtype=float) if P is not None: p[0:len(P)] = P _spm_Gpdf = lambda x, h, l: np.exp(h * np.log(l) + (h - 1) * np.log(x) - (l * x) - gammaln(h)) # modelled hemodynamic response function - {mixture of Gammas} dt = RT/float(fMRI_T) u = np.arange(0, int(p[6]/dt+1)) - p[5]/dt hrf = _spm_Gpdf(u, p[0]/p[2], dt/p[2]) - _spm_Gpdf(u, p[1]/p[3], dt/p[3])/p[4] idx = np.arange(0, int((p[6]/RT)+1))*fMRI_T hrf = hrf[idx] hrf = hrf/np.sum(hrf) return hrf def orth(x_in, y_in): """Orthoganlize y_in with respect to x_in >>> err = np.abs(np.array(orth([1, 2, 3],[4, 5, 6]) - np.array([1.7142857142857144, 0.42857142857142883, -0.85714285714285676]))) >>> all(err np.exp(-32): y = y[:, 0].tolist() else: y = y_in return y def scale_timings(timelist, input_units, output_units, time_repetition): """Scales timings given input and output units (scans/secs) Parameters ---------- timelist: list of times to scale input_units: 'secs' or 'scans' output_units: Ibid. time_repetition: float in seconds """ if input_units==output_units: _scalefactor = 1. 
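# (Illustrative, assumed values: with time_repetition=2.0, onsets given in 'scans' and requested in 'secs' are multiplied by 2.0, while 'secs' to 'scans' divides by 2.0; negative timings are clipped to 0 below.)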
if (input_units == 'scans') and (output_units == 'secs'): _scalefactor = time_repetition if (input_units == 'secs') and (output_units == 'scans'): _scalefactor = 1./time_repetition timelist = [np.max([0., _scalefactor*t]) for t in timelist] return timelist def gen_info(run_event_files): """Generate subject_info structure from a list of event files """ info = [] for i, event_files in enumerate(run_event_files): runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for event_file in event_files: _, name = os.path.split(event_file) if '.run' in name: name, _ = name.split('.run%03d'%(i+1)) elif '.txt' in name: name, _ = name.split('.txt') runinfo.conditions.append(name) event_info = np.loadtxt(event_file) runinfo.onsets.append(event_info[:, 0].tolist()) if event_info.shape[1] > 1: runinfo.durations.append(event_info[:, 1].tolist()) else: runinfo.durations.append([0]) if event_info.shape[1] > 2: runinfo.amplitudes.append(event_info[:, 2].tolist()) else: delattr(runinfo, 'amplitudes') info.append(runinfo) return info class SpecifyModelInputSpec(BaseInterfaceInputSpec): subject_info = InputMultiPath(Bunch, mandatory=True, xor=['event_files'], desc=("Bunch or List(Bunch) subject specific condition information. " "see :ref:`SpecifyModel` or SpecifyModel.__doc__ for details")) event_files = InputMultiPath(traits.List(File(exists=True)), mandatory=True, xor=['subject_info'], desc=('list of event description files 1, 2 or 3 column format ' 'corresponding to onsets, durations and amplitudes')) realignment_parameters = InputMultiPath(File(exists=True), desc = "Realignment parameters returned by motion correction algorithm", copyfile=False) outlier_files = InputMultiPath(File(exists=True), desc="Files containing scan outlier indices that should be tossed", copyfile=False) functional_runs = InputMultiPath(traits.Either(traits.List(File(exists=True)), File(exists=True)), mandatory=True, desc="Data files for model. List of 4D files or list of" \ "list of 3D files per session", copyfile=False) input_units = traits.Enum('secs', 'scans', mandatory=True, desc = "Units of event onsets and durations (secs or scans)" \ "Output units are always in secs") high_pass_filter_cutoff = traits.Float(mandatory=True, desc="High-pass filter cutoff in secs") time_repetition = traits.Float(mandatory=True, desc = "Time between the start of one volume to the start of " \ "the next image volume.") # Not implemented yet #polynomial_order = traits.Range(0, low=0, # desc ="Number of polynomial functions to model high pass filter.") class SpecifyModelOutputSpec(TraitedSpec): session_info = traits.Any(desc="session info for level1designs") class SpecifyModel(BaseInterface): """Makes a model specification compatible with spm/fsl designers. The subject_info field should contain paradigm information in the form of a Bunch or a list of Bunch. The Bunch should contain the following information:: [Mandatory] - conditions : list of names - onsets : lists of onsets corresponding to each condition - durations : lists of durations corresponding to each condition. Should be left to a single 0 if all events are being modelled as impulses. [Optional] - regressor_names : list of str list of names corresponding to each column. Should be None if automatically assigned. - regressors : list of lists values for each regressor - must correspond to the number of volumes in the functional run - amplitudes : lists of amplitudes for each event. This will be ignored by SPM's Level1Design. 
The following two (tmod, pmod) will be ignored by any Level1Design class other than SPM: - tmod : lists of conditions that should be temporally modulated. Should default to None if not being used. - pmod : list of Bunch corresponding to conditions - name : name of parametric modulator - param : values of the modulator - poly : degree of modulation Alternatively, you can provide information through event files. The event files have to be in 1, 2 or 3 column format with the columns corresponding to Onsets, Durations and Amplitudes and they have to have the name event_name.runXXX... e.g.: Words.run001.txt. The event_name part will be used to create the condition names. Examples -------- >>> from nipype.interfaces.base import Bunch >>> s = SpecifyModel() >>> s.inputs.input_units = 'secs' >>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] >>> s.inputs.time_repetition = 6 >>> s.inputs.high_pass_filter_cutoff = 128. >>> info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]), \ Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])] >>> s.inputs.subject_info = info Using pmod: >>> info = [Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 50],[100, 180]], durations=[[0],[0]], pmod=[Bunch(name=['amp'], poly=[2], param=[[1, 2]]), None]), \ Bunch(conditions=['cond1', 'cond2'], onsets=[[20, 120],[80, 160]], durations=[[0],[0]], pmod=[Bunch(name=['amp'], poly=[2], param=[[1, 2]]), None])] >>> s.inputs.subject_info = info """ input_spec = SpecifyModelInputSpec output_spec = SpecifyModelOutputSpec def _generate_standard_design(self, infolist, functional_runs=None, realignment_parameters=None, outliers=None): """ Generates a standard design matrix paradigm given information about each run """ sessinfo = [] output_units = 'secs' if 'output_units' in self.inputs.traits(): output_units = self.inputs.output_units for i, info in enumerate(infolist): sessinfo.insert(i, dict(cond=[])) if isdefined(self.inputs.high_pass_filter_cutoff): sessinfo[i]['hpf'] = np.float(self.inputs.high_pass_filter_cutoff) if hasattr(info, 'conditions') and info.conditions is not None: for cid, cond in enumerate(info.conditions): sessinfo[i]['cond'].insert(cid, dict()) sessinfo[i]['cond'][cid]['name'] = info.conditions[cid] sessinfo[i]['cond'][cid]['onset'] = scale_timings(info.onsets[cid], self.inputs.input_units, output_units, self.inputs.time_repetition) sessinfo[i]['cond'][cid]['duration'] = scale_timings(info.durations[cid], self.inputs.input_units, output_units, self.inputs.time_repetition) if hasattr(info, 'amplitudes') and info.amplitudes: sessinfo[i]['cond'][cid]['amplitudes'] = info.amplitudes[cid] if hasattr(info, 'tmod') and info.tmod and len(info.tmod)>cid: sessinfo[i]['cond'][cid]['tmod'] = info.tmod[cid] if hasattr(info, 'pmod') and info.pmod and len(info.pmod)>cid: if info.pmod[cid]: sessinfo[i]['cond'][cid]['pmod'] = [] for j, name in enumerate(info.pmod[cid].name): sessinfo[i]['cond'][cid]['pmod'].insert(j,{}) sessinfo[i]['cond'][cid]['pmod'][j]['name'] = name sessinfo[i]['cond'][cid]['pmod'][j]['poly'] = info.pmod[cid].poly[j] sessinfo[i]['cond'][cid]['pmod'][j]['param'] = info.pmod[cid].param[j] sessinfo[i]['regress']= [] if hasattr(info, 'regressors') and info.regressors is not None: for j, r in enumerate(info.regressors): sessinfo[i]['regress'].insert(j, dict(name='', val=[])) if hasattr(info, 'regressor_names') and info.regressor_names is not None: sessinfo[i]['regress'][j]['name'] = info.regressor_names[j] else: sessinfo[i]['regress'][j]['name'] = 
'UR%d'%(j+1) sessinfo[i]['regress'][j]['val'] = info.regressors[j] sessinfo[i]['scans'] = functional_runs[i] if realignment_parameters is not None: for i, rp in enumerate(realignment_parameters): mc = realignment_parameters[i] for col in range(mc.shape[1]): colidx = len(sessinfo[i]['regress']) sessinfo[i]['regress'].insert(colidx, dict(name='', val=[])) sessinfo[i]['regress'][colidx]['name'] = 'Realign%d'%(col+1) sessinfo[i]['regress'][colidx]['val'] = mc[:, col].tolist() if outliers is not None: for i, out in enumerate(outliers): numscans = 0 for f in filename_to_list(sessinfo[i]['scans']): shape = load(f).get_shape() if len(shape) == 3 or shape[3] == 1: iflogger.warning("You are using 3D instead of 4D files. Are you sure this was intended?") numscans += 1 else: numscans += shape[3] for j, scanno in enumerate(out): colidx = len(sessinfo[i]['regress']) sessinfo[i]['regress'].insert(colidx, dict(name='', val=[])) sessinfo[i]['regress'][colidx]['name'] = 'Outlier%d'%(j+1) sessinfo[i]['regress'][colidx]['val'] = np.zeros((1, numscans))[0].tolist() sessinfo[i]['regress'][colidx]['val'][int(scanno)] = 1 return sessinfo def _generate_design(self, infolist=None): """Generate design specification for a typical fmri paradigm """ realignment_parameters = [] if isdefined(self.inputs.realignment_parameters): for parfile in self.inputs.realignment_parameters: realignment_parameters.append(np.loadtxt(parfile)) outliers = [] if isdefined(self.inputs.outlier_files): for filename in self.inputs.outlier_files: try: outindices = np.loadtxt(filename, dtype=int) except IOError: outliers.append([]) else: if outindices.size == 1: outliers.append([outindices.tolist()]) else: outliers.append(outindices.tolist()) if infolist is None: if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info else: infolist = gen_info(self.inputs.event_files) self._sessinfo = self._generate_standard_design(infolist, functional_runs=self.inputs.functional_runs, realignment_parameters=realignment_parameters, outliers=outliers) def _run_interface(self, runtime): """ """ self._sessioninfo = None self._generate_design() return runtime def _list_outputs(self): outputs = self._outputs().get() if not hasattr(self, '_sessinfo'): self._generate_design() outputs['session_info'] = self._sessinfo return outputs class SpecifySPMModelInputSpec(SpecifyModelInputSpec): concatenate_runs = traits.Bool(False, usedefault=True, desc="Concatenate all runs to look like a single session.") output_units = traits.Enum('secs', 'scans', usedefault=True, desc = "Units of design event onsets and durations " \ "(secs or scans)") class SpecifySPMModel(SpecifyModel): """Adds SPM specific options to SpecifyModel adds: - concatenate_runs - output_units Examples -------- >>> from nipype.interfaces.base import Bunch >>> s = SpecifySPMModel() >>> s.inputs.input_units = 'secs' >>> s.inputs.output_units = 'scans' >>> s.inputs.high_pass_filter_cutoff = 128. 
>>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] >>> s.inputs.time_repetition = 6 >>> s.inputs.concatenate_runs = True >>> info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]), \ Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])] >>> s.inputs.subject_info = info """ input_spec = SpecifySPMModelInputSpec def _concatenate_info(self, infolist): nscans = [] for i, f in enumerate(self.inputs.functional_runs): if isinstance(f, list): numscans = len(f) elif isinstance(f, str): img = load(f) numscans = img.get_shape()[3] else: raise Exception('Functional input not specified correctly') nscans.insert(i, numscans) # now combine all fields into 1 # names, onsets, durations, amplitudes, pmod, tmod, regressor_names, regressors infoout = infolist[0] for i, info in enumerate(infolist[1:]): #info.[conditions, tmod] remain the same if info.onsets: for j, val in enumerate(info.onsets): if self.inputs.input_units == 'secs': infoout.onsets[j].extend((np.array(info.onsets[j])+ self.inputs.time_repetition*sum(nscans[0:(i+1)])).tolist()) else: infoout.onsets[j].extend((np.array(info.onsets[j])+sum(nscans[0:(i+1)])).tolist()) for j, val in enumerate(info.durations): if len(val) > 1: infoout.durations[j].extend(info.durations[j]) if hasattr(info, 'amplitudes') and info.amplitudes: for j, val in enumerate(info.amplitudes): infoout.amplitudes[j].extend(info.amplitudes[j]) if hasattr(info, 'pmod') and info.pmod: for j, val in enumerate(info.pmod): if val: for key, data in enumerate(val.param): infoout.pmod[j].param[key].extend(data) if hasattr(info, 'regressors') and info.regressors: #assumes same ordering of regressors across different #runs and the same names for the regressors for j, v in enumerate(info.regressors): infoout.regressors[j].extend(info.regressors[j]) #insert session regressors if not hasattr(infoout, 'regressors') or not infoout.regressors: infoout.regressors = [] onelist = np.zeros((1, sum(nscans))) onelist[0, sum(nscans[0:(i)]):sum(nscans[0:(i+1)])] = 1 infoout.regressors.insert(len(infoout.regressors), onelist.tolist()[0]) return [infoout], nscans def _generate_design(self, infolist=None): if not isdefined(self.inputs.concatenate_runs) or not self.inputs.concatenate_runs: super(SpecifySPMModel, self)._generate_design(infolist=infolist) return if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info else: infolist = gen_info(self.inputs.event_files) concatlist, nscans = self._concatenate_info(infolist) functional_runs = [filename_to_list(self.inputs.functional_runs)] realignment_parameters = [] if isdefined(self.inputs.realignment_parameters): realignment_parameters = [] for parfile in self.inputs.realignment_parameters: mc = np.loadtxt(parfile) if not realignment_parameters: realignment_parameters.insert(0, mc) else: realignment_parameters[0] = np.concatenate((realignment_parameters[0], mc)) outliers = [] if isdefined(self.inputs.outlier_files): outliers = [[]] for i, filename in enumerate(self.inputs.outlier_files): try: out = np.loadtxt(filename, dtype=int) except IOError: out = np.array([]) if out.size>0: if out.size == 1: outliers[0].extend([(np.array(out)+sum(nscans[0:i])).tolist()]) else: outliers[0].extend((np.array(out)+sum(nscans[0:i])).tolist()) self._sessinfo = self._generate_standard_design(concatlist, functional_runs=functional_runs, realignment_parameters=realignment_parameters, outliers=outliers) class SpecifySparseModelInputSpec(SpecifyModelInputSpec): time_acquisition = traits.Float(0, 
mandatory=True, desc = "Time in seconds to acquire a single image volume") volumes_in_cluster = traits.Range(1, usedefault=True, desc="Number of scan volumes in a cluster") model_hrf = traits.Bool(desc="model sparse events with hrf") stimuli_as_impulses = traits.Bool(True, desc = "Treat each stimulus to be impulse like.", usedefault=True) use_temporal_deriv = traits.Bool(requires=['model_hrf'], desc = "Create a temporal derivative in addition to regular regressor") scale_regressors = traits.Bool(True, desc="Scale regressors by the peak", usedefault=True) scan_onset = traits.Float(0.0, desc="Start of scanning relative to onset of run in secs", usedefault=True) save_plot = traits.Bool(desc='save plot of sparse design calculation ' \ '(Requires matplotlib)') class SpecifySparseModelOutputSpec(SpecifyModelOutputSpec): sparse_png_file = File(desc='PNG file showing sparse design') sparse_svg_file = File(desc='SVG file showing sparse design') class SpecifySparseModel(SpecifyModel): """ Specify a sparse model that is compatible with spm/fsl designers References ---------- .. [1] Ghosh et al. (2009) OHBM http://dl.dropbox.com/u/363467/OHBM2009_HRF.pdf Examples -------- >>> from nipype.interfaces.base import Bunch >>> s = SpecifySparseModel() >>> s.inputs.input_units = 'secs' >>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] >>> s.inputs.time_repetition = 6 >>> s.inputs.time_acquisition = 2 >>> s.inputs.high_pass_filter_cutoff = 128. >>> s.inputs.model_hrf = True >>> info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]), \ Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])] >>> s.inputs.subject_info = info """ input_spec = SpecifySparseModelInputSpec output_spec = SpecifySparseModelOutputSpec def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): """Generates a regressor for a sparse/clustered-sparse acquisition """ bplot = False if isdefined(self.inputs.save_plot) and self.inputs.save_plot: bplot=True import matplotlib matplotlib.use(config.get("execution", "matplotlib_backend")) import matplotlib.pyplot as plt TR = np.round(self.inputs.time_repetition*1000) # in ms if self.inputs.time_acquisition: TA = np.round(self.inputs.time_acquisition*1000) # in ms else: TA = TR # in ms nvol = self.inputs.volumes_in_cluster SCANONSET = np.round(self.inputs.scan_onset*1000) total_time = TR*(nscans-nvol)/nvol + TA*nvol + SCANONSET SILENCE = TR-TA*nvol dt = TA/10.; durations = np.round(np.array(i_durations)*1000) if len(durations) == 1: durations = durations*np.ones((len(i_onsets))) onsets = np.round(np.array(i_onsets)*1000) dttemp = gcd(TA, gcd(SILENCE, TR)) if dt < dttemp: if dttemp % dt != 0: dt = gcd(dttemp, dt) if dt < 1: raise Exception("Time multiple less than 1 ms") iflogger.info("Setting dt = %d ms\n" % dt) npts = int(total_time/dt) times = np.arange(0, total_time, dt)*1e-3 timeline = np.zeros((npts)) timeline2 = np.zeros((npts)) if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: hrf = spm_hrf(dt*1e-3) reg_scale = 1.0 if self.inputs.scale_regressors: boxcar = np.zeros((50.*1e3/dt)) if self.inputs.stimuli_as_impulses: boxcar[1.*1e3/dt] = 1.0 reg_scale = float(TA/dt) else: boxcar[1.*1e3/dt:2.*1e3/dt] = 1.0 if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: response = np.convolve(boxcar, hrf) reg_scale = 1./response.max() iflogger.info('response sum: %.4f max: %.4f'%(response.sum(), response.max())) iflogger.info('reg_scale: %.4f'%reg_scale) for i, t in enumerate(onsets): idx = int(t/dt) if 
i_amplitudes: if len(i_amplitudes)>1: timeline2[idx] = i_amplitudes[i] else: timeline2[idx] = i_amplitudes[0] else: timeline2[idx] = 1 if bplot: plt.subplot(4, 1, 1) plt.plot(times, timeline2) if not self.inputs.stimuli_as_impulses: if durations[i] == 0: durations[i] = TA*nvol stimdur = np.ones((int(durations[i]/dt))) timeline2 = np.convolve(timeline2, stimdur)[0:len(timeline2)] timeline += timeline2 timeline2[:] = 0 if bplot: plt.subplot(4, 1, 2) plt.plot(times, timeline) if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: timeline = np.convolve(timeline, hrf)[0:len(timeline)] if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv: #create temporal deriv timederiv = np.concatenate(([0], np.diff(timeline))) if bplot: plt.subplot(4, 1, 3) plt.plot(times, timeline) if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv: plt.plot(times, timederiv) # sample timeline timeline2 = np.zeros((npts)) reg = [] regderiv = [] for i, trial in enumerate(np.arange(nscans)/nvol): scanstart = int((SCANONSET + trial*TR + (i%nvol)*TA)/dt) #print total_time/dt, SCANONSET, TR, TA, scanstart, trial, i%2, int(TA/dt) scanidx = scanstart+np.arange(int(TA/dt)) timeline2[scanidx] = np.max(timeline) reg.insert(i, np.mean(timeline[scanidx])*reg_scale) if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv: regderiv.insert(i, np.mean(timederiv[scanidx])*reg_scale) if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv: iflogger.info('orthoganlizing derivative w.r.t. main regressor') regderiv = orth(reg, regderiv) if bplot: plt.subplot(4, 1, 3) plt.plot(times, timeline2) plt.subplot(4, 1, 4) plt.bar(np.arange(len(reg)), reg, width=0.5) plt.savefig('sparse.png') plt.savefig('sparse.svg') if regderiv: return [reg, regderiv] else: return reg def _cond_to_regress(self, info, nscans): """Converts condition information to full regressors """ reg = [] regnames = [] for i, cond in enumerate(info.conditions): if hasattr(info, 'amplitudes') and info.amplitudes: amplitudes = info.amplitudes[i] else: amplitudes = None regnames.insert(len(regnames), cond) regressor = self._gen_regress(scale_timings(info.onsets[i], self.inputs.input_units, 'secs', self.inputs.time_repetition), scale_timings(info.durations[i], self.inputs.input_units, 'secs', self.inputs.time_repetition), amplitudes, nscans) if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv: reg.insert(len(reg), regressor[0]) regnames.insert(len(regnames), cond+'_D') reg.insert(len(reg), regressor[1]) else: reg.insert(len(reg), regressor) # need to deal with temporal and parametric modulators # for sparse-clustered acquisitions enter T1-effect regressors nvol = self.inputs.volumes_in_cluster if nvol > 1: for i in range(nvol-1): treg = np.zeros((nscans/nvol, nvol)) treg[:, i] = 1 reg.insert(len(reg), treg.ravel().tolist()) regnames.insert(len(regnames), 'T1effect_%d'%i) return reg, regnames def _generate_clustered_design(self, infolist): """Generates condition information for sparse-clustered designs. 
""" infoout = deepcopy(infolist) for i, info in enumerate(infolist): infoout[i].conditions = None infoout[i].onsets = None infoout[i].durations = None if info.conditions: img = load(self.inputs.functional_runs[i]) nscans = img.get_shape()[3] reg, regnames = self._cond_to_regress(info, nscans) if hasattr(infoout[i], 'regressors') and infoout[i].regressors: if not infoout[i].regressor_names: infoout[i].regressor_names = ['R%d'%j for j in range(len(infoout[i].regressors))] else: infoout[i].regressors = [] infoout[i].regressor_names = [] for j, r in enumerate(reg): regidx = len(infoout[i].regressors) infoout[i].regressor_names.insert(regidx, regnames[j]) infoout[i].regressors.insert(regidx, r) return infoout def _generate_design(self, infolist=None): if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info else: infolist = gen_info(self.inputs.event_files) sparselist = self._generate_clustered_design(infolist) super(SpecifySparseModel, self)._generate_design(infolist = sparselist) def _list_outputs(self): outputs = self._outputs().get() if not hasattr(self, '_sessinfo'): self._generate_design() outputs['session_info'] = self._sessinfo if isdefined(self.inputs.save_plot) and self.inputs.save_plot: outputs['sparse_png_file'] = os.path.join(os.getcwd(), 'sparse.png') outputs['sparse_svg_file'] = os.path.join(os.getcwd(), 'sparse.svg') return outputs nipype-0.9.2/nipype/algorithms/rapidart.py000066400000000000000000000725771227300005300206750ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The rapidart module provides routines for artifact detection and region of interest analysis. These functions include: * ArtifactDetect: performs artifact detection on functional images * StimulusCorrelation: determines correlation between stimuli schedule and movement/intensity parameters Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ import os from copy import deepcopy from warnings import warn from nibabel import load, funcs, Nifti1Image import numpy as np from scipy import signal import scipy.io as sio from ..interfaces.base import (BaseInterface, traits, InputMultiPath, OutputMultiPath, TraitedSpec, File, BaseInterfaceInputSpec, isdefined) from ..utils.filemanip import filename_to_list, save_json, split_filename from ..utils.misc import find_indices from .. import logging, config iflogger = logging.getLogger('interface') def _get_affine_matrix(params, source): """Return affine matrix given a set of translation and rotation parameters params : np.array (upto 12 long) in native package format source : the package that generated the parameters supports SPM, AFNI, FSFAST, FSL, NIPY """ if source == 'FSL': params = params[[3, 4, 5, 0, 1, 2]] elif source in ('AFNI', 'FSFAST'): params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)] params[3:] = params[3:] * np.pi / 180. 
if source == 'NIPY': # nipy does not store typical euler angles, use nipy to convert from nipy.algorithms.registration import to_matrix44 return to_matrix44(params) #process for FSL, SPM, AFNI and FSFAST rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)], [-np.sin(x), np.cos(x)]]) q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0]) if len(params) < 12: params = np.hstack((params, q[len(params):])) params.shape = (len(params),) # Translation T = np.eye(4) T[0:3, -1] = params[0:3] # Rotation Rx = np.eye(4) Rx[1:3, 1:3] = rotfunc(params[3]) Ry = np.eye(4) Ry[(0, 0, 2, 2), (0, 2, 0, 2)] = rotfunc(params[4]).ravel() Rz = np.eye(4) Rz[0:2, 0:2] = rotfunc(params[5]) # Scaling S = np.eye(4) S[0:3, 0:3] = np.diag(params[6:9]) # Shear Sh = np.eye(4) Sh[(0, 0, 1), (1, 2, 2)] = params[9:12] if source in ('AFNI', 'FSFAST'): return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh))))) return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh))))) def _calc_norm(mc, use_differences, source, brain_pts=None): """Calculates the maximum overall displacement of the midpoints of the faces of a cube due to translation and rotation. Parameters ---------- mc : motion parameter estimates [3 translation, 3 rotation (radians)] use_differences : boolean brain_pts : [4 x n_points] of coordinates Returns ------- norm : at each time point displacement : euclidean distance (mm) of displacement at each coordinate """ if brain_pts is None: respos = np.diag([70, 70, 75]) resneg = np.diag([-70, -110, -45]) all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6)))) displacement = None else: all_pts = brain_pts n_pts = all_pts.size - all_pts.shape[1] newpos = np.zeros((mc.shape[0], n_pts)) if brain_pts is not None: displacement = np.zeros((mc.shape[0], n_pts / 3)) for i in range(mc.shape[0]): affine = _get_affine_matrix(mc[i, :], source) newpos[i, :] = np.dot(affine, all_pts)[0:3, :].ravel() if brain_pts is not None: displacement[i, :] = \ np.sqrt(np.sum(np.power(np.reshape(newpos[i, :], (3, all_pts.shape[1])) - all_pts[0:3, :], 2), axis=0)) # np.savez('displacement.npz', newpos=newpos, pts=all_pts) normdata = np.zeros(mc.shape[0]) if use_differences: newpos = np.concatenate((np.zeros((1, n_pts)), np.diff(newpos, n=1, axis=0)), axis=0) for i in range(newpos.shape[0]): normdata[i] = \ np.max(np.sqrt(np.sum(np.reshape(np.power(np.abs(newpos[i, :]), 2), (3, all_pts.shape[1])), axis=0))) else: newpos = np.abs(signal.detrend(newpos, axis=0, type='constant')) normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1)) return normdata, displacement def _nanmean(a, axis=None): """Return the mean excluding items that are nan >>> a = [1, 2, np.nan] >>> _nanmean(a) 1.5 """ if axis: return np.nansum(a, axis) / np.sum(1 - np.isnan(a), axis) else: return np.nansum(a) / np.sum(1 - np.isnan(a)) class ArtifactDetectInputSpec(BaseInterfaceInputSpec): realigned_files = InputMultiPath(File(exists=True), desc="Names of realigned functional data files", mandatory=True) realignment_parameters = InputMultiPath(File(exists=True), mandatory=True, desc=("Names of realignment parameters" "corresponding to the functional data files")) parameter_source = traits.Enum("SPM", "FSL", "AFNI", "NiPy", "FSFAST", desc="Source of movement parameters", mandatory=True) use_differences = traits.ListBool([True, False], minlen=2, maxlen=2, usedefault=True, desc=("Use differences between successive motion (first element)" "and intensity paramter (second element) estimates in order" "to determine outliers. 
(default is [True, False])")) use_norm = traits.Bool(True, requires=['norm_threshold'], desc=("Uses a composite of the motion parameters in " "order to determine outliers."), usedefault=True) norm_threshold = traits.Float(desc=("Threshold to use to detect motion-rela" "ted outliers when composite motion is " "being used"), mandatory=True, xor=['rotation_threshold', 'translation_threshold']) rotation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'], desc=("Threshold (in radians) to use to detect rotation-related " "outliers")) translation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'], desc=("Threshold (in mm) to use to detect translation-related " "outliers")) zintensity_threshold = traits.Float(mandatory=True, desc=("Intensity Z-threshold use to detection images that deviate " "from the mean")) mask_type = traits.Enum('spm_global', 'file', 'thresh', desc=("Type of mask that should be used to mask the functional " "data. *spm_global* uses an spm_global like calculation to " "determine the brain mask. *file* specifies a brain mask " "file (should be an image file consisting of 0s and 1s). " "*thresh* specifies a threshold to use. By default all voxels" "are used, unless one of these mask types are defined."), mandatory=True) mask_file = File(exists=True, desc="Mask file to be used if mask_type is 'file'.") mask_threshold = traits.Float(desc=("Mask threshold to be used if mask_type" " is 'thresh'.")) intersect_mask = traits.Bool(True, desc=("Intersect the masks when computed from " "spm_global.")) save_plot = traits.Bool(True, desc="save plots containing outliers", usedefault=True) plot_type = traits.Enum('png', 'svg', 'eps', 'pdf', desc="file type of the outlier plot", usedefault=True) bound_by_brainmask = traits.Bool(False, desc=("use the brain mask to " "determine bounding box" "for composite norm (works" "for SPM and Nipy - currently" "inaccurate for FSL, AFNI"), usedefault=True) global_threshold = traits.Float(8.0, desc=("use this threshold when mask " "type equal's spm_global"), usedefault=True) class ArtifactDetectOutputSpec(TraitedSpec): outlier_files = OutputMultiPath(File(exists=True), desc=("One file for each functional run containing a list of " "0-based indices corresponding to outlier volumes")) intensity_files = OutputMultiPath(File(exists=True), desc=("One file for each functional run containing the global " "intensity values determined from the brainmask")) norm_files = OutputMultiPath(File, desc=("One file for each functional run containing the composite " "norm")) statistic_files = OutputMultiPath(File(exists=True), desc=("One file for each functional run containing information " "about the different types of artifacts and if design info is" " provided then details of stimulus correlated motion and a " "listing or artifacts by event type.")) plot_files = OutputMultiPath(File, desc=("One image file for each functional run containing the " "detected outliers")) mask_files = OutputMultiPath(File, desc=("One image file for each functional run containing the mask" "used for global signal calculation")) displacement_files = OutputMultiPath(File, desc=("One image file for each functional run containing the voxel" "displacement timeseries")) class ArtifactDetect(BaseInterface): """Detects outliers in a functional imaging series Uses intensity and motion parameters to infer outliers. If `use_norm` is True, it computes the movement of the center of each face a cuboid centered around the head and returns the maximal movement across the centers. 
Examples -------- >>> ad = ArtifactDetect() >>> ad.inputs.realigned_files = 'functional.nii' >>> ad.inputs.realignment_parameters = 'functional.par' >>> ad.inputs.parameter_source = 'FSL' >>> ad.inputs.norm_threshold = 1 >>> ad.inputs.use_differences = [True, False] >>> ad.inputs.zintensity_threshold = 3 >>> ad.run() # doctest: +SKIP """ input_spec = ArtifactDetectInputSpec output_spec = ArtifactDetectOutputSpec def __init__(self, **inputs): super(ArtifactDetect, self).__init__(**inputs) def _get_output_filenames(self, motionfile, output_dir): """Generate output files based on motion filenames Parameters ---------- motionfile: file/string Filename for motion parameter file output_dir: string output directory in which the files will be generated """ if isinstance(motionfile, str): infile = motionfile elif isinstance(motionfile, list): infile = motionfile[0] else: raise Exception("Unknown type of file") _, filename, ext = split_filename(infile) artifactfile = os.path.join(output_dir, ''.join(('art.', filename, '_outliers.txt'))) intensityfile = os.path.join(output_dir, ''.join(('global_intensity.', filename, '.txt'))) statsfile = os.path.join(output_dir, ''.join(('stats.', filename, '.txt'))) normfile = os.path.join(output_dir, ''.join(('norm.', filename, '.txt'))) plotfile = os.path.join(output_dir, ''.join(('plot.', filename, '.', self.inputs.plot_type))) displacementfile = os.path.join(output_dir, ''.join(('disp.', filename, ext))) maskfile = os.path.join(output_dir, ''.join(('mask.', filename, ext))) return (artifactfile, intensityfile, statsfile, normfile, plotfile, displacementfile, maskfile) def _list_outputs(self): outputs = self._outputs().get() outputs['outlier_files'] = [] outputs['intensity_files'] = [] outputs['statistic_files'] = [] outputs['mask_files'] = [] if isdefined(self.inputs.use_norm) and self.inputs.use_norm: outputs['norm_files'] = [] if self.inputs.bound_by_brainmask: outputs['displacement_files'] = [] if isdefined(self.inputs.save_plot) and self.inputs.save_plot: outputs['plot_files'] = [] for i, f in enumerate(filename_to_list(self.inputs.realigned_files)): (outlierfile, intensityfile, statsfile, normfile, plotfile, displacementfile, maskfile) = \ self._get_output_filenames(f, os.getcwd()) outputs['outlier_files'].insert(i, outlierfile) outputs['intensity_files'].insert(i, intensityfile) outputs['statistic_files'].insert(i, statsfile) outputs['mask_files'].insert(i, maskfile) if isdefined(self.inputs.use_norm) and self.inputs.use_norm: outputs['norm_files'].insert(i, normfile) if self.inputs.bound_by_brainmask: outputs['displacement_files'].insert(i, displacementfile) if isdefined(self.inputs.save_plot) and self.inputs.save_plot: outputs['plot_files'].insert(i, plotfile) return outputs def _plot_outliers_with_wave(self, wave, outliers, name): import matplotlib.pyplot as plt plt.plot(wave) plt.ylim([wave.min(), wave.max()]) plt.xlim([0, len(wave) - 1]) if len(outliers): plt.plot(np.tile(outliers[:, None], (1, 2)).T, np.tile([wave.min(), wave.max()], (len(outliers), 1)).T, 'r') plt.xlabel('Scans - 0-based') plt.ylabel(name) def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): """ Core routine for detecting outliers """ if not cwd: cwd = os.getcwd() # read in functional image if isinstance(imgfile, str): nim = load(imgfile) elif isinstance(imgfile, list): if len(imgfile) == 1: nim = load(imgfile[0]) else: images = [load(f) for f in imgfile] nim = funcs.concat_images(images) # compute global intensity signal (x, y, z, timepoints) = nim.get_shape() 
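# g holds one global-intensity value per volume; how each volume is masked before averaging depends on inputs.mask_type ('spm_global', 'file' or 'thresh'), handled in the branches below.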
data = nim.get_data() affine = nim.get_affine() g = np.zeros((timepoints, 1)) masktype = self.inputs.mask_type if masktype == 'spm_global': # spm_global like calculation iflogger.debug('art: using spm global') intersect_mask = self.inputs.intersect_mask if intersect_mask: mask = np.ones((x, y, z), dtype=bool) for t0 in range(timepoints): vol = data[:, :, :, t0] # Use an SPM like approach mask_tmp = vol > \ (_nanmean(vol) / self.inputs.global_threshold) mask = mask * mask_tmp for t0 in range(timepoints): vol = data[:, :, :, t0] g[t0] = _nanmean(vol[mask]) if len(find_indices(mask)) < (np.prod((x, y, z)) / 10): intersect_mask = False g = np.zeros((timepoints, 1)) if not intersect_mask: iflogger.info('not intersect_mask is True') mask = np.zeros((x, y, z, timepoints)) for t0 in range(timepoints): vol = data[:, :, :, t0] mask_tmp = vol > \ (_nanmean(vol) / self.inputs.global_threshold) mask[:, :, :, t0] = mask_tmp g[t0] = np.nansum(vol * mask_tmp)/np.nansum(mask_tmp) elif masktype == 'file': # uses a mask image to determine intensity maskimg = load(self.inputs.mask_file) mask = maskimg.get_data() affine = maskimg.get_affine() mask = mask > 0.5 for t0 in range(timepoints): vol = data[:, :, :, t0] g[t0] = _nanmean(vol[mask]) elif masktype == 'thresh': # uses a fixed signal threshold for t0 in range(timepoints): vol = data[:, :, :, t0] mask = vol > self.inputs.mask_threshold g[t0] = _nanmean(vol[mask]) else: mask = np.ones((x, y, z)) g = _nanmean(data[mask > 0, :], 1) # compute normalized intensity values gz = signal.detrend(g, axis=0) # detrend the signal if self.inputs.use_differences[1]: gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)), axis=0) gz = (gz - np.mean(gz)) / np.std(gz) # normalize the detrended signal iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold) # read in motion parameters mc_in = np.loadtxt(motionfile) mc = deepcopy(mc_in) (artifactfile, intensityfile, statsfile, normfile, plotfile, displacementfile, maskfile) = self._get_output_filenames(imgfile, cwd) mask_img = Nifti1Image(mask.astype(np.uint8), affine) mask_img.to_filename(maskfile) if self.inputs.use_norm: brain_pts = None if self.inputs.bound_by_brainmask: voxel_coords = np.nonzero(mask) coords = np.vstack((voxel_coords[0], np.vstack((voxel_coords[1], voxel_coords[2])))).T brain_pts = np.dot(affine, np.hstack((coords, np.ones((coords.shape[0], 1)))).T) # calculate the norm of the motion parameters normval, displacement = _calc_norm(mc, self.inputs.use_differences[0], self.inputs.parameter_source, brain_pts=brain_pts) tidx = find_indices(normval > self.inputs.norm_threshold) ridx = find_indices(normval < 0) if displacement is not None: dmap = np.zeros((x, y, z, timepoints), dtype=np.float) for i in range(timepoints): dmap[voxel_coords[0], voxel_coords[1], voxel_coords[2], i] = displacement[i, :] dimg = Nifti1Image(dmap, affine) dimg.to_filename(displacementfile) else: if self.inputs.use_differences[0]: mc = np.concatenate((np.zeros((1, 6)), np.diff(mc_in, n=1, axis=0)), axis=0) traval = mc[:, 0:3] # translation parameters (mm) rotval = mc[:, 3:6] # rotation parameters (rad) tidx = find_indices(np.sum(abs(traval) > self.inputs.translation_threshold, 1) > 0) ridx = find_indices(np.sum(abs(rotval) > self.inputs.rotation_threshold, 1) > 0) outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx))) # write output to outputfile np.savetxt(artifactfile, outliers, fmt='%d', delimiter=' ') np.savetxt(intensityfile, g, fmt='%.2f', delimiter=' ') if self.inputs.use_norm: np.savetxt(normfile, 
normval, fmt='%.4f', delimiter=' ') if isdefined(self.inputs.save_plot) and self.inputs.save_plot: import matplotlib matplotlib.use(config.get("execution", "matplotlib_backend")) import matplotlib.pyplot as plt fig = plt.figure() if isdefined(self.inputs.use_norm) and self.inputs.use_norm: plt.subplot(211) else: plt.subplot(311) self._plot_outliers_with_wave(gz, iidx, 'Intensity') if isdefined(self.inputs.use_norm) and self.inputs.use_norm: plt.subplot(212) self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx), 'Norm (mm)') else: diff = '' if self.inputs.use_differences[0]: diff = 'diff' plt.subplot(312) self._plot_outliers_with_wave(traval, tidx, 'Translation (mm)' + diff) plt.subplot(313) self._plot_outliers_with_wave(rotval, ridx, 'Rotation (rad)' + diff) plt.savefig(plotfile) plt.close(fig) motion_outliers = np.union1d(tidx, ridx) stats = [{'motion_file': motionfile, 'functional_file': imgfile}, {'common_outliers': len(np.intersect1d(iidx, motion_outliers)), 'intensity_outliers': len(np.setdiff1d(iidx, motion_outliers)), 'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)), }, {'motion': [{'using differences': self.inputs.use_differences[0]}, {'mean': np.mean(mc_in, axis=0).tolist(), 'min': np.min(mc_in, axis=0).tolist(), 'max': np.max(mc_in, axis=0).tolist(), 'std': np.std(mc_in, axis=0).tolist()}, ]}, {'intensity': [{'using differences': self.inputs.use_differences[1]}, {'mean': np.mean(gz, axis=0).tolist(), 'min': np.min(gz, axis=0).tolist(), 'max': np.max(gz, axis=0).tolist(), 'std': np.std(gz, axis=0).tolist()}, ]}, ] if self.inputs.use_norm: stats.insert(3, {'motion_norm': {'mean': np.mean(normval, axis=0).tolist(), 'min': np.min(normval, axis=0).tolist(), 'max': np.max(normval, axis=0).tolist(), 'std': np.std(normval, axis=0).tolist(), }}) save_json(statsfile, stats) def _run_interface(self, runtime): """Execute this module. """ funcfilelist = filename_to_list(self.inputs.realigned_files) motparamlist = filename_to_list(self.inputs.realignment_parameters) for i, imgf in enumerate(funcfilelist): self._detect_outliers_core(imgf, motparamlist[i], i, cwd=os.getcwd()) return runtime class StimCorrInputSpec(BaseInterfaceInputSpec): realignment_parameters = InputMultiPath(File(exists=True), mandatory=True, desc=('Names of realignment parameters corresponding to the functional ' 'data files')) intensity_values = InputMultiPath(File(exists=True), mandatory=True, desc='Name of file containing intensity values') spm_mat_file = File(exists=True, mandatory=True, desc='SPM mat file (use pre-estimate SPM.mat file)') concatenated_design = traits.Bool(mandatory=True, desc='state if the design matrix contains concatenated sessions') class StimCorrOutputSpec(TraitedSpec): stimcorr_files = OutputMultiPath(File(exists=True), desc='List of files containing correlation values') class StimulusCorrelation(BaseInterface): """Determines if stimuli are correlated with motion or intensity parameters. Currently this class supports an SPM generated design matrix and requires intensity parameters. This implies that one must run :ref:`ArtifactDetect ` and :ref:`Level1Design ` prior to running this or provide an SPM.mat file and intensity parameters through some other means. 
Examples -------- >>> sc = StimulusCorrelation() >>> sc.inputs.realignment_parameters = 'functional.par' >>> sc.inputs.intensity_values = 'functional.rms' >>> sc.inputs.spm_mat_file = 'SPM.mat' >>> sc.inputs.concatenated_design = False >>> sc.run() # doctest: +SKIP """ input_spec = StimCorrInputSpec output_spec = StimCorrOutputSpec def _get_output_filenames(self, motionfile, output_dir): """Generate output files based on motion filenames Parameters ---------- motionfile: file/string Filename for motion parameter file output_dir: string output directory in which the files will be generated """ (_, filename) = os.path.split(motionfile) (filename, _) = os.path.splitext(filename) corrfile = os.path.join(output_dir, ''.join(('qa.', filename, '_stimcorr.txt'))) return corrfile def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None): """ Core routine for determining stimulus correlation """ if not cwd: cwd = os.getcwd() # read in motion parameters mc_in = np.loadtxt(motionfile) g_in = np.loadtxt(intensityfile) g_in.shape = g_in.shape[0], 1 dcol = designmatrix.shape[1] mccol = mc_in.shape[1] concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in)) cm = np.corrcoef(concat_matrix, rowvar=0) corrfile = self._get_output_filenames(motionfile, cwd) # write output to outputfile file = open(corrfile, 'w') file.write("Stats for:\n") file.write("Stimulus correlated motion:\n%s\n" % motionfile) for i in range(dcol): file.write("SCM.%d:" % i) for v in cm[i, dcol + np.arange(mccol)]: file.write(" %.2f" % v) file.write('\n') file.write("Stimulus correlated intensity:\n%s\n" % intensityfile) for i in range(dcol): file.write("SCI.%d: %.2f\n" % (i, cm[i, -1])) file.close() def _get_spm_submatrix(self, spmmat, sessidx, rows=None): """ Parameters ---------- spmmat: scipy matlab object full SPM.mat file loaded into a scipy object sessidx: int index to session that needs to be extracted. """ designmatrix = spmmat['SPM'][0][0].xX[0][0].X U = spmmat['SPM'][0][0].Sess[0][sessidx].U[0] if rows is None: rows = spmmat['SPM'][0][0].Sess[0][sessidx].row[0] - 1 cols = spmmat['SPM'][0][0].Sess[0][sessidx].col[0][range(len(U))] - 1 outmatrix = designmatrix.take(rows.tolist(), axis=0).take(cols.tolist(), axis=1) return outmatrix def _run_interface(self, runtime): """Execute this module. 
""" motparamlist = self.inputs.realignment_parameters intensityfiles = self.inputs.intensity_values spmmat = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False) nrows = [] for i in range(len(motparamlist)): sessidx = i rows = None if self.inputs.concatenated_design: sessidx = 0 mc_in = np.loadtxt(motparamlist[i]) rows = np.sum(nrows) + np.arange(mc_in.shape[0]) nrows.append(mc_in.shape[0]) matrix = self._get_spm_submatrix(spmmat, sessidx, rows) self._stimcorr_core(motparamlist[i], intensityfiles[i], matrix, os.getcwd()) return runtime def _list_outputs(self): outputs = self._outputs().get() files = [] for i, f in enumerate(self.inputs.realignment_parameters): files.insert(i, self._get_output_filenames(f, os.getcwd())) if files: outputs['stimcorr_files'] = files return outputs nipype-0.9.2/nipype/algorithms/setup.py000066400000000000000000000007171227300005300202120ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('algorithms', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/algorithms/tests/000077500000000000000000000000001227300005300176355ustar00rootroot00000000000000nipype-0.9.2/nipype/algorithms/tests/test_auto_AddCSVColumn.py000066400000000000000000000015111227300005300245160ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import AddCSVColumn def test_AddCSVColumn_inputs(): input_map = dict(extra_column_heading=dict(), extra_field=dict(), in_file=dict(mandatory=True, ), out_file=dict(usedefault=True, ), ) inputs = AddCSVColumn.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_AddCSVColumn_outputs(): output_map = dict(csv_file=dict(), ) outputs = AddCSVColumn.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_ArtifactDetect.py000066400000000000000000000035201227300005300251640ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.rapidart import ArtifactDetect def test_ArtifactDetect_inputs(): input_map = dict(bound_by_brainmask=dict(usedefault=True, ), global_threshold=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), intersect_mask=dict(), mask_file=dict(), mask_threshold=dict(), mask_type=dict(mandatory=True, ), norm_threshold=dict(mandatory=True, xor=['rotation_threshold', 'translation_threshold'], ), parameter_source=dict(mandatory=True, ), plot_type=dict(usedefault=True, ), realigned_files=dict(mandatory=True, ), realignment_parameters=dict(mandatory=True, ), rotation_threshold=dict(mandatory=True, xor=['norm_threshold'], ), save_plot=dict(usedefault=True, ), translation_threshold=dict(mandatory=True, xor=['norm_threshold'], ), use_differences=dict(maxlen=2, minlen=2, usedefault=True, ), use_norm=dict(requires=['norm_threshold'], usedefault=True, ), zintensity_threshold=dict(mandatory=True, ), ) inputs = 
ArtifactDetect.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ArtifactDetect_outputs(): output_map = dict(displacement_files=dict(), intensity_files=dict(), mask_files=dict(), norm_files=dict(), outlier_files=dict(), plot_files=dict(), statistic_files=dict(), ) outputs = ArtifactDetect.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py000066400000000000000000000015321227300005300275640ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import CalculateNormalizedMoments def test_CalculateNormalizedMoments_inputs(): input_map = dict(moment=dict(mandatory=True, ), timeseries_file=dict(mandatory=True, ), ) inputs = CalculateNormalizedMoments.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CalculateNormalizedMoments_outputs(): output_map = dict(moments=dict(), ) outputs = CalculateNormalizedMoments.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_CreateNifti.py000066400000000000000000000015471227300005300245020ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import CreateNifti def test_CreateNifti_inputs(): input_map = dict(affine=dict(), data_file=dict(mandatory=True, ), header_file=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), ) inputs = CreateNifti.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CreateNifti_outputs(): output_map = dict(nifti_file=dict(), ) outputs = CreateNifti.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_Distance.py000066400000000000000000000016711227300005300240350ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import Distance def test_Distance_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), mask_volume=dict(), method=dict(usedefault=True, ), volume1=dict(mandatory=True, ), volume2=dict(mandatory=True, ), ) inputs = Distance.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Distance_outputs(): output_map = dict(distance=dict(), histogram=dict(), point1=dict(), point2=dict(), ) outputs = Distance.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_FuzzyOverlap.py000066400000000000000000000017661227300005300247700ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO 
NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import FuzzyOverlap def test_FuzzyOverlap_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), in_ref=dict(mandatory=True, ), in_tst=dict(mandatory=True, ), out_file=dict(usedefault=True, ), weighting=dict(usedefault=True, ), ) inputs = FuzzyOverlap.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FuzzyOverlap_outputs(): output_map = dict(class_fdi=dict(), class_fji=dict(), dice=dict(), diff_file=dict(), jaccard=dict(), ) outputs = FuzzyOverlap.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_Gunzip.py000066400000000000000000000014131227300005300235510ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import Gunzip def test_Gunzip_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(mandatory=True, ), ) inputs = Gunzip.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Gunzip_outputs(): output_map = dict(out_file=dict(), ) outputs = Gunzip.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_ICC.py000066400000000000000000000015411227300005300226750ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.icc import ICC def test_ICC_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), mask=dict(mandatory=True, ), subjects_sessions=dict(mandatory=True, ), ) inputs = ICC.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ICC_outputs(): output_map = dict(icc_map=dict(), session_var_map=dict(), subject_var_map=dict(), ) outputs = ICC.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_Matlab2CSV.py000066400000000000000000000014151227300005300241350ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import Matlab2CSV def test_Matlab2CSV_inputs(): input_map = dict(in_file=dict(mandatory=True, ), reshape_matrix=dict(usedefault=True, ), ) inputs = Matlab2CSV.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Matlab2CSV_outputs(): output_map = dict(csv_files=dict(), ) outputs = Matlab2CSV.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_MergeCSVFiles.py000066400000000000000000000016671227300005300247060ustar00rootroot00000000000000# AUTO-GENERATED by 
tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import MergeCSVFiles def test_MergeCSVFiles_inputs(): input_map = dict(column_headings=dict(), extra_column_heading=dict(), extra_field=dict(), in_files=dict(mandatory=True, ), out_file=dict(usedefault=True, ), row_heading_title=dict(usedefault=True, ), row_headings=dict(), ) inputs = MergeCSVFiles.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MergeCSVFiles_outputs(): output_map = dict(csv_file=dict(), ) outputs = MergeCSVFiles.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_ModifyAffine.py000066400000000000000000000015531227300005300246420ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import ModifyAffine def test_ModifyAffine_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), transformation_matrix=dict(usedefault=True, ), volumes=dict(mandatory=True, ), ) inputs = ModifyAffine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ModifyAffine_outputs(): output_map = dict(transformed_volumes=dict(), ) outputs = ModifyAffine.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_Overlap.py000066400000000000000000000016761227300005300237200ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import Overlap def test_Overlap_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), mask_volume=dict(), out_file=dict(usedefault=True, ), volume1=dict(mandatory=True, ), volume2=dict(mandatory=True, ), ) inputs = Overlap.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Overlap_outputs(): output_map = dict(dice=dict(), diff_file=dict(), jaccard=dict(), volume_difference=dict(), ) outputs = Overlap.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_P2PDistance.py000066400000000000000000000015711227300005300243560ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.mesh import P2PDistance def test_P2PDistance_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), surface1=dict(mandatory=True, ), surface2=dict(mandatory=True, ), weighting=dict(usedefault=True, ), ) inputs = P2PDistance.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_P2PDistance_outputs(): output_map = dict(distance=dict(), ) outputs = P2PDistance.output_spec() for key, metadata in output_map.items(): for metakey, value in 
metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_PickAtlas.py000066400000000000000000000016551227300005300241600ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import PickAtlas def test_PickAtlas_inputs(): input_map = dict(atlas=dict(mandatory=True, ), dilation_size=dict(usedefault=True, ), hemi=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), labels=dict(mandatory=True, ), output_file=dict(), ) inputs = PickAtlas.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_PickAtlas_outputs(): output_map = dict(mask_file=dict(), ) outputs = PickAtlas.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_SimpleThreshold.py000066400000000000000000000015551227300005300254120ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import SimpleThreshold def test_SimpleThreshold_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), threshold=dict(mandatory=True, ), volumes=dict(mandatory=True, ), ) inputs = SimpleThreshold.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SimpleThreshold_outputs(): output_map = dict(thresholded_volumes=dict(), ) outputs = SimpleThreshold.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_SpecifyModel.py000066400000000000000000000023221227300005300246600ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.modelgen import SpecifyModel def test_SpecifyModel_inputs(): input_map = dict(event_files=dict(mandatory=True, xor=['subject_info'], ), functional_runs=dict(copyfile=False, mandatory=True, ), high_pass_filter_cutoff=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), input_units=dict(mandatory=True, ), outlier_files=dict(copyfile=False, ), realignment_parameters=dict(copyfile=False, ), subject_info=dict(mandatory=True, xor=['event_files'], ), time_repetition=dict(mandatory=True, ), ) inputs = SpecifyModel.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SpecifyModel_outputs(): output_map = dict(session_info=dict(), ) outputs = SpecifyModel.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_SpecifySPMModel.py000066400000000000000000000025011227300005300252370ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.modelgen import SpecifySPMModel def test_SpecifySPMModel_inputs(): input_map = 
dict(concatenate_runs=dict(usedefault=True, ), event_files=dict(mandatory=True, xor=['subject_info'], ), functional_runs=dict(copyfile=False, mandatory=True, ), high_pass_filter_cutoff=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), input_units=dict(mandatory=True, ), outlier_files=dict(copyfile=False, ), output_units=dict(usedefault=True, ), realignment_parameters=dict(copyfile=False, ), subject_info=dict(mandatory=True, xor=['event_files'], ), time_repetition=dict(mandatory=True, ), ) inputs = SpecifySPMModel.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SpecifySPMModel_outputs(): output_map = dict(session_info=dict(), ) outputs = SpecifySPMModel.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_SpecifySparseModel.py000066400000000000000000000032071227300005300260410ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.modelgen import SpecifySparseModel def test_SpecifySparseModel_inputs(): input_map = dict(event_files=dict(mandatory=True, xor=['subject_info'], ), functional_runs=dict(copyfile=False, mandatory=True, ), high_pass_filter_cutoff=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), input_units=dict(mandatory=True, ), model_hrf=dict(), outlier_files=dict(copyfile=False, ), realignment_parameters=dict(copyfile=False, ), save_plot=dict(), scale_regressors=dict(usedefault=True, ), scan_onset=dict(usedefault=True, ), stimuli_as_impulses=dict(usedefault=True, ), subject_info=dict(mandatory=True, xor=['event_files'], ), time_acquisition=dict(mandatory=True, ), time_repetition=dict(mandatory=True, ), use_temporal_deriv=dict(requires=['model_hrf'], ), volumes_in_cluster=dict(usedefault=True, ), ) inputs = SpecifySparseModel.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SpecifySparseModel_outputs(): output_map = dict(session_info=dict(), sparse_png_file=dict(), sparse_svg_file=dict(), ) outputs = SpecifySparseModel.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_StimulusCorrelation.py000066400000000000000000000017671227300005300263400ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.rapidart import StimulusCorrelation def test_StimulusCorrelation_inputs(): input_map = dict(concatenated_design=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), intensity_values=dict(mandatory=True, ), realignment_parameters=dict(mandatory=True, ), spm_mat_file=dict(mandatory=True, ), ) inputs = StimulusCorrelation.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_StimulusCorrelation_outputs(): output_map = dict(stimcorr_files=dict(), ) outputs = StimulusCorrelation.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield 
assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_auto_TSNR.py000066400000000000000000000015441227300005300230700ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.algorithms.misc import TSNR def test_TSNR_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(mandatory=True, ), regress_poly=dict(), ) inputs = TSNR.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TSNR_outputs(): output_map = dict(detrended_file=dict(), mean_file=dict(), stddev_file=dict(), tsnr_file=dict(), ) outputs = TSNR.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/algorithms/tests/test_icc_anova.py000066400000000000000000000013311227300005300231660ustar00rootroot00000000000000import numpy as np from nipype.testing import assert_equal from nipype.algorithms.icc import ICC_rep_anova def test_ICC_rep_anova(): #see table 2 in P. E. Shrout & Joseph L. Fleiss (1979). "Intraclass Correlations: Uses in # Assessing Rater Reliability". Psychological Bulletin 86 (2): 420-428 Y = np.array([[9, 2, 5, 8], [6, 1, 3, 2], [8, 4, 6, 8], [7, 1, 2, 6], [10, 5, 6, 9], [6, 2, 4, 7]]) icc, r_var, e_var , _, dfc, dfe = ICC_rep_anova(Y) #see table 4 yield assert_equal, round(icc, 2), 0.71 yield assert_equal, dfc, 3 yield assert_equal, dfe, 15 yield assert_equal, r_var/(r_var + e_var), icc nipype-0.9.2/nipype/algorithms/tests/test_modelgen.py000066400000000000000000000127161227300005300230470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from copy import deepcopy import os from shutil import rmtree from tempfile import mkdtemp from nibabel import Nifti1Image import numpy as np from nipype.testing import (assert_equal, assert_raises, assert_almost_equal) from nipype.interfaces.base import Bunch, TraitError from nipype.algorithms.modelgen import (SpecifyModel, SpecifySparseModel, SpecifySPMModel) def test_modelgen1(): tempdir = mkdtemp() filename1 = os.path.join(tempdir, 'test1.nii') filename2 = os.path.join(tempdir, 'test2.nii') Nifti1Image(np.random.rand(10, 10, 10, 200), np.eye(4)).to_filename(filename1) Nifti1Image(np.random.rand(10, 10, 10, 200), np.eye(4)).to_filename(filename2) s = SpecifyModel() s.inputs.input_units = 'scans' set_output_units = lambda: setattr(s.inputs, 'output_units', 'scans') yield assert_raises, TraitError, set_output_units s.inputs.functional_runs = [filename1, filename2] s.inputs.time_repetition = 6 s.inputs.high_pass_filter_cutoff = 128. 
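    # The onsets defined just below are given in 'scans' (see input_units
    # above). SpecifyModel converts them to seconds using the TR, which is
    # what the assert_almost_equal on session_info[0]['cond'][0]['onset']
    # further down expects. A minimal sketch of that conversion; the
    # underscore-prefixed names are illustrative only and not used by the
    # test itself:
    _onsets_in_scans = np.array([2, 50, 100, 180])
    _tr = 6  # same value as s.inputs.time_repetition above
    _onsets_in_secs = _onsets_in_scans * _tr  # array([  12,  300,  600, 1080])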
info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]], amplitudes=None, pmod=None, regressors=None, regressor_names=None, tmod=None), Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]], amplitudes=None, pmod=None, regressors=None, regressor_names=None, tmod=None)] s.inputs.subject_info = info res = s.run() yield assert_equal, len(res.outputs.session_info), 2 yield assert_equal, len(res.outputs.session_info[0]['regress']), 0 yield assert_equal, len(res.outputs.session_info[0]['cond']), 1 yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([12, 300, 600, 1080]) rmtree(tempdir) def test_modelgen_spm_concat(): tempdir = mkdtemp() filename1 = os.path.join(tempdir, 'test1.nii') filename2 = os.path.join(tempdir, 'test2.nii') Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename1) Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename2) s = SpecifySPMModel() s.inputs.input_units = 'secs' s.inputs.concatenate_runs = True setattr(s.inputs, 'output_units', 'secs') yield assert_equal, s.inputs.output_units, 'secs' s.inputs.functional_runs = [filename1, filename2] s.inputs.time_repetition = 6 s.inputs.high_pass_filter_cutoff = 128. info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 170]], durations=[[1]]), Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])] s.inputs.subject_info = deepcopy(info) res = s.run() yield assert_equal, len(res.outputs.session_info), 1 yield assert_equal, len(res.outputs.session_info[0]['regress']), 1 yield assert_equal, np.sum(res.outputs.session_info[0]['regress'][0]['val']), 30 yield assert_equal, len(res.outputs.session_info[0]['cond']), 1 yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) setattr(s.inputs, 'output_units', 'scans') yield assert_equal, s.inputs.output_units, 'scans' s.inputs.subject_info = deepcopy(info) res = s.run() yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])/6 s.inputs.concatenate_runs = False s.inputs.subject_info = deepcopy(info) s.inputs.output_units = 'secs' res = s.run() yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0]) rmtree(tempdir) def test_modelgen_sparse(): tempdir = mkdtemp() filename1 = os.path.join(tempdir, 'test1.nii') filename2 = os.path.join(tempdir, 'test2.nii') Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename1) Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename2) s = SpecifySparseModel() s.inputs.input_units = 'secs' s.inputs.functional_runs = [filename1, filename2] s.inputs.time_repetition = 6 info = [Bunch(conditions=['cond1'], onsets=[[0, 50, 100, 180]], durations=[[2]]), Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])] s.inputs.subject_info = info s.inputs.volumes_in_cluster = 1 s.inputs.time_acquisition = 2 s.inputs.high_pass_filter_cutoff = np.inf res = s.run() yield assert_equal, len(res.outputs.session_info), 2 yield assert_equal, len(res.outputs.session_info[0]['regress']), 1 yield assert_equal, len(res.outputs.session_info[0]['cond']), 0 s.inputs.stimuli_as_impulses = False res = s.run() yield assert_equal, res.outputs.session_info[0]['regress'][0]['val'][0], 1.0 s.inputs.model_hrf = True res = s.run() yield 
assert_almost_equal, res.outputs.session_info[0]['regress'][0]['val'][0], 0.016675298129743384 yield assert_equal, len(res.outputs.session_info[0]['regress']), 1 s.inputs.use_temporal_deriv = True res = s.run() yield assert_equal, len(res.outputs.session_info[0]['regress']), 2 yield assert_almost_equal, res.outputs.session_info[0]['regress'][0]['val'][0], 0.016675298129743384 yield assert_almost_equal, res.outputs.session_info[1]['regress'][1]['val'][5], 0.007671459162258378 rmtree(tempdir) nipype-0.9.2/nipype/algorithms/tests/test_moments.py000066400000000000000000000322441227300005300227350ustar00rootroot00000000000000import numpy as np from nipype.testing import assert_true import tempfile from nipype.algorithms.misc import calc_moments def test_skew(): data = """14.62418305 5.916396751 -1.658088086 4.71113546 1.598428608 5.612553811 -5.004056368 -4.057513911 11.16365251 17.32688599 -3.099920667 2.630189741 2.389709914 0.379332731 -0.2899694205 -4.363591482 2.059205599 23.90705054 0.7180462297 -1.976963652 7.487682025 -5.583986129 1.094800525 -2.319858134 -1.907579712 22.08277347 4.595575886 -3.869054671 8.214834769 -3.442156385 2.428766374 0.7736184662 0.6535290043 14.1320384 0.9458768261 -2.577892846 -0.8925440241 3.177128674 6.048546332 1.736059675 3.149271524 8.106285467 -6.173280371 -0.5146958863 -11.83574747 4.066575201 9.160589786 0.1680632718 3.089673173 8.736851925 -5.624227736 1.386441126 -12.58621755 -0.726443824 8.036414499 -0.3318169666 2.685349599 9.968755255 2.965603277 2.634928414 -3.783441929 -1.858587372 3.238274675 2.594880211 0.870577208 2.323455904 7.840351954 1.635436162 2.451630603 2.834494164 -1.384081764 5.840475644 -4.421008251 -12.78755879 2.985581265 -1.609381512 -0.1816579797 5.448215202 -2.855889998 5.041186537 -8.502455278 -22.66799593 -3.964218147 -4.180363107 -5.061764789 2.439737668 -0.9988071581 1.437142327 -5.355058719 -19.00567875 -4.803737548 -3.884369973 -4.977945181 -0.4758749938 1.894453988 0.003263759218 1.29682909 -8.295173365 -1.51226274 -1.611159469 -2.5403281 -0.2155584519 2.597114132 1.16528519 3.162947556 -3.093405654 0.4782790153 1.015061011 -2.755821487 -1.015685899 0.1402527399 0.05435017236 0.9158883917 -6.679241736 0.9376568982 3.175011335 -2.712383777 -3.836563374 -2.270503748 -4.593165145 0.5468675209 -11.14130502 1.420140475 3.506045445 2.777240829 -3.14187819 -0.7823285883 -6.84663074 -0.5754863055 -9.638785593 0.2926825231 1.039079149 9.613209645 1.300380032 3.755092776 -2.30881605 -9.12095608 -5.422145216 -3.089096046 -1.913969236 8.36828235 1.622740946 6.756285589 4.803793558 -18.6459149 -5.677906762 -4.447399529 -1.826561667 -1.179681537 -3.51737806 6.062770694 7.743917051 -14.12032005 -9.346953111 -0.3927872312 0.5116398162 -8.814603334 -4.191932775 3.735940996 5.926107194 3.984986352 -7.490234063 5.101302343 0.6359344324 -8.098435707 3.372259941 1.603560776 2.787631701 16.74369044 2.523688856 4.825375014 -2.888386026 -2.929939078 7.41176576 -0.9444665519 -0.5476924783 13.0864062 10.44887074 -2.409155335 -6.466987193 2.038766622 -0.9844478726 -3.872608358 -3.903240663 3.888161509 7.356308659 -9.783752602 -6.593576679 7.785360016 -11.59798121 -5.359996968 -4.646576281 2.919034842 0.4926039084 -9.765686304 -3.169484175 13.3885185 -10.00053277 -5.284251069 -1.953467094 7.762685816 3.138596183 -2.417670781 2.087535944 12.09072814 0.3201456619 -5.986630196 -0.393473785 8.598656701 12.64638617 4.32929224 6.665685612 2.52013659 4.924021467 -7.729146671 -2.531538284 4.286211902 12.70121508 4.197284784 7.586579174 -4.511459665 
1.039992021 -7.200406996 -2.678018972 -0.206805413 -1.118395095 1.251956053 4.927663964 -0.3269306726 -1.614001868 -2.858296125 3.708027659 -3.615745533 -13.26040515 4.163662563 3.376525012 6.876574727 1.021356663 1.813515644 9.401028448 -6.392625018 -11.19412506 11.70010341 5.557449086 3.188483207 3.033109557 3.108015432 5.00732323 -5.697688304 -1.564055358 12.53451981 6.641295722 -9.330508253 1.60952695 1.985401431 -4.635334005 -0.4739120366 5.308731294 3.209488234 1.907340382 -15.26443399 1.262158357 1.288838724 -6.54661201 3.733208755 11.99608217 -4.121352088 -3.787629919 -8.977806581 3.760241115 1.048439633 -0.2497259139 1.633682769 21.98252106 0.008457593931 -2.863301753 -1.475378656 4.854200462 -0.156717616 2.028483989 -4.262983941 24.73198623 6.529712692 1.286325062 -1.857794734 2.962236297 -1.586154566 -3.6478191 -7.502330557 10.60931417 2.397686502 -1.56459968 -4.721655517 2.006857078 -1.490344215 -7.044842318 -5.198726771 -8.273929595 -7.6127574 -11.03862432 -1.592101433 3.747829535 -0.06779667515 -2.412618507 0.7334095101 -11.76661769 -9.165804187 -14.81298889 5.36362746 4.955331255 1.673488979 2.0899358 5.517823916 -1.529874203 -2.421802273 -6.947139589 8.366593034 3.55375893 4.03335273 -0.05524186477 1.474077483 2.649817521 7.255506458 6.068405441 -2.220943179 -0.6343270953 1.382522916 -2.748044018 -6.776840898 2.855345278 -3.570626044 1.654853143 -2.838161622 0.755210647 7.252510904 1.235575241 -14.86022341 -0.8943548346 -10.36165869 -1.966680076 -3.641426564 -3.670112785 8.644955043 6.859610046 -7.145239483 -0.1458937017 -3.867994525 -0.9484554762 -2.48227248 -8.36071796 2.539637492 5.399990929 8.804929045 1.925551314 3.240568033 1.273961559 2.104351411 -6.141864838 -5.255423549 -0.7896387751 9.735755254 -1.862844212 -2.552156104 -0.3902178948 5.745817797 -1.515932976 -8.546922674 -3.440929455 -5.837957148 -8.226266393 -13.20837554 -4.385043051 2.553090991 -4.209818986 -8.331176217 -1.707250641 -12.64051676 -8.2399894 -12.76990779 -5.960467624 -4.294427772 -10.92374675 -8.6902905 0.3421994093 1.17028221 -1.953361346 -2.607159313 -4.896369845 -4.519583123 -8.055510792 -9.019182555 3.36412153 14.48433641 2.152584104 3.178007658 -3.9792054 3.873546228 5.321306118 -5.445499499 8.684509027 8.116988393 0.4683619278 1.046001596 -3.128586059 10.0250152 12.58326776 1.447856102 10.18164703 -4.706381289 -1.788728553 0.6563335204 -0.5831451131 5.744824049 3.988876139 5.65836796 2.189197844 -2.76704126 -0.495980308 6.533235978 2.372759856 -2.792331174 -7.896310272 3.502571539 -8.556072249 8.315654337 0.7043190657 11.38508989 2.565286445 -5.081739754 -6.900720718 -1.667312154 -10.59024727 9.909297104 -2.934946689 8.968652164 -0.5610029798 -0.6957945725 3.815352939 -4.277309457 -4.346939024 3.809478921 -8.178727502 2.78966603 -4.568498377 3.295953611 9.457549108 -2.931773943 -0.04922082646 4.940986376 -6.906199411 -0.6726182267 -6.550149966 3.251783239 6.324220141 0.1496185048 -1.7701633 10.55846735 1.720423345 -0.02248084003 -4.475053837 0.3943175795 3.615274407 3.17786214 -4.661015894 5.164991215 7.975239079 2.030845129 1.259865261 -3.543706118 6.424886561 5.257164014 -5.686755714 -7.85360929 4.585684687 2.641661508 6.399259194 -5.791994946 9.620021677 5.059618162 -5.841773643 -7.887333445 -1.663863126 0.531225876 6.442066641 -2.580841985 8.356612294 2.609875283 -3.391732494 7.467417207 0.7346301535 -2.719728468 2.822035284 4.54698989 4.221046784 0.791568596 3.728706407 14.76100347 9.382305581 -3.17219641 1.381585183 7.754995237 -0.3908054543 1.355349478 9.807914939 0.1267792801 
9.818588278 0.5608772817 3.633460684 3.711951896 -5.421004876 1.162611597 7.001274262 -19.35283277 -2.103358718 4.16130701 4.67192889 -0.8231375514 -8.81474386 -2.846417531 -1.268859264 -20.80038431 -11.76135621 2.944594891 1.64388247 -0.1668629943 -6.707442921 -6.544901517 -3.830974298 -5.592905106 -6.057725588 -1.233208621 -1.339964983 0.7299911265 -0.7530015377 -3.117175727 1.142381884 7.890421323 8.119524766 -2.606602104 0.007101965698 -4.473969864 1.35750371 5.357618774 4.161238035 9.600782899 14.52365435 0.1990637024 3.403466406 -11.59507852 -3.675154543 8.718678202 0.7825822225 3.703846665 8.748127367 3.135332804 4.127582534 -12.38852274 -9.447080613 3.417599727 -1.915488323 -3.011725724 -0.5381126202 3.567929983 2.184591464 -7.411651508 -9.252946446 -1.827784625 1.560496584 -7.142629796 -5.355184696 3.289780212 1.113331632 -3.105505654 -5.606446238 0.1961208934 6.334603712 -6.659543803 -4.245675975 3.726757782 1.953178495 -0.7484610023 -4.426403774 3.716311729 6.200735049 -1.643440395 0.7536090906 2.509268017 2.15471156 2.374200456 -3.774138064 -0.1428981969 2.646676328 3.686406766 4.827058909 -2.458101484 -0.39559615 5.082577298 3.167157352 -8.147321924 -0.03506891856 4.407495284 2.5606793 -8.149493446 -4.632729429 4.938050013 14.56344531 -9.374945991 -1.3893417 -0.1687177084 -4.106757231 -9.343602374 -7.415904922 4.749022091 18.81314153 -1.749200795 -2.02566815 -6.507688641 -6.001538055 -6.108916568 -6.784929595 7.21051134 10.59847744 5.776257506 -0.4990570991 -9.820082348 -0.5741078285 -4.687969138 -4.377866052 7.40862329 -0.06470407472 6.857336593 2.745243336 -7.04365894 2.689020958 -8.804350547 -3.506610093 0.5732906688 -1.771827007 4.332768659 3.537426733 -0.4346222942 -2.295147419 -12.91289393 -3.95705062 -7.130741497 1.478867856 2.340197798 -0.2224791818 2.355519667 -7.446912611 -8.580935982 -1.515500603 -6.545362285 -2.460234117 0.4822626914 -5.261252431 -3.230857748 -4.456435972 3.105258325 4.868182005 -0.3155725672 -12.9461276 -1.81314629 -7.915543953 -10.61694158 1.023409988 11.23695246 9.13393953 2.080132446 -15.68433051 -2.452603277 -8.067702457 -8.952785439 0.3914623321 9.072213866 5.788054925 0.5661677477 -4.862572943 -1.253393229 -6.497656047 1.825216246 -2.868761361 2.684946057 -1.702605515 2.524615008 6.658427102 -1.464383881 -3.333412097 10.52499456 -1.807928838 1.602770946 -5.693835167 7.025193015 6.172728664 -3.989160551 -0.7754719889 10.83430082 0.3010957187 5.703164372 -4.7215044 5.747620411 -0.6137370397 -5.393253651 -1.967790019 9.084992271 -1.297359974 7.313272774 -2.919262371 -0.341939585 -0.488964096 -3.962652217 -5.129527247 11.86896398 -0.4901633845 3.193953846 -1.811431925 -0.3604987261 6.192234507 -2.348495577 -4.159036411 14.81736012 7.870835671 -2.04922723 0.122245812 7.807201578 8.435263453 -1.994392703 2.494961459 10.99679669 13.62141018 -3.175917696 1.68947873 12.43613872 4.131979444 -0.8035598171 8.583091116 3.538171963 6.008200439 0.5876902994 0.4403643142 6.183013749 2.012581919 1.090536757 8.392496526 0.5460594103 -6.259032909 6.647104433 -1.43557129 -3.452884137 4.366160275 -0.2274303705 3.900139848 1.772017802 -8.109091085 10.50095909 -0.1621391129 -7.608906136 2.481208401 -4.509906047 0.7763248812 0.606115406 -2.603199426 7.692974034 2.104967053 -8.226098406 -6.837921596 -4.561655055 1.015397953 -2.978847372 -2.385761908 -0.8339871055 0.6707971346 -9.874595181 -13.39338209 3.157380259 2.413897035 -2.985013991 -5.160444086 -7.29279473 -2.371762765 -10.03622895 -9.34912711 10.97609581 2.654665151 -1.068091568 -0.2479914452 -6.107351633 
-0.9239821871 -5.835733231 -2.189236707 9.811317248 1.508754364 -6.520427038 7.430392097 -1.95095948 4.15525371 -2.032963385 -2.693509918 2.091753969 0.4782648423 -18.09299987 4.740223292 -2.838854108 6.118069011 -3.664423954 -7.91518773 -2.533067915 1.120941519 -19.32711056 -3.231687054 -8.04776777 3.689162869 -6.952885159 -6.854774161 -1.172264884 2.581894309 -2.203996345 -0.5339747203 -10.27858531 1.833505163 -5.406679162 1.678416611 0.871971912 1.837113402 15.60657966 8.749980935 -7.560269196 1.70515063 0.1003191195 8.04135078 1.044572756 -1.582641946 12.19564564 5.273436246 -4.367517279 -0.0400759142 4.431313549 7.067826794 2.741622337 -3.458851463 -6.44120462 -9.849684434 -1.946651925 -2.183436603 6.686267514 4.016449169 6.302612811 -0.9698019507 -13.80878408 -13.92578887 3.071419193 -0.156449455 8.551444945 4.051266929 5.541317929 1.901010931 -1.084801367 -1.267516734 9.774222689 3.461150291 8.195043157 4.77412064 -2.223359889 0.07143463336 11.95939854 7.195316999 11.93418631 1.472618288 3.247038347 2.656123844 -9.091445458 -4.097157466 -2.752420619 -1.103781682 -3.382675846 -3.9326499 0.3168555978 -2.600573426 -9.409987851 -1.564842317 -11.68718367 -12.62978052 -7.436711849 -11.05071165 -4.535693861 -4.973062537 -9.154275121 -0.8478464554 -11.1129098 -8.014294516 -5.818564146 -6.557508409 -4.920322355 -2.444494132 -0.762850219 -1.035995467 -0.1942650118 5.507757423 -0.6713848498 2.045539379 0.2907563314 2.654730384 5.268838031 -2.711154892 6.638825325 9.118078409 2.220738816 5.875202986 0.6059672284 -5.305207318 -0.08004872831 -2.950039659 12.18704972 0.6256114468 2.352153233 8.701077613 4.804756766 -6.163162012 -1.779998967 -6.493561445 4.442326811 -15.10908307 4.919949591 3.969210961 7.004029439 0.1398435001 -4.659976897 -3.899267451 -7.594265524 -20.77328745 5.94521557 -2.385814065 3.224509406 8.943882025 -3.270587613 3.470325906 -8.696673766 -12.29052026 -0.3763403003 -5.55470641 -3.51572569 12.51259902 3.753517263 8.67338497 -0.5057854071 -2.415896554 -9.663571931 -5.714041661 -6.037933426 8.673756933 10.03557773 8.629816199 3.622185659 0.4716627142 -10.92515308 -3.705286841 -2.776089545 2.271920902 9.251504922 5.744980887 """ with tempfile.NamedTemporaryFile(delete=True) as f: f.write(data) f.flush() skewness = calc_moments(f.name, 3) yield assert_true, np.allclose(skewness, np.array( [-0.23418937314622, 0.2946365564954823, -0.05781002053540932, -0.3512508282578762, - 0.07035664150233077, - 0.01935867699166935, 0.00483863369427428, 0.21879460029850167])) nipype-0.9.2/nipype/algorithms/tests/test_rapidart.py000066400000000000000000000065401227300005300230610ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from nipype.testing import (assert_equal, assert_false, assert_true, assert_almost_equal) import nipype.algorithms.rapidart as ra from nipype.interfaces.base import Bunch import numpy as np def test_ad_init(): ad = ra.ArtifactDetect(use_differences=[True, False]) yield assert_true, ad.inputs.use_differences[0] yield assert_false, ad.inputs.use_differences[1] def test_ad_output_filenames(): ad = ra.ArtifactDetect() outputdir = '/tmp' f = 'motion.nii' (outlierfile, intensityfile, statsfile, normfile, plotfile, displacementfile, maskfile) = ad._get_output_filenames(f, outputdir) yield assert_equal, outlierfile, '/tmp/art.motion_outliers.txt' yield assert_equal, intensityfile, '/tmp/global_intensity.motion.txt' yield assert_equal, statsfile, '/tmp/stats.motion.txt' yield assert_equal, 
normfile, '/tmp/norm.motion.txt' yield assert_equal, plotfile, '/tmp/plot.motion.png' yield assert_equal, displacementfile, '/tmp/disp.motion.nii' yield assert_equal, maskfile, '/tmp/mask.motion.nii' def test_ad_get_affine_matrix(): matrix = ra._get_affine_matrix(np.array([0]), 'SPM') yield assert_equal, matrix, np.eye(4) # test translation params = [1, 2, 3] matrix = ra._get_affine_matrix(params, 'SPM') out = np.eye(4) out[0:3, 3] = params yield assert_equal, matrix, out # test rotation params = np.array([0, 0, 0, np.pi / 2, np.pi / 2, np.pi / 2]) matrix = ra._get_affine_matrix(params, 'SPM') out = np.array([0, 0, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1]).reshape((4, 4)) yield assert_almost_equal, matrix, out # test scaling params = np.array([0, 0, 0, 0, 0, 0, 1, 2, 3]) matrix = ra._get_affine_matrix(params, 'SPM') out = np.array([1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 1]).reshape((4, 4)) yield assert_equal, matrix, out # test shear params = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 3]) matrix = ra._get_affine_matrix(params, 'SPM') out = np.array([1, 1, 2, 0, 0, 1, 3, 0, 0, 0, 1, 0, 0, 0, 0, 1]).reshape((4, 4)) yield assert_equal, matrix, out def test_ad_get_norm(): params = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, np.pi / 4, np.pi / 4, np.pi / 4, 0, 0, 0, -np.pi / 4, -np.pi / 4, -np.pi / 4]).reshape((3, 6)) norm, _ = ra._calc_norm(params, False, 'SPM') yield assert_almost_equal, norm, np.array([18.86436316, 37.74610158, 31.29780829]) norm, _ = ra._calc_norm(params, True, 'SPM') yield assert_almost_equal, norm, np.array([0., 143.72192614, 173.92527131]) def test_sc_init(): sc = ra.StimulusCorrelation(concatenated_design=True) yield assert_true, sc.inputs.concatenated_design def test_sc_populate_inputs(): sc = ra.StimulusCorrelation() inputs = Bunch(realignment_parameters=None, intensity_values=None, spm_mat_file=None, concatenated_design=None) yield assert_equal, sc.inputs.__dict__.keys(), inputs.__dict__.keys() def test_sc_output_filenames(): sc = ra.StimulusCorrelation() outputdir = '/tmp' f = 'motion.nii' corrfile = sc._get_output_filenames(f, outputdir) yield assert_equal, corrfile, '/tmp/qa.motion_stimcorr.txt' nipype-0.9.2/nipype/caching/000077500000000000000000000000001227300005300157165ustar00rootroot00000000000000nipype-0.9.2/nipype/caching/__init__.py000066400000000000000000000000521227300005300200240ustar00rootroot00000000000000from nipype.caching.memory import Memory nipype-0.9.2/nipype/caching/memory.py000066400000000000000000000246731227300005300176140ustar00rootroot00000000000000""" Using nipype with persistence and lazy recomputation but without explicit name-steps pipeline: getting back scope in command-line based programming. 
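A minimal usage sketch (the cache directory is hypothetical; the canonical,
tested example lives in the ``Memory.cache`` docstring below):

>>> from nipype.caching import Memory # doctest: +SKIP
>>> from nipype.interfaces import fsl # doctest: +SKIP
>>> mem = Memory('/tmp/nipype_cache') # doctest: +SKIP
>>> fsl_merge = mem.cache(fsl.Merge) # doctest: +SKIP
>>> results = fsl_merge(in_files=['a.nii', 'b.nii'], dimension='t') # doctest: +SKIP

Calling ``fsl_merge`` again with identical inputs returns the cached result
instead of re-running the interface.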
Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ import os import hashlib import pickle import time import shutil import glob from nipype.interfaces.base import BaseInterface from nipype.pipeline.engine import Node from nipype.pipeline.utils import modify_paths ################################################################################ # PipeFunc object: callable interface to nipype.interface objects class PipeFunc(object): """ Callable interface to nipype.interface objects Use this to wrap nipype.interface object and call them specifying their input with keyword arguments:: fsl_merge = PipeFunc(fsl.Merge, base_dir='.') out = fsl_merge(in_files=files, dimension='t') """ def __init__(self, interface, base_dir, callback=None): """ Parameters =========== interface: a nipype interface class The interface class to wrap base_dir: a string The directory in which the computation will be stored callback: a callable An optional callable called each time after the function is called. """ if not (isinstance(interface, type) and issubclass(interface, BaseInterface)): raise ValueError('the interface argument should be a nipype ' 'interface class, but %s (type %s) was passed.' % (interface, type(interface))) self.interface = interface base_dir = os.path.abspath(base_dir) if not os.path.exists(base_dir) and os.path.isdir(base_dir): raise ValueError('base_dir should be an existing directory') self.base_dir = base_dir doc = '%s\n%s' % (self.interface.__doc__, self.interface.help(returnhelp=True)) self.__doc__ = doc self.callback = callback def __call__(self, **kwargs): kwargs = modify_paths(kwargs, relative=False) interface = self.interface() # Set the inputs early to get some argument checking interface.inputs.set(**kwargs) # Make a name for our node inputs = interface.inputs.get_hashval() hasher = hashlib.new('md5') hasher.update(pickle.dumps(inputs)) dir_name = '%s-%s' % (interface.__class__.__module__.replace('.', '-'), interface.__class__.__name__) job_name = hasher.hexdigest() node = Node(interface, name=job_name) node.base_dir = os.path.join(self.base_dir, dir_name) out = node.run() if self.callback is not None: self.callback(dir_name, job_name) return out def __repr__(self): return '%s(%s.%s, base_dir=%s)' % (self.__class__.__name__, self.interface.__module__, self.interface.__name__, self.base_dir) ################################################################################ # Memory manager: provide some tracking about what is computed when, to # be able to flush the disk def read_log(filename, run_dict=None): if run_dict is None: run_dict = dict() for line in open(filename, 'r'): dir_name, job_name = line[:-1].split('/') jobs = run_dict.get(dir_name, set()) jobs.add(job_name) run_dict[dir_name] = jobs return run_dict def rm_all_but(base_dir, dirs_to_keep, warn=False): """ Remove all the sub-directories of base_dir, but those listed Parameters ============ base_dir: string The base directory dirs_to_keep: set The names of the directories to keep """ try: all_dirs = os.listdir(base_dir) except OSError: "Dir has been deleted" return all_dirs = [d for d in all_dirs if not d.startswith('log.')] dirs_to_rm = list(dirs_to_keep.symmetric_difference(all_dirs)) for dir_name in dirs_to_rm: dir_name = os.path.join(base_dir, dir_name) if os.path.exists(dir_name): if warn: print 'removing directory: %s' % dir_name 
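# Note on the cache layout produced by PipeFunc.__call__ above (a sketch; the
# concrete example path is hypothetical): each run is stored under
#   <base_dir>/<interface module with '.' replaced by '-'>-<InterfaceName>/<md5 of the pickled input hash>/
# e.g. nipype_mem/nipype-interfaces-fsl-utils-Merge/<md5 hexdigest>/
# so repeated calls with identical inputs resolve to the same directory and
# reuse the cached node result. The per-day log files parsed by read_log()
# record exactly these "<dir_name>/<job_name>" pairs.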
shutil.rmtree(dir_name) class _MemoryCallback(object): "An object to avoid closures and have everything pickle" def __init__(self, memory): self.memory = memory def __call__(self, dir_name, job_name): self.memory._log_name(dir_name, job_name) class Memory(object): """ Memory context to provide caching for interfaces Parameters ========== base_dir: string The directory name of the location for the caching Methods ======= cache Creates a cacheable function from an nipype Interface class clear_previous_runs Removes from the disk all the runs that where not used after the creation time of the specific Memory instance clear_previous_runs Removes from the disk all the runs that where not used after the given time """ def __init__(self, base_dir): base_dir = os.path.join(os.path.abspath(base_dir), 'nipype_mem') if not os.path.exists(base_dir): os.mkdir(base_dir) elif not os.path.isdir(base_dir): raise ValueError('base_dir should be a directory') self.base_dir = base_dir open(os.path.join(base_dir, 'log.current'), 'w') def cache(self, interface): """ Returns a callable that caches the output of an interface Parameters ========== interface: nipype interface The nipype interface class to be wrapped and cached Returns ======= pipe_func: a PipeFunc callable object An object that can be used as a function to apply the interface to arguments. Inputs of the interface are given as keyword arguments, bearing the same name as the name in the inputs specs of the interface. Examples ======== >>> from tempfile import mkdtemp >>> mem = Memory(mkdtemp()) >>> from nipype.interfaces import fsl Here we create a callable that can be used to apply an fsl.Merge interface to files >>> fsl_merge = mem.cache(fsl.Merge) Now we apply it to a list of files. We need to specify the list of input files and the dimension along which the files should be merged. >>> results = fsl_merge(in_files=['a.nii', 'b.nii'], ... dimension='t') # doctest: +SKIP We can retrieve the resulting file from the outputs: >>> results.outputs.merged_file # doctest: +SKIP '...' """ return PipeFunc(interface, self.base_dir, _MemoryCallback(self)) def _log_name(self, dir_name, job_name): """ Increment counters tracking which cached function get executed. """ base_dir = self.base_dir # Every counter is a file opened in append mode and closed # immediately to avoid race conditions in parallel computing: # file appends are atomic open(os.path.join(base_dir, 'log.current'), 'a').write('%s/%s\n' % (dir_name, job_name)) t = time.localtime() year_dir = os.path.join(base_dir, 'log.%i' % t.tm_year) try: os.mkdir(year_dir) except OSError: "Dir exists" month_dir = os.path.join(year_dir, '%02i' % t.tm_mon) try: os.mkdir(month_dir) except OSError: "Dir exists" open(os.path.join(month_dir, '%02i.log' % t.tm_mday), 'a').write('%s/%s\n' % (dir_name, job_name)) def clear_previous_runs(self, warn=True): """ Remove all the cache that where not used in the latest run of the memory object: i.e. since the corresponding Python object was created. Parameters ========== warn: boolean, optional If true, echoes warning messages for all directory removed """ base_dir = self.base_dir latest_runs = read_log(os.path.join(base_dir, 'log.current')) self._clear_all_but(latest_runs, warn=warn) def clear_runs_since(self, day=None, month=None, year=None, warn=True): """ Remove all the cache that where not used since the given date Parameters ========== day, month, year: integers, optional The integers specifying the latest day (in localtime) that a node should have been accessed to be kept. 
If not given, the current date is used. warn: boolean, optional If true, echoes warning messages for all directory removed """ t = time.localtime() day = day if day is not None else t.tm_mday month = month if month is not None else t.tm_mon year = year if year is not None else t.tm_year base_dir = self.base_dir cut_off_file = '%s/log.%i/%02i/%02i.log' % (base_dir, year, month, day) logs_to_flush = list() recent_runs = dict() for log_name in glob.glob('%s/log.*/*/*.log' % base_dir): if log_name < cut_off_file: logs_to_flush.append(log_name) else: recent_runs = read_log(log_name, recent_runs) self._clear_all_but(recent_runs, warn=warn) for log_name in logs_to_flush: os.remove(log_name) def _clear_all_but(self, runs, warn=True): """ Remove all the runs appart from those given to the function input. """ rm_all_but(self.base_dir, set(runs.keys()), warn=warn) for dir_name, job_names in runs.iteritems(): rm_all_but(os.path.join(self.base_dir, dir_name), job_names, warn=warn) def __repr__(self): return '%s(base_dir=%s)' % (self.__class__.__name__, self.base_dir) nipype-0.9.2/nipype/caching/tests/000077500000000000000000000000001227300005300170605ustar00rootroot00000000000000nipype-0.9.2/nipype/caching/tests/test_memory.py000066400000000000000000000031341227300005300220020ustar00rootroot00000000000000""" Test the nipype interface caching mechanism """ from tempfile import mkdtemp from shutil import rmtree from nose.tools import assert_equal from nipype.caching import Memory from nipype.pipeline.tests.test_engine import TestInterface from nipype.utils.config import NipypeConfig config = NipypeConfig() config.set_default_config() nb_runs = 0 class SideEffectInterface(TestInterface): def _run_interface(self, runtime): global nb_runs nb_runs += 1 runtime.returncode = 0 return runtime def test_caching(): temp_dir = mkdtemp(prefix='test_memory_') old_rerun = config.get('execution', 'stop_on_first_rerun') try: # Prevent rerun to check that evaluation is computed only once config.set('execution', 'stop_on_first_rerun', 'true') mem = Memory(temp_dir) first_nb_run = nb_runs results = mem.cache(SideEffectInterface)(input1=2, input2=1) assert_equal(nb_runs, first_nb_run + 1) assert_equal(results.outputs.output1, [1, 2]) results = mem.cache(SideEffectInterface)(input1=2, input2=1) # Check that the node hasn't been rerun assert_equal(nb_runs, first_nb_run + 1) assert_equal(results.outputs.output1, [1, 2]) results = mem.cache(SideEffectInterface)(input1=1, input2=1) # Check that the node hasn been rerun assert_equal(nb_runs, first_nb_run + 2) assert_equal(results.outputs.output1, [1, 1]) finally: rmtree(temp_dir) config.set('execution', 'stop_on_first_rerun', old_rerun) if __name__ == '__main__': test_caching() nipype-0.9.2/nipype/external/000077500000000000000000000000001227300005300161445ustar00rootroot00000000000000nipype-0.9.2/nipype/external/__init__.py000066400000000000000000000000001227300005300202430ustar00rootroot00000000000000nipype-0.9.2/nipype/external/cloghandler.py000066400000000000000000000353571227300005300210150ustar00rootroot00000000000000# Copyright 2008 Lowell Alleman # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

""" cloghandler.py: A smart replacement for the standard RotatingFileHandler

ConcurrentRotatingFileHandler: This class is a log handler which is a drop-in
replacement for the python standard log handler 'RotatingFileHandler', the
primary difference being that this handler will continue to write to the same
file if the file cannot be rotated for some reason, whereas the
RotatingFileHandler will strictly adhere to the maximum file size.
Unfortunately, if you are using the RotatingFileHandler on Windows, you will
find that once an attempted rotation fails, all subsequent log messages are
dropped. The other major advantage of this module is that multiple processes
can safely write to a single log file.

To put it another way: This module's top priority is preserving your log
records, whereas the standard library attempts to limit disk usage, which can
potentially drop log messages. If you are trying to determine which module to
use, there are a number of considerations: What is most important: strict disk
space usage or preservation of log messages? What OSes are you supporting? Can
you afford to have processes blocked by file locks?

Concurrent access is handled by using file locks, which should ensure that log
messages are not dropped or clobbered. This means that a file lock is acquired
and released for every log message that is written to disk. (On Windows, you
may also run into a temporary situation where the log file must be opened and
closed for each log message.) This can have potential performance
implications. In my testing, performance was more than adequate, but if you
need a high-volume or low-latency solution, I suggest you look elsewhere.

This module currently only supports the 'nt' and 'posix' platforms due to the
usage of the portalocker module. I do not have access to any other platforms
for testing; patches are welcome.

See the README file for an example usage of this module.

"""

__version__ = "$Id: cloghandler.py 6175 2009-11-02 18:40:35Z lowell $"
__author__ = "Lowell Alleman"
__all__ = [
    "ConcurrentRotatingFileHandler",
]

import os
import sys
from random import randint
from logging import Handler
from logging.handlers import BaseRotatingHandler

try:
    import codecs
except ImportError:
    codecs = None

# Question/TODO: Should we have a fallback mode if we can't load portalocker /
# we should still be better off than with the standard RotatingFileHandler
# class, right? We do some rename checking... that should prevent some file
# clobbering that the builtin class allows.

# sibling module that handles all the ugly platform-specific details of file locking
from .portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException

# A client can set this to true to automatically convert relative paths to
# absolute paths (which will also hide the absolute path warnings)
FORCE_ABSOLUTE_PATH = False


class ConcurrentRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file to the
    next when the current file reaches a certain size. Multiple processes can
    write to the log file concurrently, but this may mean that the file will
    exceed the given size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, debug=True, supress_abs_warn=False):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.

        On Windows, it is not possible to rename a file that is currently
        opened by another process. This means that it is not possible to
        rotate the log files if multiple processes are using the same log
        file. In this case, the current log file will continue to grow until
        the rotation can be completed successfully. In order for rotation to
        be possible, all of the other processes need to close the file first.
        A mechanism, called "degraded" mode, has been created for this
        scenario. In degraded mode, the log file is closed after each log
        message is written. So once all processes have entered degraded mode,
        the next rotate log attempt should be successful and then normal
        logging can be resumed.

        This log handler assumes that all concurrent processes logging to a
        single file are using only this class, and that the exact same
        parameters are provided to each instance of this class. If, for
        example, two different processes are using this class, but with
        different values for 'maxBytes' or 'backupCount', then odd behavior
        is expected. The same is true if this class is used by one
        application, but the RotatingFileHandler is used by another.

        NOTE: You should always provide 'filename' as an absolute path, since
        this class will need to re-open the file during rotation. If your
        application calls os.chdir() then subsequent log files could be
        created in the wrong directory.
        """
        # The question of absolute paths: I'm not sure what the 'right thing' is
        # to do here. RotatingFileHandler simply ignores this possibility. I was
        # going to call os.path.abspath(), but that potentially limits uses. For
        # example, on Linux (any posix system?) you can rename a directory of a
        # running app, and the app wouldn't notice as long as it only opens new
        # files using relative paths. But since that's not a "normal" thing to
        # do, having an app call os.chdir() is a much more likely scenario that
        # should be supported. For the moment, we are just going to warn the
        # user if they provide a relative path and do some other voodoo logic
        # that you'll just have to review for yourself.

        # if the given filename contains no path, we make an absolute path
        if not os.path.isabs(filename):
            if FORCE_ABSOLUTE_PATH or \
               not os.path.split(filename)[0]:
                filename = os.path.abspath(filename)
            elif not supress_abs_warn:
                from warnings import warn
                warn("The given 'filename' should be an absolute path.  If your "
                     "application calls os.chdir(), your logs may get messed up. "
                     "Use 'supress_abs_warn=True' to hide this message.")
        try:
            BaseRotatingHandler.__init__(self, filename, mode, encoding)
        except TypeError:
            # Due to a different logging release without encoding support
            # (Python 2.4.1 and earlier?)
            BaseRotatingHandler.__init__(self, filename, mode)
            self.encoding = encoding

        self._rotateFailed = False
        self.maxBytes = maxBytes
        self.backupCount = backupCount
        # Prevent multiple extensions on the lock file
        # (Only handles the normal "*.log" case.)
        if filename.endswith(".log"):
            lock_file = filename[:-4]
        else:
            lock_file = filename
        self.stream_lock = open(lock_file + ".lock", "w")

        # For debug mode, swap out the "_degrade()" method with a more verbose one.
        if debug:
            self._degrade = self._degrade_debug

    def _openFile(self, mode):
        if self.encoding:
            self.stream = codecs.open(self.baseFilename, mode, self.encoding)
        else:
            self.stream = open(self.baseFilename, mode)

    def acquire(self):
        """ Acquire thread and file locks. Also re-opens the log file when
        running in 'degraded' mode. """
        # handle thread lock
        Handler.acquire(self)
        lock(self.stream_lock, LOCK_EX)
        if self.stream.closed:
            self._openFile(self.mode)

    def release(self):
        """ Release file and thread locks. Flush stream and take care of
        closing stream in 'degraded' mode. """
        try:
            if not self.stream.closed:
                self.stream.flush()
                if self._rotateFailed:
                    self.stream.close()
        except IOError:
            if self._rotateFailed:
                self.stream.close()
        finally:
            try:
                unlock(self.stream_lock)
            finally:
                # release thread lock
                Handler.release(self)

    def close(self):
        """
        Closes the stream.
        """
        if not self.stream.closed:
            self.stream.flush()
            self.stream.close()
        Handler.close(self)

    def flush(self):
        """ flush(): Do nothing.

        Since a flush is issued in release(), we don't do it here. To do a
        flush here, it would be necessary to re-lock everything, and it is
        just easier and cleaner to do it all in release(), rather than
        requiring two lock ops per handle() call.

        Doing a flush() here would also introduce a window of opportunity for
        another process to write to the log file in between calling
        stream.write() and stream.flush(), which seems like a bad thing. """
        pass

    def _degrade(self, degrade, msg, *args):
        """ Set degrade mode or not. Ignore msg. """
        self._rotateFailed = degrade
        del msg, args   # avoid pychecker warnings

    def _degrade_debug(self, degrade, msg, *args):
        """ A more colorful version of _degrade(). (This is enabled by passing
        "debug=True" at initialization). """
        if degrade:
            if not self._rotateFailed:
                sys.stderr.write("Degrade mode - ENTERING - (pid=%d) %s\n" %
                                 (os.getpid(), msg % args))
                self._rotateFailed = True
        else:
            if self._rotateFailed:
                sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" %
                                 (os.getpid(), msg % args))
                self._rotateFailed = False

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.backupCount <= 0:
            # Don't keep any backups, just overwrite the existing backup file
            # Locking doesn't much matter here, since we are overwriting it anyway
            self.stream.close()
            self._openFile("w")
            return
        self.stream.close()
        try:
            # Attempt to rename logfile to tempname:
            # There is a slight race-condition here, but it seems unavoidable
            tmpname = None
            while not tmpname or os.path.exists(tmpname):
                tmpname = "%s.rotate.%08d" % (self.baseFilename, randint(0, 99999999))
            try:
                # Do a rename test to determine if we can successfully rename the log file
                os.rename(self.baseFilename, tmpname)
            except (IOError, OSError):
                exc_value = sys.exc_info()[1]
                self._degrade(True, "rename failed.  File in use?  "
                              "exception=%s", exc_value)
                return

            # Q: Is there some way to protect this code from a KeyboardInterrupt?
            # This isn't necessarily a data loss issue, but it certainly would
            # break the rotation process during my stress testing.

            # There is currently no mechanism in place to handle the situation
            # where one of these log files cannot be renamed. (Example: a user
            # opens "logfile.3" in Notepad.)
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    #print "%s -> %s" % (sfn, dfn)
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(tmpname, dfn)
            #print "%s -> %s" % (self.baseFilename, dfn)
            self._degrade(False, "Rotation completed")
        finally:
            self._openFile(self.mode)

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        For those that are keeping track, this differs from the standard
        library's RotatingFileHandler class. Because there is no promise to
        keep the file size under maxBytes, we ignore the length of the
        current record.
        """
        del record  # avoid pychecker warnings
        if self._shouldRollover():
            # if some other process already did the rollover we might have
            # checked log.1, so we reopen the stream and check again on
            # the right log file
            self.stream.close()
            self._openFile(self.mode)
            return self._shouldRollover()
        return False

    def _shouldRollover(self):
        if self.maxBytes > 0:  # are we rolling over?
            try:
                self.stream.seek(0, 2)  # due to non-posix-compliant Windows feature
            except IOError:
                return True
            if self.stream.tell() >= self.maxBytes:
                return True
            else:
                self._degrade(False, "Rotation done or not needed at this time")
        return False


# Publish this class to the "logging.handlers" module so that it can be used
# from a logging config file via logging.config.fileConfig().
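# Illustrative usage sketch (an editorial addition, not part of the original
# module; the logging.handlers registration described above follows right
# after this block). It shows one way a program could attach this handler to
# a logger, following the docstring's advice to pass an absolute 'filename'.
# The file name and the size/backup limits below are arbitrary example values.
# Because of the relative portalocker import above, run this module via its
# package (e.g. with "python -m") if you want to try the demo directly.
if __name__ == "__main__":
    import logging

    demo_log = logging.getLogger("cloghandler_demo")
    demo_log.setLevel(logging.DEBUG)
    # Roll over at roughly 512 KB and keep five numbered backups
    # ("demo.log.1" ... "demo.log.5"), as described in __init__() above.
    demo_handler = ConcurrentRotatingFileHandler(os.path.abspath("demo.log"),
                                                 maxBytes=512 * 1024,
                                                 backupCount=5)
    demo_log.addHandler(demo_handler)
    demo_log.info("ConcurrentRotatingFileHandler demo message (pid=%d)",
                  os.getpid())
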
import logging.handlers logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler nipype-0.9.2/nipype/external/d3.v3.min.js000066400000000000000000004350071227300005300201320ustar00rootroot00000000000000d3=function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(){}function o(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function a(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=aa.length;r>e;++e){var u=aa[e]+t;if(u in n)return u}}function c(){}function s(){}function l(n){function t(){for(var t,r=e,u=-1,i=r.length;++ue;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function N(n){return sa(n,da),n}function L(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t0&&(n=n.substring(0,a));var l=ya.get(n);return l&&(n=l,s=R),a?t?u:r:t?c:i}function z(n,t){return function(e){var r=Zo.event;Zo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Zo.event=r}}}function R(n,t){var e=z(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function D(){var n=".dragsuppress-"+ ++Ma,t="touchmove"+n,e="selectstart"+n,r="dragstart"+n,u="click"+n,i=Zo.select(Wo).on(t,f).on(e,f).on(r,f),o=Bo.style,a=o[xa];return o[xa]="none",function(t){function e(){i.on(u,null)}i.on(n,null),o[xa]=a,t&&(i.on(u,function(){f(),e()},!0),setTimeout(e,0))}}function P(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>_a&&(Wo.scrollX||Wo.scrollY)){e=Zo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();_a=!(u.f||u.e),e.remove()}return _a?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function U(n){return n>0?1:0>n?-1:0}function j(n){return n>1?0:-1>n?ba:Math.acos(n)}function H(n){return n>1?Sa:-1>n?-Sa:Math.asin(n)}function F(n){return((n=Math.exp(n))-1/n)/2}function O(n){return((n=Math.exp(n))+1/n)/2}function Y(n){return((n=Math.exp(2*n))-1)/(n+1)}function I(n){return(n=Math.sin(n/2))*n}function Z(){}function V(n,t,e){return new X(n,t,e)}function X(n,t,e){this.h=n,this.s=t,this.l=e}function $(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,ot(u(n+120),u(n),u(n-120))}function B(n,t,e){return new W(n,t,e)}function W(n,t,e){this.h=n,this.c=t,this.l=e}function J(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),G(e,Math.cos(n*=Aa)*t,Math.sin(n)*t)}function G(n,t,e){return new K(n,t,e)}function K(n,t,e){this.l=n,this.a=t,this.b=e}function Q(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=tt(u)*ja,r=tt(r)*Ha,i=tt(i)*Fa,ot(rt(3.2404542*u-1.5371385*r-.4985314*i),rt(-.969266*u+1.8760108*r+.041556*i),rt(.0556434*u-.2040259*r+1.0572252*i))}function nt(n,t,e){return n>0?B(Math.atan2(e,t)*Ca,Math.sqrt(t*t+e*e),n):B(0/0,0/0,n)}function tt(n){return 
n>.206893034?n*n*n:(n-4/29)/7.787037}function et(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function rt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ut(n){return ot(n>>16,255&n>>8,255&n)}function it(n){return ut(n)+""}function ot(n,t,e){return new at(n,t,e)}function at(n,t,e){this.r=n,this.g=t,this.b=e}function ct(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function st(n,t,e){var r,u,i,o=0,a=0,c=0;if(r=/([a-z]+)\((.*)\)/i.exec(n))switch(u=r[2].split(","),r[1]){case"hsl":return e(parseFloat(u[0]),parseFloat(u[1])/100,parseFloat(u[2])/100);case"rgb":return t(gt(u[0]),gt(u[1]),gt(u[2]))}return(i=Ia.get(n))?t(i.r,i.g,i.b):(null!=n&&"#"===n.charAt(0)&&(4===n.length?(o=n.charAt(1),o+=o,a=n.charAt(2),a+=a,c=n.charAt(3),c+=c):7===n.length&&(o=n.substring(1,3),a=n.substring(3,5),c=n.substring(5,7)),o=parseInt(o,16),a=parseInt(a,16),c=parseInt(c,16)),t(o,a,c))}function lt(n,t,e){var r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),V(r,u,c)}function ft(n,t,e){n=ht(n),t=ht(t),e=ht(e);var r=et((.4124564*n+.3575761*t+.1804375*e)/ja),u=et((.2126729*n+.7151522*t+.072175*e)/Ha),i=et((.0193339*n+.119192*t+.9503041*e)/Fa);return G(116*u-16,500*(r-u),200*(u-i))}function ht(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function gt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function pt(n){return"function"==typeof n?n:function(){return n}}function vt(n){return n}function dt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),mt(t,e,n,r)}}function mt(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Zo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Wo.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Zo.event;Zo.event=n;try{o.progress.call(i,c)}finally{Zo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Xo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Zo.rebind(i,o,"on"),null==r?i:i.get(yt(r))}function yt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function xt(){var n=Mt(),t=_t()-n;t>24?(isFinite(t)&&(clearTimeout($a),$a=setTimeout(xt,t)),Xa=0):(Xa=1,Wa(xt))}function Mt(){var n=Date.now();for(Ba=Za;Ba;)n>=Ba.t&&(Ba.f=Ba.c(n-Ba.t)),Ba=Ba.n;return n}function _t(){for(var n,t=Za,e=1/0;t;)t.f?t=n?n.n=t.n:Za=t.n:(t.t8?function(n){return 
n/e}:function(n){return n*e},symbol:n}}function wt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function St(n){return n+""}function kt(){}function Et(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function At(n,t){n&&ac.hasOwnProperty(n.type)&&ac[n.type](n,t)}function Ct(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++ua;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new $t(e,n,null,!0),s=new $t(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new $t(r,n,null,!1),s=new $t(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Xt(i),Xt(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Xt(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Wt))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Zo.merge(g);var n=Kt(m,p);g.length?Vt(g,Gt,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Jt(),M=t(x);return y}}function Wt(n){return n.length>1}function Jt(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:c,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Gt(n,t){return((n=n.x)[0]<0?n[1]-Sa-ka:Sa-n[1])-((t=t.x)[0]<0?t[1]-Sa-ka:Sa-t[1])}function Kt(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;sc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var f=s[0],h=f[0],g=f[1]/2+ba/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+ba/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=ua(_)>ba,w=p*x;if(sc.add(Math.atan2(w*Math.sin(_),v*M+w*Math.cos(_))),i+=b?_+(_>=0?wa:-wa):_,b^h>=e^m>=e){var S=zt(Tt(f),Tt(n));Pt(S);var k=zt(u,S);Pt(k);var E=(b^_>=0?-1:1)*H(k[2]);(r>E||r===E&&(S[0]||S[1]))&&(o+=b^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-ka>i||ka>i&&0>sc)^1&o}function Qt(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?ba:-ba,c=ua(i-e);ua(c-ba)0?Sa:-Sa),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=ba&&(ua(e-u)ka?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function te(n,t,e,r){var u;if(null==n)u=e*Sa,r.point(-ba,u),r.point(0,u),r.point(ba,u),r.point(ba,0),r.point(ba,-u),r.point(0,-u),r.point(-ba,-u),r.point(-ba,0),r.point(-ba,u);else if(ua(n[0]-t[0])>ka){var i=n[0]i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?ba:-ba),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(jt(e,g)||jt(p,g))&&(p[0]+=ka,p[1]+=ka,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&jt(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=Tt(n),u=Tt(t),o=[1,0,0],a=zt(r,u),c=qt(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=zt(o,a),p=Dt(o,f),v=Dt(a,h);Rt(p,v);var d=g,m=qt(p,d),y=qt(d,d),x=m*m-y*(qt(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=Dt(d,(-m-M)/y);if(Rt(_,p),_=Ut(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=ua(A-ba)A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(ua(_[0]-w)ba^(w<=_[0]&&_[0]<=S)){var L=Dt(d,(-m+M)/y);return Rt(L,p),[_,Ut(L)]}}}function u(t,e){var r=o?n:ba-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=ua(i)>ka,c=Ne(n,6*Aa);return Bt(t,e,c,o?[0,-n]:[-ba,n-ba])}function re(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function ue(n,t,e,r){function u(r,u){return ua(r[0]-n)0?0:3:ua(r[0]-e)0?2:1:ua(r[1]-t)0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=m.length,r=n[1],u=0;e>u;++u)for(var i,o=1,a=m[u],c=a.length,l=a[0];c>o;++o)i=a[o],l[1]<=r?i[1]>r&&s(l,i,n)>0&&++t:i[1]<=r&&s(l,i,n)<0&&--t,l=i;return 0!==t}function s(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(e[0]-n[0])*(t[1]-n[1])}function l(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function f(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function h(n,t){f(n,t)&&a.point(n,t)}function g(){L.point=v,m&&m.push(y=[]),k=!0,S=!1,b=w=0/0}function p(){d&&(v(x,M),_&&S&&C.rejoin(),d.push(C.buffer())),L.point=h,S&&a.lineEnd()}function v(n,t){n=Math.max(-Sc,Math.min(Sc,n)),t=Math.max(-Sc,Math.min(Sc,t));var e=f(n,t);if(m&&y.push([n,t]),k)x=n,M=t,_=e,k=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&S)a.point(n,t);else{var r={a:{x:b,y:w},b:{x:n,y:t}};N(r)?(S||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),E=!1):e&&(a.lineStart(),a.point(n,t),E=!1)}b=n,w=t,S=e}var d,m,y,x,M,_,b,w,S,k,E,A=a,C=Jt(),N=re(n,t,e,r),L={point:h,lineStart:g,lineEnd:p,polygonStart:function(){a=C,d=[],m=[],E=!0},polygonEnd:function(){a=A,d=Zo.merge(d);var t=c([n,r]),e=E&&t,u=d.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),l(null,null,1,a),a.lineEnd()),u&&Vt(d,i,t,l,a),a.polygonEnd()),d=m=y=null}};return L}}function ie(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function oe(n){var t=0,e=ba/3,r=_e(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*ba/180,e=n[1]*ba/180):[180*(t/ba),180*(e/ba)]},u}function ae(n,t){function e(n,t){var 
e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,H((i-(n*n+e*e)*u*u)/(2*u))]},e}function ce(){function n(n,t){Ec+=u*n-r*t,r=n,u=t}var t,e,r,u;Tc.point=function(i,o){Tc.point=n,t=r=i,e=u=o},Tc.lineEnd=function(){n(t,e)}}function se(n,t){Ac>n&&(Ac=n),n>Nc&&(Nc=n),Cc>t&&(Cc=t),t>Lc&&(Lc=t)}function le(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=fe(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=fe(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function fe(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function he(n,t){gc+=n,pc+=t,++vc}function ge(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);dc+=o*(t+n)/2,mc+=o*(e+r)/2,yc+=o,he(t=n,e=r)}var t,e;zc.point=function(r,u){zc.point=n,he(t=r,e=u)}}function pe(){zc.point=he}function ve(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);dc+=o*(r+n)/2,mc+=o*(u+t)/2,yc+=o,o=u*n-r*t,xc+=o*(r+n),Mc+=o*(u+t),_c+=3*o,he(r=n,u=t)}var t,e,r,u;zc.point=function(i,o){zc.point=n,he(t=r=i,e=u=o)},zc.lineEnd=function(){n(t,e)}}function de(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,wa)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:c};return a}function me(n){function t(t){function r(e,r){e=n(e,r),t.point(e[0],e[1])}function u(){x=0/0,S.point=o,t.lineStart()}function o(r,u){var o=Tt([r,u]),a=n(r,u);e(x,M,y,_,b,w,x=a[0],M=a[1],y=r,_=o[0],b=o[1],w=o[2],i,t),t.point(x,M)}function a(){S.point=r,t.lineEnd()}function c(){u(),S.point=s,S.lineEnd=l}function s(n,t){o(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=o}function l(){e(x,M,y,_,b,w,g,p,f,v,d,m,i,t),S.lineEnd=a,a()}var f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:r,lineStart:u,lineEnd:a,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=u}};return S}function e(t,i,o,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-i,M=y*y+x*x;if(M>4*r&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=ua(ua(w)-1)r||ua((y*L+x*T)/M-.5)>.3||u>a*g+c*p+s*v)&&(e(t,i,o,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),e(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var r=.5,u=Math.cos(30*Aa),i=16;return t.precision=function(n){return arguments.length?(i=(r=n*n)>0&&16,t):Math.sqrt(r)},t}function ye(n){this.stream=n}function xe(n){var t=me(function(t,e){return n([t*Ca,e*Ca])});return function(n){var e=new ye(n=t(n));return e.point=function(t,e){n.point(t*Aa,e*Aa)},e}}function Me(n){return _e(function(){return n})()}function _e(n){function t(n){return n=a(n[0]*Aa,n[1]*Aa),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*Ca,n[1]*Ca]}function r(){a=ie(o=ke(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=me(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=wc,_=vt,b=null,w=null;return t.stream=function(n){return 
l&&(l.valid=!1),l=be(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,wc):ee((b=+n)*Aa),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?ue(n[0][0],n[0][1],n[1][0],n[1][1]):vt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return arguments.length?(v=n[0]%360*Aa,d=n[1]%360*Aa,r()):[v*Ca,d*Ca]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Aa,y=n[1]%360*Aa,x=n.length>2?n[2]%360*Aa:0,r()):[m*Ca,y*Ca,x*Ca]},Zo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function be(n){var t=new ye(n);return t.point=function(t,e){n.point(t*Aa,e*Aa)},t}function we(n,t){return[n,t]}function Se(n,t){return[n>ba?n-wa:-ba>n?n+wa:n,t]}function ke(n,t,e){return n?t||e?ie(Ae(n),Ce(t,e)):Ae(n):t||e?Ce(t,e):Se}function Ee(n){return function(t,e){return t+=n,[t>ba?t-wa:-ba>t?t+wa:t,e]}}function Ae(n){var t=Ee(n);return t.invert=Ee(-n),t}function Ce(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),H(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),H(l*r-a*u)]},e}function Ne(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=Le(e,u),i=Le(e,i),(o>0?i>u:u>i)&&(u+=o*wa)):(u=n+o*wa,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=Ut([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function Le(n,t){var e=Tt(t);e[0]-=n,Pt(e);var r=j(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-ka)%(2*Math.PI)}function Te(n,t,e){var r=Zo.range(n,t-ka,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function qe(n,t,e){var r=Zo.range(n,t-ka,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function ze(n){return n.source}function Re(n){return n.target}function De(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(I(r-t)+u*o*I(e-n))),g=1/Math.sin(h),p=h?function(n){var t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*Ca,Math.atan2(o,Math.sqrt(r*r+u*u))*Ca]}:function(){return[n*Ca,t*Ca]};return p.distance=h,p}function Pe(){function n(n,u){var i=Math.sin(u*=Aa),o=Math.cos(u),a=ua((n*=Aa)-t),c=Math.cos(a);Rc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;Dc.point=function(u,i){t=u*Aa,e=Math.sin(i*=Aa),r=Math.cos(i),Dc.point=n},Dc.lineEnd=function(){Dc.point=Dc.lineEnd=c}}function Ue(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function je(n,t){function e(n,t){var e=ua(ua(t)-Sa)0}function Xe(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function $e(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Be(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function We(){dr(this),this.edge=this.site=this.circle=null}function Je(n){var t=$c.pop()||new We;return t.site=n,t}function Ge(n){ar(n),Zc.remove(n),$c.push(n),dr(n)}function 
Ke(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Ge(n);for(var c=i;c.circle&&ua(e-c.circle.x)l;++l)s=a[l],c=a[l-1],gr(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=fr(c.site,s.site,null,u),or(c),or(s)}function Qe(n){for(var t,e,r,u,i=n.x,o=n.y,a=Zc._;a;)if(r=nr(a,o)-i,r>ka)a=a.L;else{if(u=i-tr(a,o),!(u>ka)){r>-ka?(t=a.P,e=a):u>-ka?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Je(n);if(Zc.insert(t,c),t||e){if(t===e)return ar(t),e=Je(t.site),Zc.insert(c,e),c.edge=e.edge=fr(t.site,c.site),or(t),or(e),void 0;if(!e)return c.edge=fr(t.site,c.site),void 0;ar(t),ar(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};gr(e.edge,s,p,M),c.edge=fr(s,n,null,M),e.edge=fr(n,p,null,M),or(t),or(e)}}function nr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function tr(n,t){var e=n.N;if(e)return nr(e,t);var r=n.site;return r.y===t?r.x:1/0}function er(n){this.site=n,this.edges=[]}function rr(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Ic,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(ua(r-t)>ka||ua(u-e)>ka)&&(a.splice(o,0,new pr(hr(i.site,l,ua(r-f)ka?{x:f,y:ua(t-f)ka?{x:ua(e-p)ka?{x:h,y:ua(t-h)ka?{x:ua(e-g)=-Ea)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Bc.pop()||new ir;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Xc._;x;)if(m.yd||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.yr||r>1)if(h>p){if(i){if(i.y>=s)return }else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.yg){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.xr;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=Nr(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function Tr(n,t){for(var e,r=Zo.interpolators.length;--r>=0&&!(e=Zo.interpolators[r](n,t)););return e}function qr(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(Tr(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function zr(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function Rr(n){return function(t){return 1-n(1-t)}}function Dr(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function Pr(n){return n*n}function Ur(n){return n*n*n}function jr(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function Hr(n){return function(t){return Math.pow(t,n)}}function Fr(n){return 1-Math.cos(n*Sa)}function Or(n){return Math.pow(2,10*(n-1))}function Yr(n){return 1-Math.sqrt(1-n*n)}function Ir(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/wa*Math.asin(1/n):(n=1,e=t/4),function(r){return 
1+n*Math.pow(2,-10*r)*Math.sin((r-e)*wa/t)}}function Zr(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function Vr(n){return 1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Xr(n,t){n=Zo.hcl(n),t=Zo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return J(e+i*n,r+o*n,u+a*n)+""}}function $r(n,t){n=Zo.hsl(n),t=Zo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return $(e+i*n,r+o*n,u+a*n)+""}}function Br(n,t){n=Zo.lab(n),t=Zo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return Q(e+i*n,r+o*n,u+a*n)+""}}function Wr(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Jr(n){var t=[n.a,n.b],e=[n.c,n.d],r=Kr(t),u=Gr(t,e),i=Kr(Qr(e,t,-u))||0;t[0]*e[1]180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:Nr(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:Nr(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:Nr(g[0],p[0])},{i:e-2,x:Nr(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++ie;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function bu(n){return n.reduce(wu,0)}function wu(n,t){return n+t[1]}function Su(n,t){return ku(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ku(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function Eu(n){return[Zo.min(n),Zo.max(n)]}function Au(n,t){return n.parent==t.parent?1:2}function Cu(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function Nu(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function Lu(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i0&&(n=r);return n}function Tu(n,t){return n.x-t.x}function qu(n,t){return t.x-n.x}function zu(n,t){return n.depth-t.depth}function Ru(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function Pu(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function Uu(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function ju(n,t){return n.value-t.value}function Hu(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Fu(n,t){n._pack_next=t,t._pack_prev=n}function Ou(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function Yu(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(Iu),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Xu(r,u,i),t(i),Hu(r,i),r._pack_prev=i,Hu(i,u),u=r._pack_next,o=3;s>o;o++){Xu(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(Ou(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!Ou(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.ro;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Zu)}}function Iu(n){n._pack_next=n._pack_prev=n}function Zu(n){delete n._pack_next,delete n._pack_prev}function Vu(n,t,e,r){var 
u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++iu&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function Qu(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function ni(n){return n.rangeExtent?n.rangeExtent():Qu(n.range())}function ti(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function ei(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function ri(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:os}function ui(n,t,e,r){var u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]2?ui:ti,c=r?eu:tu;return o=u(n,t,c,e),a=u(t,n,c,Tr),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Wr)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return si(n,t)},i.tickFormat=function(t,e){return li(n,t,e)},i.nice=function(t){return ai(n,t),u()},i.copy=function(){return ii(n,t,e,r)},u()}function oi(n,t){return Zo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function ai(n,t){return ei(n,ri(ci(n,t)[2]))}function ci(n,t){null==t&&(t=10);var e=Qu(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function si(n,t){return Zo.range.apply(Zo,ci(n,t))}function li(n,t,e){var r=-Math.floor(Math.log(ci(n,t)[2])/Math.LN10+.01);return Zo.format(e?e.replace(tc,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+(r-2*("%"===l)),l].join("")}):",."+r+"f")}function fi(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=ei(r.map(u),e?Math:cs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=Qu(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++0;h--)o.push(i(s)*h);for(s=0;o[s]c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return as;arguments.length<2?t=as:"function"!=typeof t&&(t=Zo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return fi(n.copy(),t,e,r)},oi(o,n)}function hi(n,t,e){function r(t){return n(u(t))}var u=gi(t),i=gi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return si(e,n)},r.tickFormat=function(n,t){return li(e,n,t)},r.nice=function(n){return r.domain(ai(e,n))},r.exponent=function(o){return arguments.length?(u=gi(t=o),i=gi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return hi(n.copy(),t,e)},oi(r,n)}function gi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function pi(n,t){function e(e){return 
o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Zo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++ae?[0/0,0/0]:[e>0?u[e-1]:n[0],et?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return di(n,t,e)},u()}function mi(n,t){function e(e){return e>=e?t[Zo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return mi(n,t)},e}function yi(n){function t(n){return+n}return t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return si(n,t)},t.tickFormat=function(t,e){return li(n,t,e)},t.copy=function(){return yi(n)},t}function xi(n){return n.innerRadius}function Mi(n){return n.outerRadius}function _i(n){return n.startAngle}function bi(n){return n.endAngle}function wi(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=pt(e),p=pt(r);++f1&&u.push("H",r[0]),u.join("")}function Ai(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Ii(n){return n.length<3?Si(n):n[0]+qi(n,Yi(n))}function Zi(n){for(var t,e,r,u=-1,i=n.length;++ue?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Zo.timer(function(){return p.c=c(r||1)?Zt:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ba,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function eo(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function ro(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function uo(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function io(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new Ts(e-1)),1),e}function i(n,e){return t(n=new Ts(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{Ts=uo;var r=new uo;return r._=n,o(r,t,e)}finally{Ts=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=oo(n);return c.floor=c,c.round=oo(r),c.ceil=oo(u),c.offset=oo(i),c.range=a,n}function oo(n){return function(t,e){try{Ts=uo;var r=new uo;return r._=t,n(r,e)._}finally{Ts=Date}}}function ao(n){function t(t){for(var r,u,i,o=[],a=-1,c=0;++aa;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=Ks[o in Js?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function so(n){return new RegExp("^(?:"+n.map(Zo.requote).join("|")+")","i")}function lo(n){for(var t=new u,e=-1,r=n.length;++en?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function ho(n,t,e){Is.lastIndex=0;var r=Is.exec(t.substring(e));return r?(n.w=Zs.get(r[0].toLowerCase()),e+r[0].length):-1}function 
go(n,t,e){Os.lastIndex=0;var r=Os.exec(t.substring(e));return r?(n.w=Ys.get(r[0].toLowerCase()),e+r[0].length):-1}function po(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function vo(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function mo(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function yo(n,t,e){$s.lastIndex=0;var r=$s.exec(t.substring(e));return r?(n.m=Bs.get(r[0].toLowerCase()),e+r[0].length):-1}function xo(n,t,e){Vs.lastIndex=0;var r=Vs.exec(t.substring(e));return r?(n.m=Xs.get(r[0].toLowerCase()),e+r[0].length):-1}function Mo(n,t,e){return co(n,Gs.c.toString(),t,e)}function _o(n,t,e){return co(n,Gs.x.toString(),t,e)}function bo(n,t,e){return co(n,Gs.X.toString(),t,e)}function wo(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function So(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e,e+2));return r?(n.y=Eo(+r[0]),e+r[0].length):-1}function ko(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Eo(n){return n+(n>68?1900:2e3)}function Ao(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Co(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function No(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Lo(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function To(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function qo(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function zo(n,t,e){Qs.lastIndex=0;var r=Qs.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function Ro(n,t,e){var r=nl.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}function Do(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(ua(t)/60),u=ua(t)%60;return e+fo(r,"0",2)+fo(u,"0",2)}function Po(n,t,e){Ws.lastIndex=0;var r=Ws.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function Uo(n){function t(n){try{Ts=uo;var t=new Ts;return t._=n,e(t)}finally{Ts=Date}}var e=ao(n);return t.parse=function(n){try{Ts=uo;var t=e.parse(n);return t&&t._}finally{Ts=Date}},t.toString=e.toString,t}function jo(n){return n.toISOString()}function Ho(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Zo.bisect(el,u);return i==el.length?[t.year,ci(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/el[i-1]1?{floor:function(t){for(;e(t=n.floor(t));)t=Fo(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Fo(+t+1);return t}}:n))},r.ticks=function(n,t){var e=Qu(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return i&&(n=i[0],t=i[1]),n.range(e[0],Fo(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Ho(n.copy(),t,e)},oi(r,n)}function Fo(n){return new Date(n)}function Oo(n){return function(t){for(var e=n.length-1,r=n[e];!r[1](t);)r=n[--e];return r[0](t)}}function Yo(n){return JSON.parse(n.responseText)}function Io(n){var t=$o.createRange();return t.selectNode($o.body),t.createContextualFragment(n.responseText)}var Zo={version:"3.3.8"};Date.now||(Date.now=function(){return+new Date});var Vo=[].slice,Xo=function(n){return 
Vo.call(n)},$o=document,Bo=$o.documentElement,Wo=window;try{Xo(Bo.childNodes)[0].nodeType}catch(Jo){Xo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{$o.createElement("div").style.setProperty("opacity",0,"")}catch(Go){var Ko=Wo.Element.prototype,Qo=Ko.setAttribute,na=Ko.setAttributeNS,ta=Wo.CSSStyleDeclaration.prototype,ea=ta.setProperty;Ko.setAttribute=function(n,t){Qo.call(this,n,t+"")},Ko.setAttributeNS=function(n,t,e){na.call(this,n,t,e+"")},ta.setProperty=function(n,t,e){ea.call(this,n,t+"",e)}}Zo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Zo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Zo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u=e);)e=void 0;for(;++ur&&(e=r)}else{for(;++u=e);)e=void 0;for(;++ur&&(e=r)}return e},Zo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u=e);)e=void 0;for(;++ue&&(e=r)}else{for(;++u=e);)e=void 0;for(;++ue&&(e=r)}return e},Zo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i=e);)e=u=void 0;for(;++ir&&(e=r),r>u&&(u=r))}else{for(;++i=e);)e=void 0;for(;++ir&&(e=r),r>u&&(u=r))}return[e,u]},Zo.sum=function(n,t){var e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i1&&(t=t.map(e)),t=t.filter(n),t.length?Zo.quantile(t.sort(Zo.ascending),.5):void 0},Zo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1; n.call(t,t[i],i)r;){var i=r+u>>>1;er?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Zo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Zo.min(arguments,t),r=new Array(e);++n=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var ua=Math.abs;Zo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(ua(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Zo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Zo.set=function(n){var t=new i;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(i,{has:function(n){return ia+n in this},add:function(n){return this[ia+n]=!0,n},remove:function(n){return n=ia+n,n in this&&delete this[n]},values:function(){var n=[];return this.forEach(function(t){n.push(t)}),n},forEach:function(n){for(var t in this)t.charCodeAt(0)===oa&&n.call(this,t.substring(1))}}),Zo.behavior={},Zo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Zo.event=null,Zo.requote=function(n){return n.replace(ca,"\\$&")};var ca=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,sa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},la=function(n,t){return t.querySelector(n)},fa=function(n,t){return t.querySelectorAll(n)},ha=Bo[a(Bo,"matchesSelector")],ga=function(n,t){return ha.call(n,t)};"function"==typeof 
Sizzle&&(la=function(n,t){return Sizzle(n,t)[0]||null},fa=function(n,t){return Sizzle.uniqueSort(Sizzle(n,t))},ga=Sizzle.matchesSelector),Zo.selection=function(){return ma};var pa=Zo.selection.prototype=[];pa.select=function(n){var t,e,r,u,i=[];n=v(n);for(var o=-1,a=this.length;++o=0&&(e=n.substring(0,t),n=n.substring(t+1)),va.hasOwnProperty(e)?{space:va[e],local:n}:n}},pa.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Zo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(m(t,n[t]));return this}return this.each(m(n,t))},pa.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=n.trim().split(/^|\s+/g)).length,u=-1;if(t=e.classList){for(;++ur){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(b(e,n[e],t));return this}if(2>r)return Wo.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(b(n,t,e))},pa.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(w(t,n[t]));return this}return this.each(w(n,t))},pa.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},pa.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},pa.append=function(n){return n=S(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},pa.insert=function(n,t){return n=S(n),t=v(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},pa.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},pa.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new u,x=[];for(r=-1;++rr;++r)p[r]=k(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++oi;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a)&&t.push(r)}return p(u)},pa.order=function(){for(var n=-1,t=this.length;++n=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},pa.sort=function(n){n=A.apply(this,arguments);for(var t=-1,e=this.length;++tn;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},pa.size=function(){var n=0;return this.each(function(){++n}),n};var da=[];Zo.selection.enter=N,Zo.selection.enter.prototype=da,da.append=pa.append,da.empty=pa.empty,da.node=pa.node,da.call=pa.call,da.size=pa.size,da.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++ar){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(q(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(q(n,t,e))};var ya=Zo.map({mouseenter:"mouseover",mouseleave:"mouseout"});ya.forEach(function(n){"on"+n in $o&&ya.remove(n)});var xa=a(Bo.style,"userSelect"),Ma=0;Zo.mouse=function(n){return P(n,h())};var _a=/WebKit/.test(Wo.navigator.userAgent)?-1:0;Zo.touches=function(n,t){return 
arguments.length<2&&(t=h().touches),t?Xo(t).map(function(t){var e=P(n,t);return e.identifier=t.identifier,e}):[]},Zo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Zo.event.changedTouches[0].identifier}function e(n,t){return Zo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Zo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Zo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Zo.select(Wo).on(e+"."+p,o).on(r+"."+p,a),y=D();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=g(n,"drag","dragstart","dragend"),i=null,o=r(c,Zo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Zo.rebind(n,u,"on")};var ba=Math.PI,wa=2*ba,Sa=ba/2,ka=1e-6,Ea=ka*ka,Aa=ba/180,Ca=180/ba,Na=Math.SQRT2,La=2,Ta=4;Zo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=O(v),o=i/(La*h)*(e*Y(Na*t+v)-F(v));return[r+o*s,u+o*l,i*e/O(Na*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Na*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+Ta*f)/(2*i*La*h),p=(c*c-i*i-Ta*f)/(2*c*La*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Na;return e.duration=1e3*y,e},Zo.behavior.zoom=function(){function n(n){n.on(A,s).on(Ra+".zoom",h).on(C,p).on("dblclick.zoom",v).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Zo.mouse(r),h),a(i)}function e(){f.on(C,Wo===r?p:null).on(N,null),g(l&&Zo.event.target===s),c(i)}var r=this,i=q.of(r,arguments),s=Zo.event.target,l=0,f=Zo.select(Wo).on(C,n).on(N,e),h=t(Zo.mouse(r)),g=D();T.call(r),o(i)}function l(){function n(){var n=Zo.touches(p);return g=S.k,n.forEach(function(n){n.identifier in d&&(d[n.identifier]=t(n))}),n}function e(){for(var t=Zo.event.changedTouches,e=0,i=t.length;i>e;++e)d[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=d[s.identifier];r(2*S.k),u(s,l),f(),a(v)}x=c}else if(o.length>1){var s=o[0],h=o[1],g=s[0]-h[0],p=s[1]-h[1];m=g*g+p*p}}function i(){for(var n,t,e,i,o=Zo.touches(p),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=d[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*g)}x=null,u(n,t),a(v)}function h(){if(Zo.event.touches.length){for(var t=Zo.event.changedTouches,e=0,r=t.length;r>e;++e)delete d[t[e].identifier];for(var u in d)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(v)}var g,p=this,v=q.of(p,arguments),d={},m=0,y=Zo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Zo.select(Wo).on(M,i).on(_,h),w=Zo.select(p).on(A,null).on(L,e),k=D();T.call(p),e(),o(v)}function h(){var 
n=q.of(this,arguments);y?clearTimeout(y):(T.call(this),o(n)),y=setTimeout(function(){y=null,c(n)},50),f();var e=m||Zo.mouse(this);d||(d=t(e)),r(Math.pow(2,.002*qa())*S.k),u(e,d),a(n)}function p(){d=null}function v(){var n=q.of(this,arguments),e=Zo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Zo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var d,m,y,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=za,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",q=g(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=q.of(this,arguments),t=S;Ms?Zo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var e=k[0],r=k[1],u=e/2,i=r/2,o=Zo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?za:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(m=t&&[+t[0],+t[1]],n):m},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Zo.rebind(n,q,"on")};var qa,za=[0,1/0],Ra="onwheel"in $o?(qa=function(){return-Zo.event.deltaY*(Zo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in $o?(qa=function(){return Zo.event.wheelDelta},"mousewheel"):(qa=function(){return-Zo.event.detail},"MozMousePixelScroll");Z.prototype.toString=function(){return this.rgb()+""},Zo.hsl=function(n,t,e){return 1===arguments.length?n instanceof X?V(n.h,n.s,n.l):st(""+n,lt,V):V(+n,+t,+e)};var Da=X.prototype=new Z;Da.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),V(this.h,this.s,this.l/n)},Da.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),V(this.h,this.s,n*this.l)},Da.rgb=function(){return $(this.h,this.s,this.l)},Zo.hcl=function(n,t,e){return 1===arguments.length?n instanceof W?B(n.h,n.c,n.l):n instanceof K?nt(n.l,n.a,n.b):nt((n=ft((n=Zo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):B(+n,+t,+e)};var Pa=W.prototype=new Z;Pa.brighter=function(n){return B(this.h,this.c,Math.min(100,this.l+Ua*(arguments.length?n:1)))},Pa.darker=function(n){return B(this.h,this.c,Math.max(0,this.l-Ua*(arguments.length?n:1)))},Pa.rgb=function(){return J(this.h,this.c,this.l).rgb()},Zo.lab=function(n,t,e){return 1===arguments.length?n instanceof K?G(n.l,n.a,n.b):n instanceof W?J(n.l,n.c,n.h):ft((n=Zo.rgb(n)).r,n.g,n.b):G(+n,+t,+e)};var Ua=18,ja=.95047,Ha=1,Fa=1.08883,Oa=K.prototype=new Z;Oa.brighter=function(n){return G(Math.min(100,this.l+Ua*(arguments.length?n:1)),this.a,this.b)},Oa.darker=function(n){return G(Math.max(0,this.l-Ua*(arguments.length?n:1)),this.a,this.b)},Oa.rgb=function(){return Q(this.l,this.a,this.b)},Zo.rgb=function(n,t,e){return 1===arguments.length?n instanceof at?ot(n.r,n.g,n.b):st(""+n,ot,$):ot(~~n,~~t,~~e)};var Ya=at.prototype=new Z;Ya.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return 
t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),ot(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):ot(u,u,u)},Ya.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),ot(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Ya.hsl=function(){return lt(this.r,this.g,this.b)},Ya.toString=function(){return"#"+ct(this.r)+ct(this.g)+ct(this.b)};var Ia=Zo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Ia.forEach(function(n,t){Ia.set(n,ut(t))}),Zo.functor=pt,Zo.xhr=dt(vt),Zo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=Zo.xhr(n,t,i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o.row(e)}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function o(t){return t.map(a).join(n)}function a(n){return c.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var c=new 
RegExp('["'+n+"\n]"),s=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=c)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==s)continue;return n.substring(t,l-a)}return n.substring(t)}for(var r,u,i={},o={},a=[],c=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new i,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(a).join(n)].concat(t.map(function(t){return u.map(function(n){return a(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(o).join("\n")},e},Zo.csv=Zo.dsv(",","text/csv"),Zo.tsv=Zo.dsv(" ","text/tab-separated-values");var Za,Va,Xa,$a,Ba,Wa=Wo[a(Wo,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Zo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};Va?Va.n=i:Za=i,Va=i,Xa||($a=clearTimeout($a),Xa=1,Wa(xt))},Zo.timer.flush=function(){Mt(),_t()};var Ja=".",Ga=",",Ka=[3,3],Qa="$",nc=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(bt);Zo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Zo.round(n,wt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),nc[8+e/3]},Zo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)},Zo.format=function(n){var t=tc.exec(n),e=t[1]||" ",r=t[2]||">",u=t[3]||"",i=t[4]||"",o=t[5],a=+t[6],c=t[7],s=t[8],l=t[9],f=1,h="",g=!1;switch(s&&(s=+s.substring(1)),(o||"0"===e&&"="===r)&&(o=e="0",r="=",c&&(a-=Math.floor((a-1)/4))),l){case"n":c=!0,l="g";break;case"%":f=100,h="%",l="f";break;case"p":f=100,h="%",l="r";break;case"b":case"o":case"x":case"X":"#"===i&&(i="0"+l.toLowerCase());case"c":case"d":g=!0,s=0;break;case"s":f=-1,l="r"}"#"===i?i="":"$"===i&&(i=Qa),"r"!=l||s||(l="g"),null!=s&&("g"==l?s=Math.max(1,Math.min(21,s)):("e"==l||"f"==l)&&(s=Math.max(0,Math.min(20,s)))),l=ec.get(l)||St;var p=o&&c;return function(n){if(g&&n%1)return"";var t=0>n||0===n&&0>1/n?(n=-n,"-"):u;if(0>f){var v=Zo.formatPrefix(n,s);n=v.scale(n),h=v.symbol}else n*=f;n=l(n,s);var d=n.lastIndexOf("."),m=0>d?n:n.substring(0,d),y=0>d?"":Ja+n.substring(d+1);!o&&c&&(m=rc(m));var x=i.length+m.length+y.length+(p?0:t.length),M=a>x?new Array(x=a-x+1).join(e):"";return p&&(m=rc(M+m)),t+=i,n=m+y,("<"===r?t+n+M:">"===r?M+t+n:"^"===r?M.substring(0,x>>=1)+t+n+M.substring(x):t+(p?n:M+n))+h}};var tc=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,ec=Zo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Zo.round(n,wt(n,t))).toFixed(Math.max(0,Math.min(20,wt(n*(1+1e-15),t))))}}),rc=vt;if(Ka){var uc=Ka.length;rc=function(n){for(var t=n.length,e=[],r=0,u=Ka[0];t>0&&u>0;)e.push(n.substring(t-=u,t+u)),u=Ka[r=(r+1)%uc];return 
e.reverse().join(Ga)}}Zo.geo={},kt.prototype={s:0,t:0,add:function(n){Et(n,this.t,ic),Et(ic.s,this.s,this),this.s?this.t+=ic.t:this.s=ic.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var ic=new kt;Zo.geo.stream=function(n,t){n&&oc.hasOwnProperty(n.type)?oc[n.type](n,t):At(n,t)};var oc={Feature:function(n,t){At(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++rn?4*ba+n:n,lc.lineStart=lc.lineEnd=lc.point=c}};Zo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=Tt([t*Aa,e*Aa]);if(m){var u=zt(m,r),i=[u[1],-u[0],0],o=zt(i,u);Pt(o),o=Ut(o);var c=t-p,s=c>0?1:-1,v=o[0]*Ca*s,d=ua(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*Ca;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*Ca;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=ua(r)>180?r+(r>0?360:-360):r}else v=n,d=e;lc.point(n,e),t(n,e)}function i(){lc.lineStart()}function o(){u(v,d),lc.lineEnd(),ua(y)>ka&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:nsc?(l=-(h=180),f=-(g=90)):y>ka?g=90:-ka>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Zo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Zo.geo.centroid=function(n){fc=hc=gc=pc=vc=dc=mc=yc=xc=Mc=_c=0,Zo.geo.stream(n,bc);var t=xc,e=Mc,r=_c,u=t*t+e*e+r*r;return Ea>u&&(t=dc,e=mc,r=yc,ka>hc&&(t=gc,e=pc,r=vc),u=t*t+e*e+r*r,Ea>u)?[0/0,0/0]:[Math.atan2(e,t)*Ca,H(r/Math.sqrt(u))*Ca]};var fc,hc,gc,pc,vc,dc,mc,yc,xc,Mc,_c,bc={sphere:c,point:Ht,lineStart:Ot,lineEnd:Yt,polygonStart:function(){bc.lineStart=It},polygonEnd:function(){bc.lineStart=Ot}},wc=Bt(Zt,Qt,te,[-ba,-ba/2]),Sc=1e9;Zo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=ue(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Zo.geo.conicEqualArea=function(){return oe(ae)}).raw=ae,Zo.geo.albers=function(){return Zo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Zo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Zo.geo.albers(),o=Zo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Zo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var 
t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+ka,f+.12*s+ka],[l-.214*s-ka,f+.234*s-ka]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+ka,f+.166*s+ka],[l-.115*s-ka,f+.234*s-ka]]).stream(c).point,n},n.scale(1070)};var kc,Ec,Ac,Cc,Nc,Lc,Tc={point:c,lineStart:c,lineEnd:c,polygonStart:function(){Ec=0,Tc.lineStart=ce},polygonEnd:function(){Tc.lineStart=Tc.lineEnd=Tc.point=c,kc+=ua(Ec/2)}},qc={point:se,lineStart:c,lineEnd:c,polygonStart:c,polygonEnd:c},zc={point:he,lineStart:ge,lineEnd:pe,polygonStart:function(){zc.lineStart=ve},polygonEnd:function(){zc.point=he,zc.lineStart=ge,zc.lineEnd=pe}};Zo.geo.transform=function(n){return{stream:function(t){var e=new ye(t);for(var r in n)e[r]=n[r];return e}}},ye.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Zo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Zo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return kc=0,Zo.geo.stream(n,u(Tc)),kc},n.centroid=function(n){return gc=pc=vc=dc=mc=yc=xc=Mc=_c=0,Zo.geo.stream(n,u(zc)),_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:vc?[gc/vc,pc/vc]:[0/0,0/0]},n.bounds=function(n){return Nc=Lc=-(Ac=Cc=1/0),Zo.geo.stream(n,u(qc)),[[Ac,Cc],[Nc,Lc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||xe(n):vt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new le:new de(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Zo.geo.albersUsa()).context(null)},Zo.geo.projection=Me,Zo.geo.projectionMutator=_e,(Zo.geo.equirectangular=function(){return Me(we)}).raw=we.invert=we,Zo.geo.rotation=function(n){function t(t){return t=n(t[0]*Aa,t[1]*Aa),t[0]*=Ca,t[1]*=Ca,t }return n=ke(n[0]%360*Aa,n[1]*Aa,n.length>2?n[2]*Aa:0),t.invert=function(t){return t=n.invert(t[0]*Aa,t[1]*Aa),t[0]*=Ca,t[1]*=Ca,t},t},Se.invert=we,Zo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ke(-n[0]*Aa,-n[1]*Aa,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=Ca,n[1]*=Ca}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=Ne((t=+r)*Aa,u*Aa),n):t},n.precision=function(r){return 
arguments.length?(e=Ne(t*Aa,(u=+r)*Aa),n):u},n.angle(90)},Zo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Aa,u=n[1]*Aa,i=t[1]*Aa,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Zo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Zo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Zo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Zo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return ua(n%d)>ka}).map(l)).concat(Zo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return ua(n%m)>ka}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=Te(a,o,90),f=qe(r,e,y),h=Te(s,c,90),g=qe(i,u,y),n):y},n.majorExtent([[-180,-90+ka],[180,90-ka]]).minorExtent([[-180,-80-ka],[180,80+ka]])},Zo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=ze,u=Re;return n.distance=function(){return Zo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Zo.geo.interpolate=function(n,t){return De(n[0]*Aa,n[1]*Aa,t[0]*Aa,t[1]*Aa)},Zo.geo.length=function(n){return Rc=0,Zo.geo.stream(n,Dc),Rc};var Rc,Dc={sphere:c,point:c,lineStart:Pe,lineEnd:c,polygonStart:c,polygonEnd:c},Pc=Ue(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Zo.geo.azimuthalEqualArea=function(){return Me(Pc)}).raw=Pc;var Uc=Ue(function(n){var t=Math.acos(n);return t&&t/Math.sin(t)},vt);(Zo.geo.azimuthalEquidistant=function(){return Me(Uc)}).raw=Uc,(Zo.geo.conicConformal=function(){return oe(je)}).raw=je,(Zo.geo.conicEquidistant=function(){return oe(He)}).raw=He;var jc=Ue(function(n){return 1/n},Math.atan);(Zo.geo.gnomonic=function(){return Me(jc)}).raw=jc,Fe.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Sa]},(Zo.geo.mercator=function(){return Oe(Fe)}).raw=Fe;var Hc=Ue(function(){return 1},Math.asin);(Zo.geo.orthographic=function(){return Me(Hc)}).raw=Hc;var Fc=Ue(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Zo.geo.stereographic=function(){return Me(Fc)}).raw=Fc,Ye.invert=function(n,t){return[Math.atan2(F(n),Math.cos(t)),H(Math.sin(t)/O(n))]},(Zo.geo.transverseMercator=function(){return Oe(Ye)}).raw=Ye,Zo.geom={},Zo.geom.hull=function(n){function t(n){if(n.length<3)return[];var 
t,u,i,o,a,c,s,l,f,h,g,p,v=pt(e),d=pt(r),m=n.length,y=m-1,x=[],M=[],_=0;if(v===Ie&&r===Ze)t=n;else for(i=0,t=[];m>i;++i)t.push([+v.call(this,u=n[i],i),+d.call(this,u,i)]);for(i=1;m>i;++i)(t[i][1]i;++i)i!==_&&(c=t[i][1]-t[_][1],a=t[i][0]-t[_][0],x.push({angle:Math.atan2(c,a),index:i}));for(x.sort(function(n,t){return n.angle-t.angle}),g=x[0].angle,h=x[0].index,f=0,i=1;y>i;++i){if(o=x[i].index,g==x[i].angle){if(a=t[h][0]-t[_][0],c=t[h][1]-t[_][1],s=t[o][0]-t[_][0],l=t[o][1]-t[_][1],a*a+c*c>=s*s+l*l){x[i].index=-1;continue}x[f].index=-1}g=x[i].angle,f=i,h=o}for(M.push(_),i=0,o=0;2>i;++o)x[o].index>-1&&(M.push(x[o].index),i++);for(p=M.length;y>o;++o)if(!(x[o].index<0)){for(;!Ve(M[p-2],M[p-1],x[o].index,t);)--p;M[p++]=x[o].index}var b=[];for(i=p-1;i>=0;--i)b.push(n[M[i]]);return b}var e=Ie,r=Ze;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Zo.geom.polygon=function(n){return sa(n,Oc),n};var Oc=Zo.geom.polygon.prototype=[];Oc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/ka)*ka,y:Math.round(o(n,t)/ka)*ka,i:t}})}var r=Ie,u=Ze,i=r,o=u,a=Wc;return n?t(n):(t.links=function(n){return Mr(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return Mr(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(ur),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=kr()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=pt(a),M=pt(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.xm&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var k=kr();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){Er(n,k,v,d,m,y)},g=-1,null==t){for(;++g=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=Kc.get(e)||Gc,r=Qc.get(r)||vt,zr(r(e.apply(null,Vo.call(arguments,1))))},Zo.interpolateHcl=Xr,Zo.interpolateHsl=$r,Zo.interpolateLab=Br,Zo.interpolateRound=Wr,Zo.transform=function(n){var t=$o.createElementNS(Zo.ns.prefix.svg,"g");return(Zo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Jr(e?e.matrix:ns)})(n)},Jr.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var ns={a:1,b:0,c:0,d:1,e:0,f:0};Zo.interpolateTransform=nu,Zo.layout={},Zo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e(u-e)*a){var c=t.charge*a*a;return n.px-=i*c,n.py-=o*c,!0}if(t.point&&isFinite(a)){var c=t.pointCharge*a*a;n.px-=i*c,n.py-=o*c}}return!t.charge}}function t(n){n.px=Zo.event.x,n.py=Zo.event.y,a.resume()}var e,r,u,i,o,a={},c=Zo.dispatch("start","tick","end"),s=[1,1],l=.9,f=ts,h=es,g=-30,p=.1,v=.8,d=[],m=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
t,e,a,f,h,v,y,x,M,_=d.length,b=m.length;for(e=0;b>e;++e)a=m[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(v=x*x+M*M)&&(v=r*i[e]*((v=Math.sqrt(v))-u[e])/v,x*=v,M*=v,h.x-=x*(y=f.weight/(h.weight+f.weight)),h.y-=M*y,f.x+=x*(y=1-y),f.y+=M*y);if((y=r*p)&&(x=s[0]/2,M=s[1]/2,e=-1,y))for(;++e<_;)a=d[e],a.x+=(x-a.x)*y,a.y+=(M-a.y)*y;if(g)for(lu(t=Zo.geom.quadtree(d),r,o),e=-1;++e<_;)(a=d[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=d[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(d=n,a):d},a.links=function(n){return arguments.length?(m=n,a):m},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.gravity=function(n){return arguments.length?(p=+n,a):p},a.theta=function(n){return arguments.length?(v=+n,a):v},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Zo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=m[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++at;++t)(r=d[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=m[t],"number"==typeof r.source&&(r.source=d[r.source]),"number"==typeof r.target&&(r.target=d[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=d[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,m[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,m[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,d[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Zo.behavior.drag().origin(vt).on("dragstart.force",ou).on("drag.force",t).on("dragend.force",au)),arguments.length?(this.on("mouseover.force",cu).on("mouseout.force",su).call(e),void 0):e},Zo.rebind(a,c,"on")};var ts=20,es=1;Zo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++fg;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=vt,e=xu,r=Mu,u=yu,i=du,o=mu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:us.get(t)||xu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:is.get(t)||Mu,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var us=Zo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(_u),i=n.map(bu),o=Zo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Zo.range(n.length).reverse()},"default":xu}),is=Zo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:Mu});Zo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i0)for(i=-1;++i=l[0]&&a<=l[1]&&(o=c[Zo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=Eu,u=Su;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=pt(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ku(n,t)}:pt(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Zo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h0&&(Pu(Uu(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!Nu(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!Cu(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];Ru(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=Lu(l,qu),h=Lu(l,Tu),g=Lu(l,zu),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return Ru(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Zo.layout.hierarchy().sort(null).value(null),e=Au,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},fu(n,t)},Zo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,Ru(a,function(n){n.r=+l(n.value)}),Ru(a,Yu),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;Ru(a,function(n){n.r+=f}),Ru(a,Yu),Ru(a,function(n){n.r-=f})}return Vu(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Zo.layout.hierarchy().sort(ju),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},fu(n,e)},Zo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;Ru(c,function(n){var t=n.children;t&&t.length?(n.x=Bu(t),n.y=$u(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Wu(c),f=Ju(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return Ru(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Zo.layout.hierarchy().sort(null).value(null),e=Au,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return 
arguments.length?(u=null!=(r=t),n):u?r:null},fu(n,t)},Zo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++ut?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++oe&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++ie.dx)&&(l=e.dx);++ie&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Zo.random.normal.apply(Zo,arguments);return function(){return Math.exp(n())}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t/n}}},Zo.scale={};var os={floor:vt,ceil:vt};Zo.scale.linear=function(){return ii([0,1],[0,1],Tr,!1)},Zo.scale.log=function(){return fi(Zo.scale.linear().domain([0,1]),10,!0,[1,10])};var as=Zo.format(".0e"),cs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Zo.scale.pow=function(){return hi(Zo.scale.linear(),1,[0,1])},Zo.scale.sqrt=function(){return Zo.scale.pow().exponent(.5)},Zo.scale.ordinal=function(){return pi([],{t:"range",a:[[]]})},Zo.scale.category10=function(){return Zo.scale.ordinal().range(ss)},Zo.scale.category20=function(){return Zo.scale.ordinal().range(ls)},Zo.scale.category20b=function(){return Zo.scale.ordinal().range(fs)},Zo.scale.category20c=function(){return Zo.scale.ordinal().range(hs)};var ss=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(it),ls=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(it),fs=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(it),hs=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(it);Zo.scale.quantile=function(){return vi([],[])},Zo.scale.quantize=function(){return di(0,1,[0,1])},Zo.scale.threshold=function(){return mi([.5],[0,1])},Zo.scale.identity=function(){return yi([0,1])},Zo.svg={},Zo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+gs,a=u.apply(this,arguments)+gs,c=(o>a&&(c=o,o=a,a=c),a-o),s=ba>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=ps?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 
"+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=xi,e=Mi,r=_i,u=bi;return n.innerRadius=function(e){return arguments.length?(t=pt(e),n):t},n.outerRadius=function(t){return arguments.length?(e=pt(t),n):e},n.startAngle=function(t){return arguments.length?(r=pt(t),n):r},n.endAngle=function(t){return arguments.length?(u=pt(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+gs;return[Math.cos(i)*n,Math.sin(i)*n]},n};var gs=-Sa,ps=wa-ka;Zo.svg.line=function(){return wi(vt)};var vs=Zo.map({linear:Si,"linear-closed":ki,step:Ei,"step-before":Ai,"step-after":Ci,basis:Ri,"basis-open":Di,"basis-closed":Pi,bundle:Ui,cardinal:Ti,"cardinal-open":Ni,"cardinal-closed":Li,monotone:Ii});vs.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var ds=[0,2/3,1/3,0],ms=[0,1/3,2/3,0],ys=[0,1/6,2/3,1/6];Zo.svg.line.radial=function(){var n=wi(Zi);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},Ai.reverse=Ci,Ci.reverse=Ai,Zo.svg.area=function(){return Vi(vt)},Zo.svg.area.radial=function(){var n=Vi(Zi);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Zo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+gs,l=s.call(n,u,r)+gs;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>ba)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=ze,o=Re,a=Xi,c=_i,s=bi;return n.radius=function(t){return arguments.length?(a=pt(t),n):a},n.source=function(t){return arguments.length?(i=pt(t),n):i},n.target=function(t){return arguments.length?(o=pt(t),n):o},n.startAngle=function(t){return arguments.length?(c=pt(t),n):c},n.endAngle=function(t){return arguments.length?(s=pt(t),n):s},n},Zo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=ze,e=Re,r=$i;return n.source=function(e){return arguments.length?(t=pt(e),n):t},n.target=function(t){return arguments.length?(e=pt(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Zo.svg.diagonal.radial=function(){var n=Zo.svg.diagonal(),t=$i,e=n.projection;return n.projection=function(n){return arguments.length?e(Bi(t=n)):t},n},Zo.svg.symbol=function(){function n(n,r){return(xs.get(t.call(this,n,r))||Gi)(e.call(this,n,r))}var t=Ji,e=Wi;return n.type=function(e){return arguments.length?(t=pt(e),n):t},n.size=function(t){return arguments.length?(e=pt(t),n):e},n};var xs=Zo.map({circle:Gi,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*ws)),e=t*ws;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/bs),e=t*bs/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var 
t=Math.sqrt(n/bs),e=t*bs/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Zo.svg.symbolTypes=xs.keys();var Ms,_s,bs=Math.sqrt(3),ws=Math.tan(30*Aa),Ss=[],ks=0;Ss.call=pa.call,Ss.empty=pa.empty,Ss.node=pa.node,Ss.size=pa.size,Zo.transition=function(n){return arguments.length?Ms?n.transition():n:ma.transition()},Zo.transition.prototype=Ss,Ss.select=function(n){var t,e,r,u=this.id,i=[];n=v(n);for(var o=-1,a=this.length;++oi;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a)&&t.push(r)}return Ki(u,this.id)},Ss.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):C(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ss.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?nu:Tr,a=Zo.ns.qualify(n);return Qi(this,"attr."+n,t,a.local?i:u)},Ss.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Zo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ss.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Wo.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=Tr(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Qi(this,"style."+n,t,u)},Ss.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Wo.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ss.text=function(n){return Qi(this,"text",n,no)},Ss.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ss.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Zo.ease.apply(Zo,arguments)),C(this,function(e){e.__transition__[t].ease=n}))},Ss.delay=function(n){var t=this.id;return C(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ss.duration=function(n){var t=this.id;return C(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ss.each=function(n,t){var e=this.id;if(arguments.length<2){var r=_s,u=Ms;Ms=e,C(this,function(t,r,u){_s=t.__transition__[e],n.call(t,t.__data__,r,u)}),_s=r,Ms=u}else C(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Zo.dispatch("start","end"))).on(n,t)});return this},Ss.transition=function(){for(var n,t,e,r,u=this.id,i=++ks,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var 
t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,to(e,s,i,r)),n.push(e)}return Ki(o,i)},Zo.svg.axis=function(){function n(n){n.each(function(){var n,s=Zo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):vt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",ka),d=Zo.transition(p.exit()).style("opacity",ka).remove(),m=Zo.transition(p).style("opacity",1),y=ni(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Zo.transition(x));v.append("line"),v.append("text");var _=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=eo,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=eo,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=ro,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=ro,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f.rangeBand()/2,A=function(n){return f(n)+E};v.call(n,A),m.call(n,A)}else v.call(n,l),m.call(n,f),d.call(n,f)})}var t,e=Zo.scale.linear(),r=Es,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in As?t+"":Es,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Es="bottom",As={top:1,right:1,bottom:1,left:1};Zo.svg.brush=function(){function n(i){i.each(function(){var i=Zo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(d,vt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return Cs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var 
l,f=Zo.transition(i),h=Zo.transition(o);c&&(l=ni(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=ni(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+h[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",h[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",h[1]-h[0])}function u(){function u(){32==Zo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=h[1],C=2),f())}function g(){32==Zo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=h[1],C=0,f())}function d(){var n=Zo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Zo.event.altKey?(x||(x=[(l[0]+l[1])/2,(h[0]+h[1])/2]),L[0]=l[+(n[0]f?(u=r,r=f):u=f),g[0]!=r||g[1]!=u?(e?o=null:i=null,g[0]=r,g[1]=u,!0):void 0}function y(){d(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Zo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Zo.select(Zo.event.target),w=a.of(_,arguments),S=Zo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=D(),L=Zo.mouse(_),T=Zo.select(Wo).on("keydown.brush",u).on("keyup.brush",g);if(Zo.event.changedTouches?T.on("touchmove.brush",d).on("touchend.brush",y):T.on("mousemove.brush",d).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=h[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],h[1-z]-L[1]],L[0]=l[q],L[1]=h[z]}else Zo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Zo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),d()}var i,o,a=g(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],h=[0,0],p=!0,v=!0,d=Ns[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:h,i:i,j:o},e=this.__chart__||t;this.__chart__=t,Ms?Zo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,h=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=qr(l,t.x),r=qr(h,t.y);return i=o=null,function(u){l=t.x=e(u),h=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,d=Ns[!c<<1|!s],n):c},n.y=function(t){return arguments.length?(s=t,d=Ns[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(p=!!t[0],v=!!t[1]):c?p=!!t:s&&(v=!!t),n):c&&s?[p,v]:c?p:s?v:null},n.extent=function(t){var e,r,u,a,f;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(f=e,e=r,r=f),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(f=u,u=a,a=f),(u!=h[0]||a!=h[1])&&(h=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(f=e,e=r,r=f))),s&&(o?(u=o[0],a=o[1]):(u=h[0],a=h[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(f=u,u=a,a=f))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],h=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&h[0]==h[1]},Zo.rebind(n,a,"on")};var 
Cs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Ns=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ls=Zo.time={},Ts=Date,qs=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"];uo.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){zs.setUTCDate.apply(this._,arguments)},setDay:function(){zs.setUTCDay.apply(this._,arguments)},setFullYear:function(){zs.setUTCFullYear.apply(this._,arguments)},setHours:function(){zs.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){zs.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){zs.setUTCMinutes.apply(this._,arguments)},setMonth:function(){zs.setUTCMonth.apply(this._,arguments)},setSeconds:function(){zs.setUTCSeconds.apply(this._,arguments)},setTime:function(){zs.setTime.apply(this._,arguments)}};var zs=Date.prototype,Rs="%a %b %e %X %Y",Ds="%m/%d/%Y",Ps="%H:%M:%S",Us=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],js=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],Hs=["January","February","March","April","May","June","July","August","September","October","November","December"],Fs=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];Ls.year=io(function(n){return n=Ls.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),Ls.years=Ls.year.range,Ls.years.utc=Ls.year.utc.range,Ls.day=io(function(n){var t=new Ts(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),Ls.days=Ls.day.range,Ls.days.utc=Ls.day.utc.range,Ls.dayOfYear=function(n){var t=Ls.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},qs.forEach(function(n,t){n=n.toLowerCase(),t=7-t;var e=Ls[n]=io(function(n){return(n=Ls.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=Ls.year(n).getDay();return Math.floor((Ls.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});Ls[n+"s"]=e.range,Ls[n+"s"].utc=e.utc.range,Ls[n+"OfYear"]=function(n){var e=Ls.year(n).getDay();return Math.floor((Ls.dayOfYear(n)+(e+t)%7)/7)}}),Ls.week=Ls.sunday,Ls.weeks=Ls.sunday.range,Ls.weeks.utc=Ls.sunday.utc.range,Ls.weekOfYear=Ls.sundayOfYear,Ls.format=ao;var Os=so(Us),Ys=lo(Us),Is=so(js),Zs=lo(js),Vs=so(Hs),Xs=lo(Hs),$s=so(Fs),Bs=lo(Fs),Ws=/^%/,Js={"-":"",_:" ",0:"0"},Gs={a:function(n){return js[n.getDay()]},A:function(n){return Us[n.getDay()]},b:function(n){return Fs[n.getMonth()]},B:function(n){return Hs[n.getMonth()]},c:ao(Rs),d:function(n,t){return fo(n.getDate(),t,2)},e:function(n,t){return fo(n.getDate(),t,2)},H:function(n,t){return fo(n.getHours(),t,2)},I:function(n,t){return fo(n.getHours()%12||12,t,2)},j:function(n,t){return fo(1+Ls.dayOfYear(n),t,3)},L:function(n,t){return fo(n.getMilliseconds(),t,3)},m:function(n,t){return fo(n.getMonth()+1,t,2)},M:function(n,t){return 
fo(n.getMinutes(),t,2)},p:function(n){return n.getHours()>=12?"PM":"AM"},S:function(n,t){return fo(n.getSeconds(),t,2)},U:function(n,t){return fo(Ls.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return fo(Ls.mondayOfYear(n),t,2)},x:ao(Ds),X:ao(Ps),y:function(n,t){return fo(n.getFullYear()%100,t,2)},Y:function(n,t){return fo(n.getFullYear()%1e4,t,4)},Z:Do,"%":function(){return"%"}},Ks={a:ho,A:go,b:yo,B:xo,c:Mo,d:Co,e:Co,H:Lo,I:Lo,j:No,L:zo,m:Ao,M:To,p:Ro,S:qo,U:vo,w:po,W:mo,x:_o,X:bo,y:So,Y:wo,Z:ko,"%":Po},Qs=/^\s*\d+/,nl=Zo.map({am:0,pm:1});ao.utc=Uo;var tl=Uo("%Y-%m-%dT%H:%M:%S.%LZ");ao.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?jo:tl,jo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},jo.toString=tl.toString,Ls.second=io(function(n){return new Ts(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),Ls.seconds=Ls.second.range,Ls.seconds.utc=Ls.second.utc.range,Ls.minute=io(function(n){return new Ts(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),Ls.minutes=Ls.minute.range,Ls.minutes.utc=Ls.minute.utc.range,Ls.hour=io(function(n){var t=n.getTimezoneOffset()/60;return new Ts(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),Ls.hours=Ls.hour.range,Ls.hours.utc=Ls.hour.utc.range,Ls.month=io(function(n){return n=Ls.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),Ls.months=Ls.month.range,Ls.months.utc=Ls.month.utc.range;var el=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],rl=[[Ls.second,1],[Ls.second,5],[Ls.second,15],[Ls.second,30],[Ls.minute,1],[Ls.minute,5],[Ls.minute,15],[Ls.minute,30],[Ls.hour,1],[Ls.hour,3],[Ls.hour,6],[Ls.hour,12],[Ls.day,1],[Ls.day,2],[Ls.week,1],[Ls.month,1],[Ls.month,3],[Ls.year,1]],ul=[[ao("%Y"),Zt],[ao("%B"),function(n){return n.getMonth()}],[ao("%b %d"),function(n){return 1!=n.getDate()}],[ao("%a %d"),function(n){return n.getDay()&&1!=n.getDate()}],[ao("%I %p"),function(n){return n.getHours()}],[ao("%I:%M"),function(n){return n.getMinutes()}],[ao(":%S"),function(n){return n.getSeconds()}],[ao(".%L"),function(n){return n.getMilliseconds()}]],il=Oo(ul);rl.year=Ls.year,Ls.scale=function(){return Ho(Zo.scale.linear(),rl,il)};var ol={range:function(n,t,e){return Zo.range(+n,+t,e).map(Fo)}},al=rl.map(function(n){return[n[0].utc,n[1]]}),cl=[[Uo("%Y"),Zt],[Uo("%B"),function(n){return n.getUTCMonth()}],[Uo("%b %d"),function(n){return 1!=n.getUTCDate()}],[Uo("%a %d"),function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],[Uo("%I %p"),function(n){return n.getUTCHours()}],[Uo("%I:%M"),function(n){return n.getUTCMinutes()}],[Uo(":%S"),function(n){return n.getUTCSeconds()}],[Uo(".%L"),function(n){return n.getUTCMilliseconds()}]],sl=Oo(cl);return al.year=Ls.year.utc,Ls.scale.utc=function(){return Ho(Zo.scale.linear(),al,sl)},Zo.text=dt(function(n){return n.responseText}),Zo.json=function(n,t){return mt(n,"application/json",Yo,t)},Zo.html=function(n,t){return mt(n,"text/html",Io,t)},Zo.xml=dt(function(n){return n.responseXML}),Zo}();nipype-0.9.2/nipype/external/portalocker.py000066400000000000000000000072211227300005300210450ustar00rootroot00000000000000# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking. # Requires python 1.5.2 or better. 
'''Cross-platform (posix/nt) API for flock-style file locking.

Synopsis:

    import portalocker
    file = open('somefile', 'r+')
    portalocker.lock(file, portalocker.LOCK_EX)
    file.seek(12)
    file.write('foo')
    file.close()

If you know what you're doing, you may choose to portalocker.unlock(file)
before closing the file, but why?

Methods:

    lock( file, flags )
    unlock( file )

Constants:

    LOCK_EX
    LOCK_SH
    LOCK_NB

Exceptions:

    LockException

Notes:

For the 'nt' platform, this module requires the Python Extensions for Windows.
Be aware that this may not work as expected on Windows 95/98/ME.

History:

I learned the win32 technique for locking files from sample code provided by
John Nielsen in the documentation that accompanies the win32 modules.

Author: Jonathan Feinberg , Lowell Alleman
Version: $Id: portalocker.py 5474 2008-05-16 20:53:50Z lowell $
'''

__all__ = [
    'lock',
    'unlock',
    'LOCK_EX',
    'LOCK_SH',
    'LOCK_NB',
    'LockException',
]

import os


class LockException(Exception):
    # Error codes:
    LOCK_FAILED = 1


if os.name == 'nt':
    import win32con
    import win32file
    import pywintypes
    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    LOCK_SH = 0  # the default
    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
    # is there any reason not to reuse the following structure?
    __overlapped = pywintypes.OVERLAPPED()
elif os.name == 'posix':
    import fcntl
    LOCK_EX = fcntl.LOCK_EX
    LOCK_SH = fcntl.LOCK_SH
    LOCK_NB = fcntl.LOCK_NB
else:
    raise RuntimeError, 'PortaLocker only defined for nt and posix platforms'


if os.name == 'nt':
    def lock(file, flags):
        hfile = win32file._get_osfhandle(file.fileno())
        try:
            win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped)
        except pywintypes.error, exc_value:
            # error: (33, 'LockFileEx', 'The process cannot access the file because another process has locked a portion of the file.')
            if exc_value[0] == 33:
                raise LockException(LockException.LOCK_FAILED, exc_value[2])
            else:
                # Q: Are there exceptions/codes we should be dealing with here?
                raise

    def unlock(file):
        hfile = win32file._get_osfhandle(file.fileno())
        try:
            win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped)
        except pywintypes.error, exc_value:
            if exc_value[0] == 158:
                # error: (158, 'UnlockFileEx', 'The segment is already unlocked.')
                # To match the 'posix' implementation, silently ignore this error
                pass
            else:
                # Q: Are there exceptions/codes we should be dealing with here?
                raise

elif os.name == 'posix':
    def lock(file, flags):
        try:
            fcntl.flock(file.fileno(), flags)
        except IOError, exc_value:
            # The exception code varies on different systems so we'll catch
            # every IO error
            raise LockException(*exc_value)

    def unlock(file):
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)


if __name__ == '__main__':
    from time import time, strftime, localtime
    import sys
    import portalocker

    log = open('log.txt', 'a+')
    portalocker.lock(log, portalocker.LOCK_EX)

    timestamp = strftime('%m/%d/%Y %H:%M:%S\n', localtime(time()))
    log.write( timestamp )

    print 'Wrote lines. Hit enter to release lock.'
    dummy = sys.stdin.readline()
    log.close()
nipype-0.9.2/nipype/external/provcopy.py000066400000000000000000002277271227300005300204160ustar00rootroot00000000000000'''Python implementation of the W3C Provenance Data Model (PROV-DM)

Support for PROV-JSON import/export

References:

PROV-DM: http://www.w3.org/TR/prov-dm/

@author: Trung Dong Huynh
@copyright: University of Southampton 2013
'''

import logging
import datetime
import json
import re
import dateutil.parser
import collections
from collections import defaultdict
try:
    from rdflib.term import URIRef, BNode
    from rdflib.term import Literal as RDFLiteral
    from rdflib.graph import ConjunctiveGraph, Graph
    from rdflib.namespace import RDF, RDFS
except ImportError:
    pass
from copy import deepcopy, copy
try:
    from collections import OrderedDict
except ImportError:
    from ordereddict import OrderedDict

logger = logging.getLogger(__name__)

#
# PROV record constants - PROV-DM LC

# C1. Entities/Activities
PROV_REC_ENTITY = 1
PROV_REC_ACTIVITY = 2
PROV_REC_GENERATION = 11
PROV_REC_USAGE = 12
PROV_REC_COMMUNICATION = 13
PROV_REC_START = 14
PROV_REC_END = 15
PROV_REC_INVALIDATION = 16
# C2. Derivations
PROV_REC_DERIVATION = 21
# C3. Agents/Responsibility
PROV_REC_AGENT = 3
PROV_REC_ATTRIBUTION = 31
PROV_REC_ASSOCIATION = 32
PROV_REC_DELEGATION = 33
PROV_REC_INFLUENCE = 34
# C4. Bundles
PROV_REC_BUNDLE = 4  # This is the lowest value, so bundle(s) in JSON will be decoded first
# C5. Alternate
PROV_REC_ALTERNATE = 51
PROV_REC_SPECIALIZATION = 52
PROV_REC_MENTION = 53
# C6. Collections
PROV_REC_MEMBERSHIP = 61

PROV_RECORD_TYPES = (
    (PROV_REC_ENTITY, u'Entity'),
    (PROV_REC_ACTIVITY, u'Activity'),
    (PROV_REC_GENERATION, u'Generation'),
    (PROV_REC_USAGE, u'Usage'),
    (PROV_REC_COMMUNICATION, u'Communication'),
    (PROV_REC_START, u'Start'),
    (PROV_REC_END, u'End'),
    (PROV_REC_INVALIDATION, u'Invalidation'),
    (PROV_REC_DERIVATION, u'Derivation'),
    (PROV_REC_AGENT, u'Agent'),
    (PROV_REC_ATTRIBUTION, u'Attribution'),
    (PROV_REC_ASSOCIATION, u'Association'),
    (PROV_REC_DELEGATION, u'Delegation'),
    (PROV_REC_INFLUENCE, u'Influence'),
    (PROV_REC_BUNDLE, u'Bundle'),
    (PROV_REC_ALTERNATE, u'Alternate'),
    (PROV_REC_SPECIALIZATION, u'Specialization'),
    (PROV_REC_MENTION, u'Mention'),
    (PROV_REC_MEMBERSHIP, u'Membership'),
)

PROV_N_MAP = {
    PROV_REC_ENTITY: u'entity',
    PROV_REC_ACTIVITY: u'activity',
    PROV_REC_GENERATION: u'wasGeneratedBy',
    PROV_REC_USAGE: u'used',
    PROV_REC_COMMUNICATION: u'wasInformedBy',
    PROV_REC_START: u'wasStartedBy',
    PROV_REC_END: u'wasEndedBy',
    PROV_REC_INVALIDATION: u'wasInvalidatedBy',
    PROV_REC_DERIVATION: u'wasDerivedFrom',
    PROV_REC_AGENT: u'agent',
    PROV_REC_ATTRIBUTION: u'wasAttributedTo',
    PROV_REC_ASSOCIATION: u'wasAssociatedWith',
    PROV_REC_DELEGATION: u'actedOnBehalfOf',
    PROV_REC_INFLUENCE: u'wasInfluencedBy',
    PROV_REC_ALTERNATE: u'alternateOf',
    PROV_REC_SPECIALIZATION: u'specializationOf',
    PROV_REC_MENTION: u'mentionOf',
    PROV_REC_MEMBERSHIP: u'hadMember',
    PROV_REC_BUNDLE: u'bundle',
}

#
# Identifiers for PROV's attributes
PROV_ATTR_ENTITY = 1
PROV_ATTR_ACTIVITY = 2
PROV_ATTR_TRIGGER = 3
PROV_ATTR_INFORMED = 4
PROV_ATTR_INFORMANT = 5
PROV_ATTR_STARTER = 6
PROV_ATTR_ENDER = 7
PROV_ATTR_AGENT = 8
PROV_ATTR_PLAN = 9
PROV_ATTR_DELEGATE = 10
PROV_ATTR_RESPONSIBLE = 11
PROV_ATTR_GENERATED_ENTITY = 12
PROV_ATTR_USED_ENTITY = 13
PROV_ATTR_GENERATION = 14
PROV_ATTR_USAGE = 15
PROV_ATTR_SPECIFIC_ENTITY = 16
PROV_ATTR_GENERAL_ENTITY = 17
PROV_ATTR_ALTERNATE1 = 18
PROV_ATTR_ALTERNATE2 = 19
PROV_ATTR_BUNDLE = 20
PROV_ATTR_INFLUENCEE = 21
PROV_ATTR_INFLUENCER = 22
PROV_ATTR_COLLECTION = 23
23 # Literal properties PROV_ATTR_TIME = 100 PROV_ATTR_STARTTIME = 101 PROV_ATTR_ENDTIME = 102 PROV_RECORD_ATTRIBUTES = ( # Relations properties (PROV_ATTR_ENTITY, u'prov:entity'), (PROV_ATTR_ACTIVITY, u'prov:activity'), (PROV_ATTR_TRIGGER, u'prov:trigger'), (PROV_ATTR_INFORMED, u'prov:informed'), (PROV_ATTR_INFORMANT, u'prov:informant'), (PROV_ATTR_STARTER, u'prov:starter'), (PROV_ATTR_ENDER, u'prov:ender'), (PROV_ATTR_AGENT, u'prov:agent'), (PROV_ATTR_PLAN, u'prov:plan'), (PROV_ATTR_DELEGATE, u'prov:delegate'), (PROV_ATTR_RESPONSIBLE, u'prov:responsible'), (PROV_ATTR_GENERATED_ENTITY, u'prov:generatedEntity'), (PROV_ATTR_USED_ENTITY, u'prov:usedEntity'), (PROV_ATTR_GENERATION, u'prov:generation'), (PROV_ATTR_USAGE, u'prov:usage'), (PROV_ATTR_SPECIFIC_ENTITY, u'prov:specificEntity'), (PROV_ATTR_GENERAL_ENTITY, u'prov:generalEntity'), (PROV_ATTR_ALTERNATE1, u'prov:alternate1'), (PROV_ATTR_ALTERNATE2, u'prov:alternate2'), (PROV_ATTR_BUNDLE, u'prov:bundle'), (PROV_ATTR_INFLUENCEE, u'prov:influencee'), (PROV_ATTR_INFLUENCER, u'prov:influencer'), (PROV_ATTR_COLLECTION, u'prov:collection'), # Literal properties (PROV_ATTR_TIME, u'prov:time'), (PROV_ATTR_STARTTIME, u'prov:startTime'), (PROV_ATTR_ENDTIME, u'prov:endTime'), ) PROV_ATTRIBUTE_LITERALS = set([PROV_ATTR_TIME, PROV_ATTR_STARTTIME, PROV_ATTR_ENDTIME]) PROV_RECORD_IDS_MAP = dict((PROV_N_MAP[rec_type_id], rec_type_id) for rec_type_id in PROV_N_MAP) PROV_ID_ATTRIBUTES_MAP = dict((prov_id, attribute) for (prov_id, attribute) in PROV_RECORD_ATTRIBUTES) PROV_ATTRIBUTES_ID_MAP = dict((attribute, prov_id) for (prov_id, attribute) in PROV_RECORD_ATTRIBUTES) # Converting an attribute to the normal form for comparison purposes _normalise_attributes = lambda attr: (unicode(attr[0]), unicode(attr[1])) # Datatypes attr2rdf = lambda attr: PROV[PROV_ID_ATTRIBUTES_MAP[attr].split('prov:')[1]].rdf_representation() def _parse_xsd_dateTime(s): return dateutil.parser.parse(s) def _ensure_datetime(time): if isinstance(time, basestring): return _parse_xsd_dateTime(time) else: return time def parse_xsd_dateTime(s): try: return _parse_xsd_dateTime(s) except ValueError: pass return None DATATYPE_PARSERS = { datetime.datetime: parse_xsd_dateTime, } def parse_datatype(value, datatype): if datatype in DATATYPE_PARSERS: # found the required parser return DATATYPE_PARSERS[datatype](value) else: # No parser found for the given data type raise Exception(u'No parser found for the data type <%s>' % unicode(datatype)) # Mappings for XSD datatypes to Python standard types XSD_DATATYPE_PARSERS = { u"xsd:string": unicode, u"xsd:double": float, u"xsd:long": long, u"xsd:int": int, u"xsd:boolean": bool, u"xsd:dateTime": parse_xsd_dateTime, } def parse_xsd_types(value, datatype): # if the datatype is a QName, convert it to a Unicode string datatype = unicode(datatype) return XSD_DATATYPE_PARSERS[datatype](value) if datatype in XSD_DATATYPE_PARSERS else None def _ensure_multiline_string_triple_quoted(s): format_str = u'"""%s"""' if isinstance(s, basestring) and '\n' in s else u'"%s"' return format_str % s def encoding_PROV_N_value(value): if isinstance(value, basestring): return _ensure_multiline_string_triple_quoted(value) elif isinstance(value, datetime.datetime): return value.isoformat() elif isinstance(value, float): return u'"%f" %%%% xsd:float' % value else: return unicode(value) class AnonymousIDGenerator(): def __init__(self): self._cache = {} self._count = 0 def get_anon_id(self, obj, local_prefix="id"): if obj not in self._cache: self._count += 1 self._cache[obj] = 
Identifier('_:%s%d' % (local_prefix, self._count)) return self._cache[obj] class Literal(object): def __init__(self, value, datatype=None, langtag=None): self._value = value self._datatype = datatype self._langtag = langtag def __unicode__(self): return self.provn_representation() def __str__(self): return unicode(self).encode('utf-8') def __eq__(self, other): return self._value == other._value and self._datatype == other._datatype and self._langtag == other._langtag if isinstance(other, Literal) else False def __hash__(self): return hash((self._value, self._datatype, self._langtag)) def get_value(self): return self._value def get_datatype(self): return self._datatype def get_langtag(self): return self._langtag def has_no_langtag(self): return self._langtag is None def provn_representation(self): if self._langtag: # a langtag can only goes with string return u'%s@%s' % (_ensure_multiline_string_triple_quoted(self._value), unicode(self._langtag)) else: return u'%s %%%% %s' % (_ensure_multiline_string_triple_quoted(self._value), unicode(self._datatype)) def json_representation(self): if self._langtag: # a langtag can only goes with string return {'$': unicode(self._value), 'lang': self._langtag} else: if isinstance(self._datatype, QName): return {'$': unicode(self._value), 'type': unicode(self._datatype)} else: # Assuming it is a valid identifier return {'$': unicode(self._value), 'type': self._datatype.get_uri()} def rdf_representation(self): if self._langtag: # a langtag can only goes with string return RDFLiteral(self._value, lang=str(self._langtag)) else: return RDFLiteral(self._value, datatype=self._datatype.get_uri()) class Identifier(object): def __init__(self, uri): self._uri = unicode(uri) # Ensure this is a unicode string def get_uri(self): return self._uri def __unicode__(self): return self._uri def __str__(self): return unicode(self).encode('utf-8') def __eq__(self, other): return self.get_uri() == other.get_uri() if isinstance(other, Identifier) else False def __hash__(self): return hash(self.get_uri()) def provn_representation(self): return u'"%s" %%%% xsd:anyURI' % self._uri def json_representation(self): return {'$': self._uri, 'type': u'xsd:anyURI'} def rdf_representation(self): return URIRef(self.get_uri()) class QName(Identifier): def __init__(self, namespace, localpart): self._namespace = namespace self._localpart = localpart self._str = u':'.join([namespace._prefix, localpart]) if namespace._prefix else localpart def get_namespace(self): return self._namespace def get_localpart(self): return self._localpart def get_uri(self): return u''.join([self._namespace._uri, self._localpart]) def __unicode__(self): return self._str def __str__(self): return unicode(self).encode('utf-8') def provn_representation(self): return u"'%s'" % self._str def json_representation(self): return {'$': self._str, 'type': u'xsd:QName'} class Namespace(object): def __init__(self, prefix, uri): self._prefix = prefix self._uri = uri self._cache = dict() def get_prefix(self): return self._prefix def get_uri(self): return self._uri def contains(self, identifier): uri = identifier if isinstance(identifier, (str, unicode)) else (identifier.get_uri() if isinstance(identifier, Identifier) else None) return uri.startswith(self._uri) if uri else False def qname(self, identifier): uri = identifier if isinstance(identifier, (str, unicode)) else (identifier.get_uri() if isinstance(identifier, Identifier) else None) if uri and uri.startswith(self._uri): return QName(self, uri[len(self._uri):]) else: return None 
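# --- Editorial usage sketch (hedged; not part of the original module) --------
# The QName/Namespace classes above and __getitem__ below mint prefixed
# identifiers. With hypothetical names this looks roughly like:
#     ex = Namespace('ex', 'http://example.org/')
#     qn = ex['report']        # cached QName; unicode(qn) -> u'ex:report'
#     qn.get_uri()             # -> u'http://example.org/report'
#     ex.qname('http://example.org/report')   # maps the URI back to a QName
# Kept in comments so nothing runs while the class body is being defined.
# ------------------------------------------------------------------------------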
def __eq__(self, other): return (self._uri == other._uri and self._prefix == other._prefix) if isinstance(other, Namespace) else False def __hash__(self): return hash((self._uri, self._prefix)) def __getitem__(self, localpart): if localpart in self._cache: return self._cache[localpart] else: qname = QName(self, localpart) self._cache[localpart] = qname return qname XSD = Namespace("xsd", 'http://www.w3.org/2001/XMLSchema-datatypes#') PROV = Namespace("prov", 'http://www.w3.org/ns/prov#') # Exceptions class ProvException(Exception): """Base class for exceptions in this module.""" pass class ProvExceptionMissingRequiredAttribute(ProvException): def __init__(self, record_type, attribute_id): self.record_type = record_type self.attribute_id = attribute_id self.args += (PROV_N_MAP[record_type], attribute_id) class ProvExceptionNotValidAttribute(ProvException): def __init__(self, record_type, attribute, attribute_types): self.record_type = record_type self.attribute = attribute self.attribute_types = attribute_types self.args += (PROV_N_MAP[record_type], unicode(attribute), attribute_types) class ProvExceptionCannotUnifyAttribute(ProvException): def __init__(self, identifier, record_type1, record_type2): self.identifier = identifier self.record_type1 = record_type1 self.record_type2 = record_type2 self.args += (identifier, PROV_N_MAP[record_type1], PROV_N_MAP[record_type2]) class ProvExceptionContraint(ProvException): def __init__(self, record_type, attribute1, attribute2, msg): self.record_type = record_type self.attribute1 = attribute1 self.attribute2 = attribute2 self.args += (PROV_N_MAP[record_type], attribute1, attribute2, msg) self.msg = msg # PROV records class ProvRecord(object): """Base class for PROV _records.""" def __init__(self, bundle, identifier, attributes=None, other_attributes=None, asserted=True, allowed_types=None, infered_for=None): self._bundle = bundle self._identifier = identifier self._asserted = asserted self._attributes = None self._extra_attributes = None if attributes or other_attributes: self.add_attributes(attributes, other_attributes) if not asserted: self._allowed_types = allowed_types self._infered_for = infered_for def get_type(self): pass def get_allowed_types(self): if self._asserted: return [self.__class__] else: return [self.__class__] + list(self._allowed_types) def get_prov_type(self): pass def get_asserted_types(self): if self._extra_attributes: prov_type = PROV['type'] return set([value for attr, value in self._extra_attributes if attr == prov_type]) return set() def add_asserted_type(self, type_identifier): asserted_types = self.get_asserted_types() if type_identifier not in asserted_types: if self._extra_attributes is None: self._extra_attributes = set() self._extra_attributes.add((PROV['type'], type_identifier)) def get_attribute(self, attr_name): attr_name = self._bundle.valid_identifier(attr_name) if not self._extra_attributes: return [] results = [value for attr, value in self._extra_attributes if attr == attr_name] return results def get_identifier(self): return self._identifier def get_label(self): label = None if self._extra_attributes: for attribute in self._extra_attributes: if attribute[0]: if attribute[0] == PROV['label']: label = attribute[1] # use the first label found break return label if label else self._identifier def get_value(self): return self.get_attribute(PROV['value']) def _auto_literal_conversion(self, literal): '''This method normalise datatype for literals ''' if isinstance(literal, basestring): # try if this is a QName qname 
= self._bundle.valid_identifier(literal) if isinstance(qname, QName): return qname # if not a QName, convert all strings to unicode return unicode(literal) if isinstance(literal, Literal) and literal.has_no_langtag(): # try convert generic Literal object to Python standard type if possible # this is to match JSON decoding's literal conversion value = parse_xsd_types(literal.get_value(), literal.get_datatype()) if value is not None: return value # No conversion here, return the original value return literal def parse_extra_attributes(self, extra_attributes): if isinstance(extra_attributes, dict): # Converting the dictionary into a list of tuples (i.e. attribute-value pairs) extra_attributes = extra_attributes.items() attr_set = set((self._bundle.valid_identifier(attribute), self._auto_literal_conversion(value)) for attribute, value in extra_attributes) return attr_set def add_extra_attributes(self, extra_attributes): if extra_attributes: if self._extra_attributes is None: self._extra_attributes = set() # Check attributes for valid qualified names attr_set = self.parse_extra_attributes(extra_attributes) self._extra_attributes.update(attr_set) def add_attributes(self, attributes, extra_attributes): if attributes: if self._attributes is None: self._attributes = attributes else: self._attributes.update(dict((k, v) for k, v in attributes.iteritems() if v is not None)) self.add_extra_attributes(extra_attributes) def get_attributes(self): return (self._attributes, self._extra_attributes) def get_bundle(self): return self._bundle def _parse_identifier(self, value): try: return value.get_identifier() except: return self._bundle.valid_identifier(value) def _parse_record(self, attribute, attribute_types): # check to see if there is an existing record matching the attribute (as the record's identifier) existing_record = self._bundle.get_record(attribute) if existing_record is None: # try to see if there is a bundle with the id existing_record = self._bundle.get_bundle(attribute) if existing_record and isinstance(existing_record, attribute_types): return existing_record else: if hasattr(attribute_types, '__getitem__'): # it is a list klass = attribute_types[0] # get the first class else: klass = attribute_types # only one class provided attribute_types = [attribute_types] if issubclass(klass, ProvRecord): # Create an inferred record for the id given: return self._bundle.add_inferred_record(klass, attribute, self, attribute_types) return None def _parse_attribute(self, attribute, attribute_types): if attribute_types is Identifier: if isinstance(attribute, ProvRecord): # This is a record, return its identifier (if any) return attribute.get_identifier() # Otherwise, trying to parse the attribute as an identifier return self._parse_identifier(attribute) # putting all the types in to a tuple: if not isinstance(attribute_types, collections.Iterable): attribute_types = (attribute_types,) # attempt to find an existing record having the same identifier if any(map(lambda x: issubclass(x, ProvRecord), attribute_types)): record = self._parse_record(attribute, attribute_types) if record: return record # Try to parse it with known datatype parsers for datatype in attribute_types: data = parse_datatype(attribute, datatype) if data is not None: return data return None def _validate_attribute(self, attribute, attribute_types): if isinstance(attribute, attribute_types): # The attribute is of a required type # Return it if isinstance(attribute, ProvRecord) and attribute._identifier in self._bundle._id_map: return 
self._bundle._id_map[attribute._identifier] else: return attribute else: # The attribute is not of a valid type if isinstance(attribute, ProvRecord): # It is definitely not valid since no further parsing is possible raise ProvExceptionNotValidAttribute(self.get_type(), attribute, attribute_types) # Attempt to parse it parsed_value = self._parse_attribute(attribute, attribute_types) if parsed_value is None: raise ProvExceptionNotValidAttribute(self.get_type(), attribute, attribute_types) return parsed_value def required_attribute(self, attributes, attribute_id, attribute_types): if attribute_id not in attributes: # Raise an exception about the missing attribute raise ProvExceptionMissingRequiredAttribute(self.get_type(), attribute_id) # Found the required attribute attribute = attributes.get(attribute_id) return self._validate_attribute(attribute, attribute_types) def optional_attribute(self, attributes, attribute_id, attribute_types): if not attributes or attribute_id not in attributes: # Because this is optional, return nothing return None # Found the optional attribute attribute = attributes.get(attribute_id) if attribute is None: return None # Validate its type return self._validate_attribute(attribute, attribute_types) def __eq__(self, other): if self.__class__ != other.__class__: return False if self._identifier and not (self._identifier == other._identifier): return False if self._asserted != other._asserted: return False if self._attributes and other._attributes: if len(self._attributes) != len(other._attributes): return False for attr, value_a in self._attributes.items(): value_b = other._attributes[attr] if isinstance(value_a, ProvRecord) and value_a._identifier: if not (value_a._identifier == value_b._identifier): return False elif not (value_a == value_b): return False elif other._attributes and not self._attributes: other_attrs = [(key, value) for key, value in other._attributes.items() if value is not None] if other_attrs: # the other's attributes set is not empty. return False elif self._attributes and not other._attributes: my_attrs = [(key, value) for key, value in self._attributes.items() if value is not None] if my_attrs: # my attributes set is not empty. 
return False sattr = sorted(self._extra_attributes, key=_normalise_attributes) if self._extra_attributes else None oattr = sorted(other._extra_attributes, key=_normalise_attributes) if other._extra_attributes else None if sattr != oattr: return False return True def __unicode__(self): return self.get_provn() def __str__(self): return unicode(self).encode('utf-8') def get_provn(self, _indent_level=0): items = [] if self._identifier: items.append(unicode(self._identifier)) if self._attributes: for (attr, value) in self._attributes.items(): if value is None: items.append(u'-') else: if isinstance(value, ProvRecord): record_id = value.get_identifier() items.append(unicode(record_id)) else: # Assuming this is a datetime or QName value items.append(value.isoformat() if isinstance(value, datetime.datetime) else unicode(value)) if self._extra_attributes: extra = [] for (attr, value) in self._extra_attributes: try: # try if there is a prov-n representation defined provn_represenation = value.provn_representation() except: provn_represenation = encoding_PROV_N_value(value) extra.append(u'%s=%s' % (unicode(attr), provn_represenation)) if extra: items.append(u'[%s]' % u', '.join(extra)) prov_n = u'%s(%s)' % (PROV_N_MAP[self.get_type()], u', '.join(items)) return prov_n if self._asserted else u'// ' + prov_n def rdf(self, graph=None, subj=None): if graph is None: graph = Graph() if subj is None: # this method need a subject as relations may not have identifiers return graph if self._attributes: for (attr, value) in self._attributes.items(): if value is None: continue pred = attr2rdf(attr) try: # try if there is a RDF representation defined obj = value.rdf_representation() except: obj = RDFLiteral(value) graph.add((subj, pred, obj)) if self._extra_attributes: for (attr, value) in self._extra_attributes: if attr == PROV['type']: pred = RDF.type elif attr == PROV['label']: pred = RDFS.label else: pred = attr.rdf_representation() try: # try if there is a RDF representation defined obj = value.rdf_representation() except Exception, e: obj = RDFLiteral(value) graph.add((subj, pred, obj)) return graph def is_asserted(self): return self._asserted def is_element(self): return False def is_relation(self): return False # Abstract classes for elements and relations class ProvElement(ProvRecord): def is_element(self): return True def rdf(self, graph=None): if graph is None: graph = Graph() uri = self.get_identifier().rdf_representation() type_uri = self.get_prov_type().rdf_representation() graph.add((uri, RDF.type, type_uri)) ProvRecord.rdf(self, graph, uri) return graph class ProvRelation(ProvRecord): def is_relation(self): return True def rdf(self, graph=None): if graph is None: graph = Graph() pred = PROV[PROV_N_MAP[self.get_type()]].rdf_representation() items = [] subj=None obj=None for idx, (attr, value) in enumerate(self._attributes.items()): if idx == 0: subj = value.get_identifier().rdf_representation() elif idx == 1: if value: obj = value.get_identifier().rdf_representation() items.append((attr2rdf(attr), obj)) elif value: try: # try if there is a RDF representation defined otherobj = value.rdf_representation() except: otherobj = RDFLiteral(value) items.append((attr2rdf(attr), otherobj)) if subj and obj: graph.add((subj, pred, obj)) if self._extra_attributes: for (attr, value) in self._extra_attributes: if not value: continue if attr == PROV['type']: pred = RDF.type elif attr == PROV['label']: pred = RDFS.label else: pred = attr.rdf_representation() try: # try if there is a RDF representation defined 
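# Editorial note (hedged sketch of what this rdf() method ultimately emits):
# besides the direct triple  subj <predicate> obj  (added when both ends are
# present), a relation that carries further attributes is also serialized as a
# qualified pattern through a blank node, roughly:
#     subj  prov:qualified<Type>  _:b .
#     _:b   rdf:type              prov:<Type> .
#     _:b   <attribute>           <value> .    # one triple per remaining item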
otherobj = value.rdf_representation() except: otherobj = RDFLiteral(value) items.append((pred, otherobj)) if obj and len(items) == 1: items = [] if items: QRole = PROV['qualified' + str(self.get_prov_type()).split('prov:')[1]].rdf_representation() bnode = BNode() graph.add((subj, QRole, bnode)) graph.add((bnode, RDF.type, self.get_prov_type().rdf_representation())) for attr, value in items: graph.add((bnode, attr, value)) return graph # ## Component 1: Entities and Activities class ProvEntity(ProvElement): def get_type(self): return PROV_REC_ENTITY def get_prov_type(self): return PROV['Entity'] class ProvActivity(ProvElement): def get_type(self): return PROV_REC_ACTIVITY def get_prov_type(self): return PROV['Activity'] def add_attributes(self, attributes, extra_attributes): startTime = self.optional_attribute(attributes, PROV_ATTR_STARTTIME, datetime.datetime) endTime = self.optional_attribute(attributes, PROV_ATTR_ENDTIME, datetime.datetime) if startTime and endTime and startTime > endTime: # TODO Raise logic exception here pass attributes = OrderedDict() attributes[PROV_ATTR_STARTTIME] = startTime attributes[PROV_ATTR_ENDTIME] = endTime ProvElement.add_attributes(self, attributes, extra_attributes) # Convenient methods def set_time(self, startTime=None, endTime=None): # The _attributes dict should have been initialised if startTime is not None: self._attributes[PROV_ATTR_STARTTIME] = startTime if endTime is not None: self._attributes[PROV_ATTR_ENDTIME] = endTime def get_startTime(self): return self._attributes[PROV_ATTR_STARTTIME] def get_endTime(self): return self._attributes[PROV_ATTR_ENDTIME] class ProvGeneration(ProvRelation): def get_type(self): return PROV_REC_GENERATION def get_prov_type(self): return PROV['Generation'] def add_attributes(self, attributes, extra_attributes): # Required attributes entity = self.required_attribute(attributes, PROV_ATTR_ENTITY, (ProvEntity, ProvAgent)) # Optional attributes activity = self.optional_attribute(attributes, PROV_ATTR_ACTIVITY, ProvActivity) time = self.optional_attribute(attributes, PROV_ATTR_TIME, datetime.datetime) attributes = OrderedDict() attributes[PROV_ATTR_ENTITY] = entity attributes[PROV_ATTR_ACTIVITY] = activity attributes[PROV_ATTR_TIME] = time ProvRelation.add_attributes(self, attributes, extra_attributes) class ProvUsage(ProvRelation): def get_type(self): return PROV_REC_USAGE def get_prov_type(self): return PROV['Usage'] def add_attributes(self, attributes, extra_attributes): # Required attributes activity = self.required_attribute(attributes, PROV_ATTR_ACTIVITY, ProvActivity) # Optional attributes entity = self.optional_attribute(attributes, PROV_ATTR_ENTITY, (ProvEntity, ProvAgent)) time = self.optional_attribute(attributes, PROV_ATTR_TIME, datetime.datetime) attributes = OrderedDict() attributes[PROV_ATTR_ACTIVITY] = activity attributes[PROV_ATTR_ENTITY] = entity attributes[PROV_ATTR_TIME] = time ProvRelation.add_attributes(self, attributes, extra_attributes) class ProvCommunication(ProvRelation): def get_type(self): return PROV_REC_COMMUNICATION def get_prov_type(self): return PROV['Communication'] def add_attributes(self, attributes, extra_attributes): # Required attributes informed = self.required_attribute(attributes, PROV_ATTR_INFORMED, ProvActivity) informant = self.required_attribute(attributes, PROV_ATTR_INFORMANT, ProvActivity) attributes = OrderedDict() attributes[PROV_ATTR_INFORMED] = informed attributes[PROV_ATTR_INFORMANT] = informant ProvRelation.add_attributes(self, attributes, extra_attributes) class 
ProvStart(ProvRelation): def get_type(self): return PROV_REC_START def get_prov_type(self): return PROV['Start'] def add_attributes(self, attributes, extra_attributes): # Required attributes activity = self.required_attribute(attributes, PROV_ATTR_ACTIVITY, ProvActivity) # Optional attributes trigger = self.optional_attribute(attributes, PROV_ATTR_TRIGGER, (ProvEntity, ProvAgent)) starter = self.optional_attribute(attributes, PROV_ATTR_STARTER, ProvActivity) time = self.optional_attribute(attributes, PROV_ATTR_TIME, datetime.datetime) attributes = OrderedDict() attributes[PROV_ATTR_ACTIVITY] = activity attributes[PROV_ATTR_TRIGGER] = trigger attributes[PROV_ATTR_STARTER] = starter attributes[PROV_ATTR_TIME] = time ProvRelation.add_attributes(self, attributes, extra_attributes) class ProvEnd(ProvRelation): def get_type(self): return PROV_REC_END def get_prov_type(self): return PROV['End'] def add_attributes(self, attributes, extra_attributes): # Required attributes activity = self.required_attribute(attributes, PROV_ATTR_ACTIVITY, ProvActivity) # Optional attributes trigger = self.optional_attribute(attributes, PROV_ATTR_TRIGGER, (ProvEntity, ProvAgent)) ender = self.optional_attribute(attributes, PROV_ATTR_ENDER, ProvActivity) time = self.optional_attribute(attributes, PROV_ATTR_TIME, datetime.datetime) attributes = OrderedDict() attributes[PROV_ATTR_ACTIVITY] = activity attributes[PROV_ATTR_TRIGGER] = trigger attributes[PROV_ATTR_ENDER] = ender attributes[PROV_ATTR_TIME] = time ProvRelation.add_attributes(self, attributes, extra_attributes) class ProvInvalidation(ProvRelation): def get_type(self): return PROV_REC_INVALIDATION def get_prov_type(self): return PROV['Invalidation'] def add_attributes(self, attributes, extra_attributes): # Required attributes entity = self.required_attribute(attributes, PROV_ATTR_ENTITY, (ProvEntity, ProvAgent)) # Optional attributes activity = self.optional_attribute(attributes, PROV_ATTR_ACTIVITY, ProvActivity) time = self.optional_attribute(attributes, PROV_ATTR_TIME, datetime.datetime) attributes = OrderedDict() attributes[PROV_ATTR_ENTITY] = entity attributes[PROV_ATTR_ACTIVITY] = activity attributes[PROV_ATTR_TIME] = time ProvRelation.add_attributes(self, attributes, extra_attributes) # ## Component 2: Derivations class ProvDerivation(ProvRelation): def get_type(self): return PROV_REC_DERIVATION def get_prov_type(self): return PROV['Derivation'] def add_attributes(self, attributes, extra_attributes): # Required attributes generatedEntity = self.required_attribute(attributes, PROV_ATTR_GENERATED_ENTITY, (ProvEntity, ProvAgent)) usedEntity = self.required_attribute(attributes, PROV_ATTR_USED_ENTITY, (ProvEntity, ProvAgent)) # Optional attributes activity = self.optional_attribute(attributes, PROV_ATTR_ACTIVITY, ProvActivity) generation = self.optional_attribute(attributes, PROV_ATTR_GENERATION, ProvGeneration) usage = self.optional_attribute(attributes, PROV_ATTR_USAGE, ProvUsage) attributes = OrderedDict() attributes[PROV_ATTR_GENERATED_ENTITY] = generatedEntity attributes[PROV_ATTR_USED_ENTITY] = usedEntity attributes[PROV_ATTR_ACTIVITY] = activity attributes[PROV_ATTR_GENERATION] = generation attributes[PROV_ATTR_USAGE] = usage ProvRelation.add_attributes(self, attributes, extra_attributes) # ## Component 3: Agents, Responsibility, and Influence class ProvAgent(ProvElement): def get_type(self): return PROV_REC_AGENT def get_prov_type(self): return PROV['Agent'] class ProvAttribution(ProvRelation): def get_type(self): return PROV_REC_ATTRIBUTION def 
get_prov_type(self): return PROV['Attribution'] def add_attributes(self, attributes, extra_attributes): # Required attributes entity = self.required_attribute(attributes, PROV_ATTR_ENTITY, (ProvEntity, ProvAgent)) agent = self.required_attribute(attributes, PROV_ATTR_AGENT, (ProvAgent, ProvEntity)) attributes = OrderedDict() attributes[PROV_ATTR_ENTITY] = entity attributes[PROV_ATTR_AGENT] = agent ProvRelation.add_attributes(self, attributes, extra_attributes) class ProvAssociation(ProvRelation): def get_type(self): return PROV_REC_ASSOCIATION def get_prov_type(self): return PROV['Association'] def add_attributes(self, attributes, extra_attributes): # Required attributes activity = self.required_attribute(attributes, PROV_ATTR_ACTIVITY, ProvActivity) # Optional attributes agent = self.optional_attribute(attributes, PROV_ATTR_AGENT, (ProvAgent, ProvEntity)) plan = self.optional_attribute(attributes, PROV_ATTR_PLAN, (ProvEntity, ProvAgent)) attributes = OrderedDict() attributes[PROV_ATTR_ACTIVITY] = activity attributes[PROV_ATTR_AGENT] = agent attributes[PROV_ATTR_PLAN] = plan ProvRelation.add_attributes(self, attributes, extra_attributes) class ProvDelegation(ProvRelation): def get_type(self): return PROV_REC_DELEGATION def get_prov_type(self): return PROV['Delegation'] def add_attributes(self, attributes, extra_attributes): # Required attributes delegate = self.required_attribute(attributes, PROV_ATTR_DELEGATE, (ProvAgent, ProvEntity)) responsible = self.required_attribute(attributes, PROV_ATTR_RESPONSIBLE, (ProvAgent, ProvEntity)) # Optional attributes activity = self.optional_attribute(attributes, PROV_ATTR_ACTIVITY, ProvActivity) attributes = OrderedDict() attributes[PROV_ATTR_DELEGATE] = delegate attributes[PROV_ATTR_RESPONSIBLE] = responsible attributes[PROV_ATTR_ACTIVITY] = activity ProvRelation.add_attributes(self, attributes, extra_attributes) class ProvInfluence(ProvRelation): def get_type(self): return PROV_REC_INFLUENCE def get_prov_type(self): return PROV['Influence'] def add_attributes(self, attributes, extra_attributes): # Required attributes influencee = self.required_attribute(attributes, PROV_ATTR_INFLUENCEE, (ProvEntity, ProvActivity, ProvAgent)) influencer = self.required_attribute(attributes, PROV_ATTR_INFLUENCER, (ProvAgent, ProvEntity, ProvActivity)) attributes = OrderedDict() attributes[PROV_ATTR_INFLUENCEE] = influencee attributes[PROV_ATTR_INFLUENCER] = influencer ProvRelation.add_attributes(self, attributes, extra_attributes) # ## Component 4: Bundles # See below # ## Component 5: Alternate Entities class ProvSpecialization(ProvRelation): def get_type(self): return PROV_REC_SPECIALIZATION def get_prov_type(self): return PROV['Specialization'] def add_attributes(self, attributes, extra_attributes): # Required attributes specificEntity = self.required_attribute(attributes, PROV_ATTR_SPECIFIC_ENTITY, (ProvEntity, ProvAgent)) generalEntity = self.required_attribute(attributes, PROV_ATTR_GENERAL_ENTITY, (ProvEntity, ProvAgent)) attributes = OrderedDict() attributes[PROV_ATTR_SPECIFIC_ENTITY] = specificEntity attributes[PROV_ATTR_GENERAL_ENTITY] = generalEntity ProvRelation.add_attributes(self, attributes, extra_attributes) class ProvAlternate(ProvRelation): def get_type(self): return PROV_REC_ALTERNATE def get_prov_type(self): return PROV['Alternate'] def add_attributes(self, attributes, extra_attributes): # Required attributes alternate1 = self.required_attribute(attributes, PROV_ATTR_ALTERNATE1, (ProvEntity, ProvAgent)) alternate2 = 
self.required_attribute(attributes, PROV_ATTR_ALTERNATE2, (ProvEntity, ProvAgent)) attributes = OrderedDict() attributes[PROV_ATTR_ALTERNATE1] = alternate1 attributes[PROV_ATTR_ALTERNATE2] = alternate2 ProvRelation.add_attributes(self, attributes, extra_attributes) class ProvMention(ProvSpecialization): def get_type(self): return PROV_REC_MENTION def get_prov_type(self): return PROV['Mention'] def add_attributes(self, attributes, extra_attributes): # Required attributes specificEntity = self.required_attribute(attributes, PROV_ATTR_SPECIFIC_ENTITY, (ProvEntity, ProvAgent)) generalEntity = self.required_attribute(attributes, PROV_ATTR_GENERAL_ENTITY, Identifier) bundle = self.required_attribute(attributes, PROV_ATTR_BUNDLE, Identifier) #======================================================================= # # This is disabled so that mentionOf can refer to bundle that is not defined in the same place # bundle = self.required_attribute(attributes, PROV_ATTR_BUNDLE, ProvBundle) # # Check if generalEntity is in the bundle # if generalEntity.get_bundle() is not bundle: # raise ProvExceptionContraint(PROV_REC_MENTION, generalEntity, bundle, 'The generalEntity must belong to the bundle') #======================================================================= attributes = OrderedDict() attributes[PROV_ATTR_SPECIFIC_ENTITY] = specificEntity attributes[PROV_ATTR_GENERAL_ENTITY] = generalEntity attributes[PROV_ATTR_BUNDLE] = bundle ProvRelation.add_attributes(self, attributes, extra_attributes) # ## Component 6: Collections class ProvMembership(ProvRelation): def get_type(self): return PROV_REC_MEMBERSHIP def get_prov_type(self): return PROV['Membership'] def add_attributes(self, attributes, extra_attributes): # Required attributes collection = self.required_attribute(attributes, PROV_ATTR_COLLECTION, (ProvEntity, ProvAgent)) entity = self.required_attribute(attributes, PROV_ATTR_ENTITY, (ProvEntity, ProvAgent)) attributes = OrderedDict() attributes[PROV_ATTR_COLLECTION] = collection attributes[PROV_ATTR_ENTITY] = entity ProvRelation.add_attributes(self, attributes, extra_attributes) # Class mappings from PROV record type PROV_REC_CLS = { PROV_REC_ENTITY : ProvEntity, PROV_REC_ACTIVITY : ProvActivity, PROV_REC_GENERATION : ProvGeneration, PROV_REC_USAGE : ProvUsage, PROV_REC_COMMUNICATION : ProvCommunication, PROV_REC_START : ProvStart, PROV_REC_END : ProvEnd, PROV_REC_INVALIDATION : ProvInvalidation, PROV_REC_DERIVATION : ProvDerivation, PROV_REC_AGENT : ProvAgent, PROV_REC_ATTRIBUTION : ProvAttribution, PROV_REC_ASSOCIATION : ProvAssociation, PROV_REC_DELEGATION : ProvDelegation, PROV_REC_INFLUENCE : ProvInfluence, PROV_REC_SPECIALIZATION : ProvSpecialization, PROV_REC_ALTERNATE : ProvAlternate, PROV_REC_MENTION : ProvMention, PROV_REC_MEMBERSHIP : ProvMembership, } # Bundle class NamespaceManager(dict): def __init__(self, namespaces={}, default_namespaces={PROV.get_prefix(): PROV, XSD.get_prefix(): XSD}, default=None, parent=None): self._default_namespaces = {} self._default_namespaces.update(default_namespaces) self.update(self._default_namespaces) self._namespaces = {} if default is not None: self.set_default_namespace(default) else: self._default = None self.parent = parent # TODO check if default is in the default namespaces self._anon_id_count = 0 self._rename_map = {} self.add_namespaces(namespaces) def get_namespace(self, uri): for namespace in self.values(): if uri == namespace._uri: return namespace return None def get_registered_namespaces(self): return self._namespaces.values() def 
set_default_namespace(self, uri): self._default = Namespace('', uri) self[''] = self._default def get_default_namespace(self): return self._default def add_namespace(self, namespace): if namespace in self.values(): # no need to do anything return if namespace in self._rename_map: # already renamed and added return prefix = namespace.get_prefix() if prefix in self: # Conflicting prefix new_prefix = self._get_unused_prefix(prefix) new_namespace = Namespace(new_prefix, namespace.get_uri()) self._rename_map[namespace] = new_namespace prefix = new_prefix namespace = new_namespace self._namespaces[prefix] = namespace self[prefix] = namespace return namespace def add_namespaces(self, namespaces): if namespaces: for prefix, uri in namespaces.items(): ns = Namespace(prefix, uri) self.add_namespace(ns) def get_valid_identifier(self, identifier): if not identifier: return None if isinstance(identifier, Identifier): if isinstance(identifier, QName): # Register the namespace if it has not been registered before namespace = identifier._namespace prefix = namespace.get_prefix() if prefix in self and self[prefix] == namespace: # No need to add the namespace existing_ns = self[prefix] if existing_ns is namespace: return identifier else: return existing_ns[identifier._localpart] # reuse the existing namespace else: ns = self.add_namespace(deepcopy(namespace)) # Do not reuse the namespace object return ns[identifier._localpart] else: # return the original identifier return identifier elif isinstance(identifier, (str, unicode)): if identifier.startswith('_:'): return None elif ':' in identifier: # check if the identifier contains a registered prefix prefix, local_part = identifier.split(':', 1) if prefix in self: # return a new QName return self[prefix][local_part] else: # treat as a URI (with the first part as its scheme) # check if the URI can be compacted for namespace in self.values(): if identifier.startswith(namespace.get_uri()): # create a QName with the namespace return namespace[identifier.replace(namespace.get_uri(), '')] if self.parent is not None: # try the parent namespace manager return self.parent.get_valid_identifier(identifier) else: # return an Identifier with the given URI return Identifier(identifier) elif self._default: # create and return an identifier in the default namespace return self._default[identifier] else: # TODO Should an exception raised here return Identifier(identifier) def get_anonymous_identifier(self, local_prefix='id'): self._anon_id_count += 1 return Identifier('_:%s%d' % (local_prefix, self._anon_id_count)) def _get_unused_prefix(self, original_prefix): if original_prefix not in self: return original_prefix count = 1 while True: new_prefix = '_'.join((original_prefix, unicode(count))) if new_prefix in self: count += 1 else: return new_prefix class ProvBundle(ProvEntity): def __init__(self, bundle=None, identifier=None, attributes=None, other_attributes=None, asserted=True, namespaces={}): # Initializing bundle-specific attributes self._records = list() self._id_map = dict() self._bundles = dict() if bundle is None: self._namespaces = NamespaceManager(namespaces) else: self._namespaces = bundle._namespaces self._namespaces.add_namespaces(namespaces) # Initializing record-specific attributes super(ProvBundle, self).__init__(bundle, identifier, attributes, other_attributes, asserted) # Bundle configurations def set_default_namespace(self, uri): self._namespaces.set_default_namespace(uri) def get_default_namespace(self): return self._namespaces.get_default_namespace() def 
add_namespace(self, namespace_or_prefix, uri=None): if self._bundle is not None: # This is a bundle logger.warn("Namespace cannot be added into a bundle. It will be added to the document instead.") if uri is None: self._namespaces.add_namespace(namespace_or_prefix) else: self._namespaces.add_namespace(Namespace(namespace_or_prefix, uri)) def get_registered_namespaces(self): return self._namespaces.get_registered_namespaces() def valid_identifier(self, identifier): return self._namespaces.get_valid_identifier(identifier) def get_anon_id(self, record): # TODO Implement a dict of self-generated anon ids for records without identifier return self._namespaces.get_anonymous_identifier() def get_records(self, class_or_type_or_tuple=None): if class_or_type_or_tuple is None: return self._records else: return filter(lambda rec: isinstance(rec, class_or_type_or_tuple), self._records) def get_record(self, identifier): if identifier is None: return None valid_id = self.valid_identifier(identifier) try: return self._id_map[valid_id] except: # looking up the parent bundle if self._bundle is not None: return self._bundle.get_record(valid_id) else: return None def get_bundle(self, identifier): try: valid_id = self.valid_identifier(identifier) return self._bundles[valid_id] except: # looking up the parent bundle if self._bundle is not None: return self._bundle.get_bundle(valid_id) else: return None # PROV-JSON serialization/deserialization class JSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, ProvBundle): return o._encode_JSON_container() else: # Use the default encoder instead return json.JSONEncoder.default(self, o) class JSONDecoder(json.JSONDecoder): def decode(self, s): json_container = json.JSONDecoder.decode(self, s) result = ProvBundle() result._decode_JSON_container(json_container) return result def _encode_json_representation(self, value): try: return value.json_representation() except AttributeError: if isinstance(value, datetime.datetime): return {'$': value.isoformat(), 'type': u'xsd:dateTime'} else: return value def _decode_json_representation(self, literal): try: value = literal['$'] if 'lang' in literal: return Literal(value, langtag=literal['lang']) else: datatype = literal['type'] if datatype == u'xsd:anyURI': return Identifier(value) elif datatype == u'xsd:QName': return self.valid_identifier(value) else: # The literal of standard Python types is not converted here # It will be automatically converted when added to a record by _auto_literal_conversion() return Literal(value, self.valid_identifier(datatype)) except: # simple type, just return it return literal def _encode_JSON_container(self): container = defaultdict(dict) if self._bundle is None: # This is a document prefixes = {} for namespace in self._namespaces.get_registered_namespaces(): prefixes[namespace.get_prefix()] = namespace.get_uri() if self._namespaces._default: prefixes['default'] = self._namespaces._default.get_uri() if prefixes: container[u'prefix'] = prefixes id_generator = AnonymousIDGenerator() real_or_anon_id = lambda record: record._identifier if record._identifier else id_generator.get_anon_id(record) for record in self._records: if not record.is_asserted(): continue # skipping inferred records rec_type = record.get_type() rec_label = PROV_N_MAP[rec_type] identifier = unicode(real_or_anon_id(record)) if rec_type == PROV_REC_BUNDLE: # encoding the sub-bundle record_json = record._encode_JSON_container() else: record_json = {} if record._attributes: for (attr, value) in 
record._attributes.items(): if isinstance(value, ProvRecord): attr_record_id = real_or_anon_id(value) record_json[PROV_ID_ATTRIBUTES_MAP[attr]] = unicode(attr_record_id) elif value is not None: # Assuming this is a datetime value record_json[PROV_ID_ATTRIBUTES_MAP[attr]] = value.isoformat() if isinstance(value, datetime.datetime) else unicode(value) if record._extra_attributes: for (attr, value) in record._extra_attributes: attr_id = unicode(attr) value_json = self._encode_json_representation(value) if attr_id in record_json: # Multi-value attribute existing_value = record_json[attr_id] try: # Add the value to the current list of values existing_value.append(value_json) except: # But if the existing value is not a list, it'll fail # create the list for the existing value and the second value record_json[attr_id] = [existing_value, value_json] else: record_json[attr_id] = value_json container[rec_label][identifier] = record_json return container def _decode_JSON_container(self, jc): if u'prefix' in jc: prefixes = jc[u'prefix'] for prefix, uri in prefixes.items(): if prefix != 'default': self.add_namespace(Namespace(prefix, uri)) else: self.set_default_namespace(uri) records = sorted([(PROV_RECORD_IDS_MAP[rec_type], rec_id, jc[rec_type][rec_id]) for rec_type in jc if rec_type != u'prefix' for rec_id in jc[rec_type]], key=lambda tuple_rec: tuple_rec[0]) record_map = {} _parse_attr_value = lambda value: record_map[value] if (isinstance(value, basestring) and value in record_map) else self._decode_json_representation(value) # Create all the records before setting their attributes for (record_type, identifier, content) in records: if record_type == PROV_REC_BUNDLE: bundle = self.bundle(identifier) bundle._decode_JSON_container(content) else: record_map[identifier] = self.add_record(record_type, identifier, None, None) for (record_type, identifier, attributes) in records: if record_type != PROV_REC_BUNDLE: record = record_map[identifier] if hasattr(attributes, 'items'): # it is a dict # There is only one element, create a singleton list elements = [attributes] else: # expect it to be a list of dictionaries elements = attributes for element in elements: prov_attributes = {} extra_attributes = [] # Splitting PROV attributes and the others membership_extra_members = None # this is for the multiple-entity membership hack to come for attr, value in element.items(): if attr in PROV_ATTRIBUTES_ID_MAP: attr_id = PROV_ATTRIBUTES_ID_MAP[attr] if isinstance(value, list): # Multiple values if len(value) == 1: # Only a single value in the list, unpack it value = value[0] else: if record.get_type() == PROV_REC_MEMBERSHIP and attr_id == PROV_ATTR_ENTITY: # This is a membership relation with multiple entities # HACK: create multiple membership relations, one for each entity membership_extra_members = value[1:] # Store all the extra entities value = value[0] # Create the first membership relation as normal for the first entity else: error_msg = 'The prov package does not support PROV attributes having multiple values.' 
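# Editorial note (hedged, hypothetical identifiers): the membership branch just
# above handles PROV-JSON input where a single hadMember record lists several
# entities, for example:
#     "hadMember": {"_:m1": {"prov:collection": "ex:c",
#                            "prov:entity": ["ex:e1", "ex:e2"]}}
# The first entity stays on this record; the remaining ones are re-asserted as
# separate membership relations further below.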
logger.error(error_msg) raise ProvException(error_msg) prov_attributes[attr_id] = _parse_attr_value(value) else: attr_id = self.valid_identifier(attr) if isinstance(value, list): # Parsing multi-value attribute extra_attributes.extend((attr_id, self._decode_json_representation(value_single)) for value_single in value) else: # add the single-value attribute extra_attributes.append((attr_id, self._decode_json_representation(value))) record.add_attributes(prov_attributes, extra_attributes) # HACK: creating extra (unidentified) membership relations if membership_extra_members: collection = prov_attributes[PROV_ATTR_COLLECTION] for member in membership_extra_members: self.membership(collection, _parse_attr_value(member), None, extra_attributes) # Miscellaneous functions def is_document(self): return self._bundle is None def is_bundle(self): return self._bundle is not None def get_type(self): return PROV_REC_BUNDLE def get_provn(self, _indent_level=0, asserted_only=True): indentation = '' + (' ' * _indent_level) newline = '\n' + (' ' * (_indent_level + 1)) # if this is the document, start the document; otherwise, start the bundle records = ['document'] if self._bundle is None else ['bundle %s' % self._identifier] if self._bundle is None: # Only output the namespaces of a document default_namespace = self._namespaces.get_default_namespace() if default_namespace: records.append('default <%s>' % default_namespace.get_uri()) registered_namespaces = self._namespaces.get_registered_namespaces() if registered_namespaces: records.extend(['prefix %s <%s>' % (namespace.get_prefix(), namespace.get_uri()) for namespace in registered_namespaces]) if default_namespace or registered_namespaces: # a blank line between the prefixes and the assertions records.append('') # adding all the records records.extend([record.get_provn(_indent_level + 1) for record in self._records if record.is_asserted() or not asserted_only]) provn_str = newline.join(records) + '\n' # closing the structure provn_str += indentation + ('endDocument' if self._bundle is None else 'endBundle') return provn_str def rdf(self, graph=None): if self._bundle is None: # top bundle if graph is None: graph = ConjunctiveGraph() else: # graph should not None here uri = self.get_identifier().rdf_representation() graph = Graph(graph.store, uri) for prefix, namespace in self._namespaces.items(): graph.bind(prefix, namespace.get_uri()) for record in self._records: if record.is_asserted(): record.rdf(graph) return graph def get_provjson(self, **kw): """Return the `PROV-JSON `_ representation for the bundle/document. Parameters for `json.dumps `_ like `indent=4` can be also passed as keyword arguments. """ # Prevent overwriting the encoder class if 'cls' in kw: del kw['cls'] json_content = json.dumps(self, cls=ProvBundle.JSONEncoder, **kw) return json_content @staticmethod def from_provjson(json_content, **kw): """Construct the bundle/document from the given `PROV-JSON `_ representation. Parameters for `json.loads `_ can be also passed as keyword arguments. 
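        Illustrative round-trip sketch (editorial addition; the ``ex:``
        identifiers are hypothetical, not part of this module)::

            doc = ProvBundle()
            doc.add_namespace('ex', 'http://example.org/')
            report = doc.entity('ex:report')
            writing = doc.activity('ex:writing')
            doc.wasGeneratedBy(report, writing)

            json_str = doc.get_provjson(indent=2)
            copy = ProvBundle.from_provjson(json_str)
            # copy is expected to compare equal to doc for a document like this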
""" # Prevent overwriting the decoder class if 'cls' in kw: del kw['cls'] return json.loads(json_content, cls=ProvBundle.JSONDecoder, **kw) def get_flattened(self): namespaces = dict((ns.get_prefix(), ns.get_uri()) for ns in self.get_registered_namespaces()) document = ProvBundle(namespaces=namespaces) default_ns_uri = self.get_default_namespace() if default_ns_uri is not None: document.set_default_namespace(default_ns_uri) # Enumerate records and bundles bundles = [] records = [] for record in self.get_records(): if isinstance(record, ProvBundle): bundles.append(record) else: records.append(record) records = deepcopy(records) for record in records: document._add_record(record) for bundle in bundles: for record in bundle._records: document.add_record(record.get_type(), copy(record._identifier), deepcopy(record._attributes), deepcopy(record._extra_attributes), record._asserted) return document def __eq__(self, other): try: other_records = set(other._records) except: # other is not a bundle return False this_records = set(self._records) if len(this_records) != len(other_records): return False # check if all records for equality for record_a in this_records: if record_a._identifier: if record_a.get_type() == PROV_REC_BUNDLE: record_b = other.get_bundle(record_a._identifier) else: record_b = other.get_record(record_a._identifier) if record_b: if record_a == record_b: other_records.remove(record_b) continue else: logger.debug("Unequal PROV records:") logger.debug("%s" % unicode(record_a)) logger.debug("%s" % unicode(record_b)) return False else: logger.debug("Could not find a record with this identifier: %s" % unicode(record_a._identifier)) return False else: # Manually look for the record found = False for record_b in other_records: if record_a == record_b: other_records.remove(record_b) found = True break if not found: logger.debug("Could not find this record: %s" % unicode(record_a)) return False return True # Provenance statements def _add_record(self, record): if record._identifier: if record.get_type() == PROV_REC_BUNDLE: # Don't mix bunle ids with normal record ids. 
self._bundles[record._identifier] = record self._records.append(record) else: if record._identifier in self._id_map: merge_target = self._id_map[record._identifier] if not merge_target._asserted and record._asserted: if record.__class__ in merge_target.get_allowed_types(): for attribute_id, attribute in merge_target._infered_for._attributes.iteritems(): if attribute == merge_target: merge_target._infered_for._attributes[attribute_id] = record self._records.remove(merge_target) self._id_map[record._identifier] = record self._records.append(record) else: raise ProvExceptionCannotUnifyAttribute(record._identifier, merge_target.get_type(), record.get_type()) else: if record.get_type() != merge_target.get_type(): raise ProvExceptionCannotUnifyAttribute(record._identifier, merge_target.get_type(), record.get_type()) merge_target.add_attributes(record._attributes, record._extra_attributes) else: self._records.append(record) self._id_map[record._identifier] = record else: self._records.append(record) def add_record(self, record_type, identifier, attributes=None, other_attributes=None, asserted=True): new_record = PROV_REC_CLS[record_type](self, self.valid_identifier(identifier), attributes, other_attributes, asserted) self._add_record(new_record) return new_record def add_inferred_record(self, record_cls, identifier, infered_for, allowed_types): record_id = self.valid_identifier(identifier) record = record_cls(self, record_id, asserted=False, allowed_types=allowed_types, infered_for=infered_for) self._add_record(record) return record def add_bundle(self, bundle, identifier=None): '''Add a bundle to the current document ''' if identifier == None: identifier = bundle.get_identifier() if not identifier: raise ProvException(u"The added bundle has no identifier") valid_id = self.valid_identifier(identifier) bundle._identifier = valid_id if valid_id in self._bundles: raise ProvException(u"A bundle with that identifier already exists") if len(bundle._bundles) > 0: raise ProvException(u"A bundle may not contain bundles") self._bundles[valid_id] = bundle self._records.append(bundle) for namespace in bundle.get_registered_namespaces(): self.add_namespace(namespace) bundle._bundle = self def add_element(self, record_type, identifier, attributes=None, other_attributes=None): return self.add_record(record_type, identifier, attributes, other_attributes) def entity(self, identifier, other_attributes=None): return self.add_element(PROV_REC_ENTITY, identifier, None, other_attributes) def activity(self, identifier, startTime=None, endTime=None, other_attributes=None): return self.add_element(PROV_REC_ACTIVITY, identifier, {PROV_ATTR_STARTTIME: _ensure_datetime(startTime), PROV_ATTR_ENDTIME: _ensure_datetime(endTime)}, other_attributes) def generation(self, entity, activity=None, time=None, identifier=None, other_attributes=None): return self.add_record(PROV_REC_GENERATION, identifier, {PROV_ATTR_ENTITY: entity, PROV_ATTR_ACTIVITY: activity, PROV_ATTR_TIME: _ensure_datetime(time)}, other_attributes) def usage(self, activity, entity=None, time=None, identifier=None, other_attributes=None): return self.add_record(PROV_REC_USAGE, identifier, {PROV_ATTR_ACTIVITY: activity, PROV_ATTR_ENTITY: entity, PROV_ATTR_TIME: _ensure_datetime(time)}, other_attributes) def start(self, activity, trigger=None, starter=None, time=None, identifier=None, other_attributes=None): return self.add_record(PROV_REC_START, identifier, {PROV_ATTR_ACTIVITY: activity, PROV_ATTR_TRIGGER: trigger, PROV_ATTR_STARTER: starter, PROV_ATTR_TIME: 
_ensure_datetime(time)}, other_attributes) def end(self, activity, trigger=None, ender=None, time=None, identifier=None, other_attributes=None): return self.add_record(PROV_REC_END, identifier, {PROV_ATTR_ACTIVITY: activity, PROV_ATTR_TRIGGER: trigger, PROV_ATTR_ENDER: ender, PROV_ATTR_TIME: _ensure_datetime(time)}, other_attributes) def invalidation(self, entity, activity=None, time=None, identifier=None, other_attributes=None): return self.add_record(PROV_REC_INVALIDATION, identifier, {PROV_ATTR_ENTITY: entity, PROV_ATTR_ACTIVITY: activity, PROV_ATTR_TIME: _ensure_datetime(time)}, other_attributes) def communication(self, informed, informant, identifier=None, other_attributes=None): return self.add_record(PROV_REC_COMMUNICATION, identifier, {PROV_ATTR_INFORMED: informed, PROV_ATTR_INFORMANT: informant}, other_attributes) def agent(self, identifier, other_attributes=None): return self.add_element(PROV_REC_AGENT, identifier, None, other_attributes) def attribution(self, entity, agent, identifier=None, other_attributes=None): return self.add_record(PROV_REC_ATTRIBUTION, identifier, {PROV_ATTR_ENTITY: entity, PROV_ATTR_AGENT: agent}, other_attributes) def association(self, activity, agent=None, plan=None, identifier=None, other_attributes=None): return self.add_record(PROV_REC_ASSOCIATION, identifier, {PROV_ATTR_ACTIVITY: activity, PROV_ATTR_AGENT: agent, PROV_ATTR_PLAN: plan}, other_attributes) def delegation(self, delegate, responsible, activity=None, identifier=None, other_attributes=None): return self.add_record(PROV_REC_DELEGATION, identifier, {PROV_ATTR_DELEGATE: delegate, PROV_ATTR_RESPONSIBLE: responsible, PROV_ATTR_ACTIVITY: activity}, other_attributes) def influence(self, influencee, influencer, identifier=None, other_attributes=None): return self.add_record(PROV_REC_INFLUENCE, identifier, {PROV_ATTR_INFLUENCEE: influencee, PROV_ATTR_INFLUENCER: influencer}, other_attributes) def derivation(self, generatedEntity, usedEntity, activity=None, generation=None, usage=None, time=None, identifier=None, other_attributes=None): attributes = {PROV_ATTR_GENERATED_ENTITY: generatedEntity, PROV_ATTR_USED_ENTITY: usedEntity, PROV_ATTR_ACTIVITY: activity, PROV_ATTR_GENERATION: generation, PROV_ATTR_USAGE: usage} return self.add_record(PROV_REC_DERIVATION, identifier, attributes, other_attributes) def revision(self, generatedEntity, usedEntity, activity=None, generation=None, usage=None, time=None, identifier=None, other_attributes=None): record = self.derivation(generatedEntity, usedEntity, activity, generation, usage, time, identifier, other_attributes) record.add_asserted_type(PROV['Revision']) return record def quotation(self, generatedEntity, usedEntity, activity=None, generation=None, usage=None, time=None, identifier=None, other_attributes=None): record = self.derivation(generatedEntity, usedEntity, activity, generation, usage, time, identifier, other_attributes) record.add_asserted_type(PROV['Quotation']) return record def primary_source(self, generatedEntity, usedEntity, activity=None, generation=None, usage=None, time=None, identifier=None, other_attributes=None): record = self.derivation(generatedEntity, usedEntity, activity, generation, usage, time, identifier, other_attributes) record.add_asserted_type(PROV['PrimarySource']) return record def specialization(self, specificEntity, generalEntity, identifier=None, other_attributes=None): return self.add_record(PROV_REC_SPECIALIZATION, identifier, {PROV_ATTR_SPECIFIC_ENTITY: specificEntity, PROV_ATTR_GENERAL_ENTITY: generalEntity}, 
other_attributes) def alternate(self, alternate1, alternate2, identifier=None, other_attributes=None): return self.add_record(PROV_REC_ALTERNATE, identifier, {PROV_ATTR_ALTERNATE1: alternate1, PROV_ATTR_ALTERNATE2: alternate2}, other_attributes) def mention(self, specificEntity, generalEntity, bundle, identifier=None, other_attributes=None): return self.add_record(PROV_REC_MENTION, identifier, {PROV_ATTR_SPECIFIC_ENTITY: specificEntity, PROV_ATTR_GENERAL_ENTITY: generalEntity, PROV_ATTR_BUNDLE: bundle}, other_attributes) def collection(self, identifier, other_attributes=None): record = self.add_element(PROV_REC_ENTITY, identifier, None, other_attributes) record.add_asserted_type(PROV['Collection']) return record def membership(self, collection, entity, identifier=None, other_attributes=None): return self.add_record(PROV_REC_MEMBERSHIP, identifier, {PROV_ATTR_COLLECTION: collection, PROV_ATTR_ENTITY: entity}, other_attributes) def bundle(self, identifier, other_attributes=None): return self.add_element(PROV_REC_BUNDLE, identifier, None, other_attributes) # Aliases wasGeneratedBy = generation used = usage wasStartedBy = start wasEndedBy = end wasInvalidatedBy = invalidation wasInformedBy = communication wasAttributedTo = attribution wasAssociatedWith = association actedOnBehalfOf = delegation wasInfluencedBy = influence wasDerivedFrom = derivation wasRevisionOf = revision wasQuotedFrom = quotation hadPrimarySource = primary_source alternateOf = alternate specializationOf = specialization mentionOf = mention hadMember = membership # Add the newly defined ProvBundle into the PROV class dictionary PROV_REC_CLS[PROV_REC_BUNDLE] = ProvBundle nipype-0.9.2/nipype/external/setup.py000066400000000000000000000005541227300005300176620ustar00rootroot00000000000000import os def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('external', parent_package, top_path) config.add_data_files('d3.v3.min.js') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/fixes/000077500000000000000000000000001227300005300154405ustar00rootroot00000000000000nipype-0.9.2/nipype/fixes/README.txt000066400000000000000000000006061227300005300171400ustar00rootroot00000000000000This directory is meant to contain fixes to external packages, such as scipy, numpy that are meant to eventually be moved upstream to these packages. When these changes find their way upstream and are released, they can be deleted from the "fixes" directory when new versions of NIPY are released. PACKAGES/MODULES: --------- scipy/stats_models: corresponds to module "scipy.stats.models"nipype-0.9.2/nipype/fixes/__init__.py000066400000000000000000000012671227300005300175570ustar00rootroot00000000000000# We import numpy fixes during init of the testing package. We need to delay # import of the testing package until after it has initialized from os.path import dirname # Cache for the actual testing functin _tester = None def test(*args, **kwargs): """ test function for fixes subpackage This function defers import of the testing machinery so it can import from us first. 
See nipy.test docstring for parameters and return values """ global _tester if _tester is None: from nipy.testing import Tester _tester = Tester(dirname(__file__)).test return _tester(*args, **kwargs) # Remind nose not to test the test function test.__test__ = False nipype-0.9.2/nipype/fixes/numpy/000077500000000000000000000000001227300005300166105ustar00rootroot00000000000000nipype-0.9.2/nipype/fixes/numpy/__init__.py000066400000000000000000000000261227300005300207170ustar00rootroot00000000000000# numpy fixes package nipype-0.9.2/nipype/fixes/numpy/testing/000077500000000000000000000000001227300005300202655ustar00rootroot00000000000000nipype-0.9.2/nipype/fixes/numpy/testing/__init__.py000066400000000000000000000000471227300005300223770ustar00rootroot00000000000000# Package init for fixes.numpy.testing nipype-0.9.2/nipype/fixes/numpy/testing/noseclasses.py000066400000000000000000000340151227300005300231640ustar00rootroot00000000000000# These classes implement a doctest runner plugin for nose, a "known failure" # error class, and a customized TestProgram for NumPy. # Because this module imports nose directly, it should not # be used except by nosetester.py to avoid a general NumPy # dependency on nose. import os import doctest import nose from nose.plugins import doctests as npd from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin from nose.plugins.base import Plugin from nose.util import src import numpy from nosetester import get_package_name import inspect # Some of the classes in this module begin with 'Numpy' to clearly distinguish # them from the plethora of very similar names from nose/unittest/doctest #----------------------------------------------------------------------------- # Modified version of the one in the stdlib, that fixes a python bug (doctests # not found in extension modules, http://bugs.python.org/issue3158) class NumpyDocTestFinder(doctest.DocTestFinder): def _from_module(self, module, object): """ Return true if the given object is defined in the given module. """ if module is None: #print '_fm C1' # dbg return True elif inspect.isfunction(object): #print '_fm C2' # dbg return module.__dict__ is object.func_globals elif inspect.isbuiltin(object): #print '_fm C2-1' # dbg return module.__name__ == object.__module__ elif inspect.isclass(object): #print '_fm C3' # dbg return module.__name__ == object.__module__ elif inspect.ismethod(object): # This one may be a bug in cython that fails to correctly set the # __module__ attribute of methods, but since the same error is easy # to make by extension code writers, having this safety in place # isn't such a bad idea #print '_fm C3-1' # dbg return module.__name__ == object.im_class.__module__ elif inspect.getmodule(object) is not None: #print '_fm C4' # dbg #print 'C4 mod',module,'obj',object # dbg return module is inspect.getmodule(object) elif hasattr(object, '__module__'): #print '_fm C5' # dbg return module.__name__ == object.__module__ elif isinstance(object, property): #print '_fm C6' # dbg return True # [XX] no way not be sure. else: raise ValueError("object must be a class or function") def _find(self, tests, obj, name, module, source_lines, globs, seen): """ Find tests for the given object and any contained objects, and add them to `tests`. """ doctest.DocTestFinder._find(self,tests, obj, name, module, source_lines, globs, seen) # Below we re-run pieces of the above method with manual modifications, # because the original code is buggy and fails to correctly identify # doctests in extension modules. 
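        # In outline: the code below walks obj.__dict__ itself, unwraps
        # staticmethod/classmethod objects to reach the underlying function,
        # and recurses into routines, classes, methods and properties that
        # pass the _from_module() check, so that doctests attached to
        # extension-module classes and their methods are still collected.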
# Local shorthands from inspect import isroutine, isclass, ismodule, isfunction, \ ismethod # Look for tests in a module's contained objects. if ismodule(obj) and self._recurse: for valname, val in obj.__dict__.items(): valname1 = '%s.%s' % (name, valname) if ( (isroutine(val) or isclass(val)) and self._from_module(module, val) ): self._find(tests, val, valname1, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if isclass(obj) and self._recurse: #print 'RECURSE into class:',obj # dbg for valname, val in obj.__dict__.items(): #valname1 = '%s.%s' % (name, valname) # dbg #print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).im_func # Recurse to methods, properties, and nested classes. if ((isfunction(val) or isclass(val) or ismethod(val) or isinstance(val, property)) and self._from_module(module, val)): valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # second-chance checker; if the default comparison doesn't # pass, then see if the expected output string contains flags that # tell us to ignore the output class NumpyOutputChecker(doctest.OutputChecker): def check_output(self, want, got, optionflags): ret = doctest.OutputChecker.check_output(self, want, got, optionflags) if not ret: if "#random" in want: return True # it would be useful to normalize endianness so that # bigendian machines don't fail all the tests (and there are # actually some bigendian examples in the doctests). Let's try # making them all little endian got = got.replace("'>","'<") want= want.replace("'>","'<") # try to normalize out 32 and 64 bit default int sizes for sz in [4,8]: got = got.replace("'>> np.testing.nosetester.get_package_name('nonsense') 'numpy' """ fullpath = filepath[:] pkg_name = [] while 'site-packages' in filepath or 'dist-packages' in filepath: filepath, p2 = os.path.split(filepath) if p2 in ('site-packages', 'dist-packages'): break pkg_name.append(p2) # if package name determination failed, just default to numpy/scipy if not pkg_name: if 'scipy' in fullpath: return 'scipy' else: return 'numpy' # otherwise, reverse to get correct order and return pkg_name.reverse() # don't include the outer egg directory if pkg_name[0].endswith('.egg'): pkg_name.pop(0) return '.'.join(pkg_name) def import_nose(): """ Import nose only when needed. """ fine_nose = True minimum_nose_version = (0,10,0) try: import nose from nose.tools import raises except ImportError: fine_nose = False else: if nose.__versioninfo__ < minimum_nose_version: fine_nose = False if not fine_nose: msg = 'Need nose >= %d.%d.%d for tests - see ' \ 'http://somethingaboutorange.com/mrl/projects/nose' % \ minimum_nose_version raise ImportError(msg) return nose def run_module_suite(file_to_run = None): if file_to_run is None: f = sys._getframe(1) file_to_run = f.f_locals.get('__file__', None) if file_to_run is None: raise AssertionError import_nose().run(argv=['',file_to_run]) class NoseTester(object): """ Nose test runner. This class is made available as numpy.testing.Tester, and a test function is typically added to a package's __init__.py like so:: from numpy.testing import Tester test = Tester().test Calling this test function finds and runs all tests associated with the package and all its sub-packages. Attributes ---------- package_path : str Full path to the package to test. 
package_name : str Name of the package to test. Parameters ---------- package : module, str or None The package to test. If a string, this should be the full path to the package. If None (default), `package` is set to the module from which `NoseTester` is initialized. """ # Stuff to exclude from tests. These are from numpy.distutils excludes = ['f2py_ext', 'f2py_f90_ext', 'gen_ext', 'pyrex_ext', 'swig_ext'] def __init__(self, package=None): ''' Test class init Parameters ---------- package : string or module If string, gives full path to package If None, extract calling module path Default is None ''' package_name = None if package is None: f = sys._getframe(1) package_path = f.f_locals.get('__file__', None) if package_path is None: raise AssertionError package_path = os.path.dirname(package_path) package_name = f.f_locals.get('__name__', None) elif isinstance(package, type(os)): package_path = os.path.dirname(package.__file__) package_name = getattr(package, '__name__', None) else: package_path = str(package) self.package_path = package_path # find the package name under test; this name is used to limit coverage # reporting (if enabled) if package_name is None: package_name = get_package_name(package_path) self.package_name = package_name def _test_argv(self, label, verbose, extra_argv): ''' Generate argv for nosetest command Parameters ---------- label : {'fast', 'full', '', attribute identifier}, optional see ``test`` docstring verbose : int, optional Verbosity value for test outputs, in the range 1-10. Default is 1. extra_argv : list, optional List with any extra arguments to pass to nosetests. Returns ------- argv : list command line arguments that will be passed to nose ''' argv = [__file__, self.package_path, '-s'] if label and label != 'full': if not isinstance(label, basestring): raise TypeError('Selection label should be a string') if label == 'fast': label = 'not slow' argv += ['-A', label] argv += ['--verbosity', str(verbose)] if extra_argv: argv += extra_argv return argv def _show_system_info(self): nose = import_nose() import numpy print "NumPy version %s" % numpy.__version__ npdir = os.path.dirname(numpy.__file__) print "NumPy is installed in %s" % npdir if 'scipy' in self.package_name: import scipy print "SciPy version %s" % scipy.__version__ spdir = os.path.dirname(scipy.__file__) print "SciPy is installed in %s" % spdir pyversion = sys.version.replace('\n','') print "Python version %s" % pyversion print "nose version %d.%d.%d" % nose.__versioninfo__ def _get_custom_doctester(self): """ Return instantiated plugin for doctests Allows subclassing of this class to override doctester A return value of None means use the nose builtin doctest plugin """ from noseclasses import NumpyDoctest return NumpyDoctest() def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, doctests=False, coverage=False): """ Run tests for module using nose. This method does the heavy lifting for the `test` method. It takes all the same arguments, for details see `test`. 
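        The return value is the ``(argv, plugins)`` pair that `test` passes
        straight on to ``NumpyTestProgram``; for example (with ``pkg``
        standing in for an imported package module),
        ``argv, plugins = NoseTester(pkg).prepare_test_args(doctests=True)``.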
See Also -------- test """ # fail with nice error message if nose is not present import_nose() # compile argv argv = self._test_argv(label, verbose, extra_argv) # bypass tests noted for exclude for ename in self.excludes: argv += ['--exclude', ename] # our way of doing coverage if coverage: argv+=['--cover-package=%s' % self.package_name, '--with-coverage', '--cover-tests', '--cover-inclusive', '--cover-erase'] # construct list of plugins import nose.plugins.builtin from noseclasses import KnownFailure, Unplugger plugins = [KnownFailure()] plugins += [p() for p in nose.plugins.builtin.plugins] # add doctesting if required doctest_argv = '--with-doctest' in argv if doctests == False and doctest_argv: doctests = True plug = self._get_custom_doctester() if plug is None: # use standard doctesting if doctests and not doctest_argv: argv += ['--with-doctest'] else: # custom doctesting if doctest_argv: # in fact the unplugger would take care of this argv.remove('--with-doctest') plugins += [Unplugger('doctest'), plug] if doctests: argv += ['--with-' + plug.name] return argv, plugins def test(self, label='fast', verbose=1, extra_argv=None, doctests=False, coverage=False): """ Run tests for module using nose. Parameters ---------- label : {'fast', 'full', '', attribute identifier}, optional Identifies the tests to run. This can be a string to pass to the nosetests executable with the '-A' option, or one of several special values. Special values are: * 'fast' - the default - which corresponds to the ``nosetests -A`` option of 'not slow'. * 'full' - fast (as above) and slow tests as in the 'no -A' option to nosetests - this is the same as ''. * None or '' - run all tests. attribute_identifier - string passed directly to nosetests as '-A'. verbose : int, optional Verbosity value for test outputs, in the range 1-10. Default is 1. extra_argv : list, optional List with any extra arguments to pass to nosetests. doctests : bool, optional If True, run doctests in module. Default is False. coverage : bool, optional If True, report coverage of NumPy code. Default is False. (This requires the `coverage module: `_). Returns ------- result : object Returns the result of running the tests as a ``nose.result.TextTestResult`` object. Notes ----- Each NumPy module exposes `test` in its namespace to run all tests for it. For example, to run all tests for numpy.lib: >>> np.lib.test() #doctest: +SKIP Examples -------- >>> result = np.lib.test() #doctest: +SKIP Running unit tests for numpy.lib ... Ran 976 tests in 3.933s OK >>> result.errors #doctest: +SKIP [] >>> result.knownfail #doctest: +SKIP [] """ # cap verbosity at 3 because nose becomes *very* verbose beyond that verbose = min(verbose, 3) import utils utils.verbose = verbose if doctests: print "Running unit tests and doctests for %s" % self.package_name else: print "Running unit tests for %s" % self.package_name self._show_system_info() # reset doctest state on every run import doctest doctest.master = None argv, plugins = self.prepare_test_args(label, verbose, extra_argv, doctests, coverage) from noseclasses import NumpyTestProgram t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) return t.result def bench(self, label='fast', verbose=1, extra_argv=None): """ Run benchmarks for module using nose. Parameters ---------- label : {'fast', 'full', '', attribute identifier}, optional Identifies the benchmarks to run. This can be a string to pass to the nosetests executable with the '-A' option, or one of several special values. 
Special values are: * 'fast' - the default - which corresponds to the ``nosetests -A`` option of 'not slow'. * 'full' - fast (as above) and slow benchmarks as in the 'no -A' option to nosetests - this is the same as ''. * None or '' - run all tests. attribute_identifier - string passed directly to nosetests as '-A'. verbose : int, optional Verbosity value for benchmark outputs, in the range 1-10. Default is 1. extra_argv : list, optional List with any extra arguments to pass to nosetests. Returns ------- success : bool Returns True if running the benchmarks works, False if an error occurred. Notes ----- Benchmarks are like tests, but have names starting with "bench" instead of "test", and can be found under the "benchmarks" sub-directory of the module. Each NumPy module exposes `bench` in its namespace to run all benchmarks for it. Examples -------- >>> success = np.lib.bench() #doctest: +SKIP Running benchmarks for numpy.lib ... using 562341 items: unique: 0.11 unique1d: 0.11 ratio: 1.0 nUnique: 56230 == 56230 ... OK >>> success #doctest: +SKIP True """ print "Running benchmarks for %s" % self.package_name self._show_system_info() argv = self._test_argv(label, verbose, extra_argv) argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] # import nose or make informative error nose = import_nose() # get plugin to disable doctests from noseclasses import Unplugger add_plugins = [Unplugger('doctest')] return nose.run(argv=argv, addplugins=add_plugins) nipype-0.9.2/nipype/fixes/numpy/testing/utils.py000066400000000000000000000001361227300005300217770ustar00rootroot00000000000000# Allow numpy fixes noseclasses to do local import of utils from numpy.testing.utils import * nipype-0.9.2/nipype/fixes/setup.py000066400000000000000000000007641227300005300171610ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fixes', parent_package, top_path) config.add_subpackage('numpy') config.add_subpackage('numpy.testing') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/info.py000066400000000000000000000114571227300005300156370ustar00rootroot00000000000000""" This file contains defines parameters for nipy that we use to fill settings in setup.py, the nipy top-level docstring, and for building the docs. In setup.py in particular, we exec this file, so it cannot import nipy """ # nipy version information. An empty _version_extra corresponds to a # full release. '.dev' as a _version_extra string means this is a development # version _version_major = 0 _version_minor = 9 _version_micro = 2 _version_extra = '' def get_nipype_gitversion(): """Nipype version as reported by the last commit in git Returns ------- None or str Version of NiPype according to git. """ import os import subprocess try: import nipype gitpath = os.path.realpath(os.path.join(os.path.dirname(nipype.__file__), os.path.pardir)) except: gitpath = os.getcwd() gitpathgit = os.path.join(gitpath, '.git') if not os.path.exists(gitpathgit): return None ver = None try: o, _ = subprocess.Popen('git describe', shell=True, cwd=gitpath, stdout=subprocess.PIPE).communicate() except Exception: pass else: ver = o.strip().split('-')[-1] return ver if '.dev' in _version_extra: gitversion = get_nipype_gitversion() if gitversion: _version_extra = '.' 
+ gitversion + '-' + 'dev' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" __version__ = "%s.%s.%s%s" % (_version_major, _version_minor, _version_micro, _version_extra) CLASSIFIERS = ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Scientific/Engineering"] description = 'Neuroimaging in Python: Pipelines and Interfaces' # Note: this long_description is actually a copy/paste from the top-level # README.txt, so that it shows up nicely on PyPI. So please remember to edit # it only in one place and sync it correctly. long_description = \ """ ======================================================== NIPYPE: Neuroimaging in Python: Pipelines and Interfaces ======================================================== Current neuroimaging software offer users an incredible opportunity to analyze data using a variety of different algorithms. However, this has resulted in a heterogeneous collection of specialized applications without transparent interoperability or a uniform operating interface. *Nipype*, an open-source, community-developed initiative under the umbrella of NiPy, is a Python project that provides a uniform interface to existing neuroimaging software and facilitates interaction between these packages within a single workflow. Nipype provides an environment that encourages interactive exploration of algorithms from different packages (e.g., SPM, FSL, FreeSurfer, AFNI, Slicer), eases the design of workflows within and between packages, and reduces the learning curve necessary to use different packages. Nipype is creating a collaborative platform for neuroimaging software development in a high-level language and addressing limitations of existing pipeline systems. 
*Nipype* allows you to: * easily interact with tools from different software packages * combine processing steps from different software packages * develop new workflows faster by reusing common steps from old ones * process data faster by running it in parallel on many cores/machines * make your research easily reproducible * share your processing workflows with the community """ # versions NIBABEL_MIN_VERSION = '1.0' NETWORKX_MIN_VERSION = '1.0' NUMPY_MIN_VERSION = '1.3' SCIPY_MIN_VERSION = '0.7' TRAITS_MIN_VERSION = '4.0' NAME = 'nipype' MAINTAINER = "nipype developers" MAINTAINER_EMAIL = "nipy-devel@neuroimaging.scipy.org" DESCRIPTION = description LONG_DESCRIPTION = long_description URL = "http://nipy.org/nipype" DOWNLOAD_URL = "http://github.com/nipy/nipype/archives/master" LICENSE = "BSD license" CLASSIFIERS = CLASSIFIERS AUTHOR = "nipype developmers" AUTHOR_EMAIL = "nipy-devel@neuroimaging.scipy.org" PLATFORMS = "OS Independent" MAJOR = _version_major MINOR = _version_minor MICRO = _version_micro ISRELEASE = _version_extra == '' VERSION = __version__ REQUIRES = ["nibabel (>=1.0)", "networkx (>=1.0)", "numpy (>=1.3)", "scipy (>=0.7)", "traits (>=4.0)"] STATUS = 'stable' nipype-0.9.2/nipype/interfaces/000077500000000000000000000000001227300005300164455ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/__init__.py000066400000000000000000000007251227300005300205620ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package contains interfaces for using existing functionality in other packages Requires Packages to be installed """ __docformat__ = 'restructuredtext' from io import DataGrabber, DataSink, SelectFiles from utility import IdentityInterface, Rename, Function, Select, Merge import fsl, spm, freesurfer, afni, ants, slicer, dipy, nipy, mrtrix, camino nipype-0.9.2/nipype/interfaces/afni/000077500000000000000000000000001227300005300173625ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/afni/__init__.py000066400000000000000000000013261227300005300214750ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The afni module provides classes for interfacing with the `AFNI `_ command line tools. Top-level namespace for afni. """ from .base import Info from .preprocess import (To3D, Refit, Resample, TStat, Automask, Volreg, Merge, ZCutUp, Calc, TShift, Warp, Detrend, Despike, Copy, Fourier, Allineate, Maskave, SkullStrip, TCat, Fim, BlurInMask, Autobox, TCorrMap, Bandpass, Retroicor, TCorrelate, TCorr1D, BrickStat, ROIStats, AutoTcorrelate, AFNItoNIFTI) nipype-0.9.2/nipype/interfaces/afni/base.py000066400000000000000000000114511227300005300206500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Provide interface to AFNI commands.""" import os import warnings from ...utils.filemanip import split_filename from ..base import ( CommandLine, traits, CommandLineInputSpec, isdefined, File, TraitedSpec) warn = warnings.warn warnings.filterwarnings('always', category=UserWarning) class Info(object): """Handle afni output type and version information. 
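    The ``ftypes`` mapping translates an output type name into the file
    extension appended to generated outputs, for example::

        >>> Info.outputtype_to_ext('NIFTI_GZ')
        '.nii.gz'

    ('AFNI' maps to an empty string, i.e. native HEAD/BRIK output.)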
""" __outputtype = 'AFNI' ftypes = {'NIFTI': '.nii', 'AFNI': '', 'NIFTI_GZ': '.nii.gz'} @staticmethod def version(): """Check for afni version on system Parameters ---------- None Returns ------- version : str Version number as string or None if AFNI not found """ clout = CommandLine(command='afni_vcheck', terminal_output='allatonce').run() out = clout.runtime.stdout return out.split('\n')[1] @classmethod def outputtype_to_ext(cls, outputtype): """Get the file extension for the given output type. Parameters ---------- outputtype : {'NIFTI', 'NIFTI_GZ', 'AFNI'} String specifying the output type. Returns ------- extension : str The file extension for the output type. """ try: return cls.ftypes[outputtype] except KeyError: msg = 'Invalid AFNIOUTPUTTYPE: ', outputtype raise KeyError(msg) @classmethod def outputtype(cls): """AFNI has no environment variables, Output filetypes get set in command line calls Nipype uses AFNI as default Returns ------- None """ #warn(('AFNI has no environment variable that sets filetype ' # 'Nipype uses NIFTI_GZ as default')) return 'AFNI' @staticmethod def standard_image(img_name): '''Grab an image from the standard location. Could be made more fancy to allow for more relocatability''' clout = CommandLine('which afni', terminal_output='allatonce').run() if clout.runtime.returncode is not 0: return None out = clout.runtime.stdout basedir = os.path.split(out)[0] return os.path.join(basedir, img_name) class AFNICommandInputSpec(CommandLineInputSpec): outputtype = traits.Enum('AFNI', Info.ftypes.keys(), desc='AFNI output filetype') out_file = File(name_template="%s_afni", desc='output image file name', argstr='-prefix %s', name_source=["in_file"]) class AFNICommandOutputSpec(TraitedSpec): out_file = File(desc='output file', exists=True) class AFNICommand(CommandLine): input_spec = AFNICommandInputSpec _outputtype = None def __init__(self, **inputs): super(AFNICommand, self).__init__(**inputs) self.inputs.on_trait_change(self._output_update, 'outputtype') if self._outputtype is None: self._outputtype = Info.outputtype() if not isdefined(self.inputs.outputtype): self.inputs.outputtype = self._outputtype else: self._output_update() def _output_update(self): """ i think? updates class private attribute based on instance input in fsl also updates ENVIRON variable....not valid in afni as it uses no environment variables """ self._outputtype = self.inputs.outputtype @classmethod def set_default_output_type(cls, outputtype): """Set the default output type for AFNI classes. This method is used to set the default output type for all afni subclasses. However, setting this will not update the output type for any existing instances. For these, assign the .inputs.outputtype. 
""" if outputtype in Info.ftypes: cls._outputtype = outputtype else: raise AttributeError('Invalid AFNI outputtype: %s' % outputtype) def _overload_extension(self, value): path, base, _ = split_filename(value) return os.path.join(path, base + Info.outputtype_to_ext(self.inputs.outputtype)) def _list_outputs(self): outputs = super(AFNICommand, self)._list_outputs() metadata = dict(name_source=lambda t: t is not None) out_names = self.inputs.traits(**metadata).keys() if out_names: for name in out_names: if outputs[name]: _,_,ext = split_filename(outputs[name]) if ext == "": outputs[name] = outputs[name] + "+orig.BRIK" return outputs nipype-0.9.2/nipype/interfaces/afni/preprocess.py000066400000000000000000002077201227300005300221310ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft = python sts = 4 ts = 4 sw = 4 et: """Afni preprocessing interfaces Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import warnings import os import re from ..base import (Directory, TraitedSpec, traits, isdefined, File, InputMultiPath, Undefined) from ...utils.filemanip import (load_json, save_json, split_filename) from nipype.utils.filemanip import fname_presuffix from .base import AFNICommand, AFNICommandInputSpec,\ AFNICommandOutputSpec from nipype.interfaces.base import CommandLineInputSpec, CommandLine,\ OutputMultiPath warn = warnings.warn warnings.filterwarnings('always', category=UserWarning) class To3DInputSpec(AFNICommandInputSpec): out_file = File(name_template="%s", desc='output image file name', argstr='-prefix %s', name_source=["in_folder"]) in_folder = Directory(desc='folder with DICOM images to convert', argstr='%s/*.dcm', position=-1, mandatory=True, exists=True) filetype = traits.Enum('spgr', 'fse', 'epan', 'anat', 'ct', 'spct', 'pet', 'mra', 'bmap', 'diff', 'omri', 'abuc', 'fim', 'fith', 'fico', 'fitt', 'fift', 'fizt', 'fict', 'fibt', 'fibn', 'figt', 'fipt', 'fbuc', argstr='-%s', desc='type of datafile being converted') skipoutliers = traits.Bool(desc='skip the outliers check', argstr='-skip_outliers') assumemosaic = traits.Bool(desc='assume that Siemens image is mosaic', argstr='-assume_dicom_mosaic') datatype = traits.Enum('short', 'float', 'byte', 'complex', desc='set output file datatype', argstr='-datum %s') funcparams = traits.Str(desc='parameters for functional data', argstr='-time:zt %s alt+z2') class To3D(AFNICommand): """Create a 3D dataset from 2D image files using AFNI to3d command For complete details, see the `to3d Documentation `_ Examples ======== >>> from nipype.interfaces import afni >>> To3D = afni.To3D() >>> To3D.inputs.datatype = 'float' >>> To3D.inputs.in_folder = '.' 
>>> To3D.inputs.out_file = 'dicomdir.nii' >>> To3D.inputs.filetype = "anat" >>> To3D.cmdline #doctest: +ELLIPSIS 'to3d -datum float -anat -prefix dicomdir.nii ./*.dcm' >>> res = To3D.run() #doctest: +SKIP """ _cmd = 'to3d' input_spec = To3DInputSpec output_spec = AFNICommandOutputSpec class TShiftInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dTShift', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_tshift", desc='output image file name', argstr='-prefix %s', name_source="in_file") tr = traits.Str(desc='manually set the TR' + 'You can attach suffix "s" for seconds or "ms" for milliseconds.', argstr='-TR %s') tzero = traits.Float(desc='align each slice to given time offset', argstr='-tzero %s', xor=['tslice']) tslice = traits.Int(desc='align each slice to time offset of given slice', argstr='-slice %s', xor=['tzero']) ignore = traits.Int(desc='ignore the first set of points specified', argstr='-ignore %s') interp = traits.Enum(('Fourier', 'linear', 'cubic', 'quintic', 'heptic'), desc='different interpolation methods (see 3dTShift for details)' + ' default = Fourier', argstr='-%s') tpattern = traits.Enum(('alt+z', 'alt+z2', 'alt-z', 'alt-z2', 'seq+z', 'seq-z'), desc='use specified slice time pattern rather than one in header', argstr='-tpattern %s') rlt = traits.Bool(desc='Before shifting, remove the mean and linear trend', argstr="-rlt") rltplus = traits.Bool(desc='Before shifting,' + ' remove the mean and linear trend and ' + 'later put back the mean', argstr="-rlt+") class TShift(AFNICommand): """Shifts voxel time series from input so that seperate slices are aligned to the same temporal origin For complete details, see the `3dTshift Documentation. Examples ======== >>> from nipype.interfaces import afni as afni >>> tshift = afni.TShift() >>> tshift.inputs.in_file = 'functional.nii' >>> tshift.inputs.tpattern = 'alt+z' >>> tshift.inputs.tzero = 0.0 >>> tshift.cmdline #doctest: '3dTshift -prefix functional_tshift -tpattern alt+z -tzero 0.0 functional.nii' >>> res = tshift.run() # doctest: +SKIP """ _cmd = '3dTshift' input_spec = TShiftInputSpec output_spec = AFNICommandOutputSpec class RefitInputSpec(CommandLineInputSpec): in_file = File(desc='input file to 3drefit', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=True) deoblique = traits.Bool(desc='replace current transformation' + ' matrix with cardinal matrix', argstr='-deoblique') xorigin = traits.Str(desc='x distance for edge voxel offset', argstr='-xorigin %s') yorigin = traits.Str(desc='y distance for edge voxel offset', argstr='-yorigin %s') zorigin = traits.Str(desc='z distance for edge voxel offset', argstr='-zorigin %s') class Refit(CommandLine): """Changes some of the information inside a 3D dataset's header For complete details, see the `3drefit Documentation. 
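    Note that ``3drefit`` edits the dataset header in place, so the input is
    copied first (``copyfile=True``) and the interface reports that same file
    as ``out_file``.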
Examples ======== >>> from nipype.interfaces import afni as afni >>> refit = afni.Refit() >>> refit.inputs.in_file = 'structural.nii' >>> refit.inputs.deoblique = True >>> refit.cmdline '3drefit -deoblique structural.nii' >>> res = refit.run() # doctest: +SKIP """ _cmd = '3drefit' input_spec = RefitInputSpec output_spec = AFNICommandOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = os.path.abspath(self.inputs.in_file) return outputs class WarpInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dWarp', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_warp", desc='output image file name', argstr='-prefix %s', name_source="in_file") tta2mni = traits.Bool(desc='transform dataset from Talairach to MNI152', argstr='-tta2mni') mni2tta = traits.Bool(desc='transform dataset from MNI152 to Talaraich', argstr='-mni2tta') matparent = File(desc="apply transformation from 3dWarpDrive", argstr="-matparent %s", exists=True) deoblique = traits.Bool(desc='transform dataset from oblique to cardinal', argstr='-deoblique') interp = traits.Enum(('linear', 'cubic', 'NN', 'quintic'), desc='spatial interpolation methods [default = linear]', argstr='-%s') gridset = File(desc="copy grid of specified dataset", argstr="-gridset %s", exists=True) zpad = traits.Int(desc="pad input dataset with N planes" + " of zero on all sides.", argstr="-zpad %d") class Warp(AFNICommand): """Use 3dWarp for spatially transforming a dataset For complete details, see the `3dWarp Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> warp = afni.Warp() >>> warp.inputs.in_file = 'structural.nii' >>> warp.inputs.deoblique = True >>> warp.inputs.out_file = "trans.nii.gz" >>> warp.cmdline '3dWarp -deoblique -prefix trans.nii.gz structural.nii' >>> res = warp.run() # doctest: +SKIP """ _cmd = '3dWarp' input_spec = WarpInputSpec output_spec = AFNICommandOutputSpec class ResampleInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dresample', argstr='-inset %s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_resample", desc='output image file name', argstr='-prefix %s', name_source="in_file") orientation = traits.Str(desc='new orientation code', argstr='-orient %s') resample_mode = traits.Enum('NN', 'Li', 'Cu', 'Bk', argstr='-rmode %s', desc="resampling method from set {'NN', 'Li', 'Cu', 'Bk'}. These are for 'Nearest Neighbor', 'Linear', 'Cubic' and 'Blocky' interpolation, respectively. Default is NN.") voxel_size = traits.Tuple(*[traits.Float()]*3, argstr='-dxyz %f %f %f', desc="resample to new dx, dy and dz") master = traits.File(argstr='-master %s', desc='align dataset grid to a reference file') class Resample(AFNICommand): """Resample or reorient an image using AFNI 3dresample command For complete details, see the `3dresample Documentation. 
`_ Examples ======== >>> from nipype.interfaces import afni as afni >>> resample = afni.Resample() >>> resample.inputs.in_file = 'functional.nii' >>> resample.inputs.orientation= 'RPI' >>> resample.inputs.outputtype = "NIFTI" >>> resample.cmdline '3dresample -orient RPI -prefix functional_resample.nii -inset functional.nii' >>> res = resample.run() # doctest: +SKIP """ _cmd = '3dresample' input_spec = ResampleInputSpec output_spec = AFNICommandOutputSpec class AutoTcorrelateInputSpec(AFNICommandInputSpec): in_file = File(desc='timeseries x space (volume or surface) file', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) polort = traits.Int( desc='Remove polynomical trend of order m or -1 for no detrending', argstr="-polort %d") eta2 = traits.Bool(desc='eta^2 similarity', argstr="-eta2") mask = File(exists=True, desc="mask of voxels", argstr="-mask %s") mask_only_targets = traits.Bool(desc="use mask only on targets voxels", argstr="-mask_only_targets", xor=['mask_source']) mask_source = File(exists=True, desc="mask for source voxels", argstr="-mask_source %s", xor=['mask_only_targets']) out_file = File(name_template="%s_similarity_matrix.1D", desc='output image file name', argstr='-prefix %s', name_source="in_file") class AutoTcorrelate(AFNICommand): """Computes the correlation coefficient between the time series of each pair of voxels in the input dataset, and stores the output into a new anatomical bucket dataset [scaled to shorts to save memory space]. Examples ======== >>> from nipype.interfaces import afni as afni >>> corr = afni.AutoTcorrelate() >>> corr.inputs.in_file = 'functional.nii' >>> corr.inputs.polort = -1 >>> corr.inputs.eta2 = True >>> corr.inputs.mask = 'mask.nii' >>> corr.inputs.mask_only_targets = True >>> corr.cmdline # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE '3dAutoTcorrelate -eta2 -mask mask.nii -mask_only_targets -prefix functional_similarity_matrix.1D -polort -1 functional.nii' >>> res = corr.run() # doctest: +SKIP """ input_spec = AutoTcorrelateInputSpec output_spec = AFNICommandOutputSpec _cmd = '3dAutoTcorrelate' def _overload_extension(self, value): path, base, ext = split_filename(value) if ext.lower() not in [".1d", ".nii.gz", ".nii"]: ext = ext + ".1D" return os.path.join(path, base + ext) class TStatInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dTstat', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_tstat", desc='output image file name', argstr='-prefix %s', name_source="in_file") mask = File(desc='mask file', argstr='-mask %s', exists=True) options = traits.Str(desc='selected statistical output', argstr='%s') class TStat(AFNICommand): """Compute voxel-wise statistics using AFNI 3dTstat command For complete details, see the `3dTstat Documentation. 
`_ Examples ======== >>> from nipype.interfaces import afni as afni >>> tstat = afni.TStat() >>> tstat.inputs.in_file = 'functional.nii' >>> tstat.inputs.args= '-mean' >>> tstat.inputs.out_file = "stats" >>> tstat.cmdline '3dTstat -mean -prefix stats functional.nii' >>> res = tstat.run() # doctest: +SKIP """ _cmd = '3dTstat' input_spec = TStatInputSpec output_spec = AFNICommandOutputSpec class DetrendInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dDetrend', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_detrend", desc='output image file name', argstr='-prefix %s', name_source="in_file") class Detrend(AFNICommand): """This program removes components from voxel time series using linear least squares For complete details, see the `3dDetrend Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> detrend = afni.Detrend() >>> detrend.inputs.in_file = 'functional.nii' >>> detrend.inputs.args = '-polort 2' >>> detrend.inputs.outputtype = "AFNI" >>> detrend.cmdline '3dDetrend -polort 2 -prefix functional_detrend functional.nii' >>> res = detrend.run() # doctest: +SKIP """ _cmd = '3dDetrend' input_spec = DetrendInputSpec output_spec = AFNICommandOutputSpec class DespikeInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dDespike', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_despike", desc='output image file name', argstr='-prefix %s', name_source="in_file") class Despike(AFNICommand): """Removes 'spikes' from the 3D+time input dataset For complete details, see the `3dDespike Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> despike = afni.Despike() >>> despike.inputs.in_file = 'functional.nii' >>> despike.cmdline '3dDespike -prefix functional_despike functional.nii' >>> res = despike.run() # doctest: +SKIP """ _cmd = '3dDespike' input_spec = DespikeInputSpec output_spec = AFNICommandOutputSpec class AutomaskInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dAutomask', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_mask", desc='output image file name', argstr='-prefix %s', name_source="in_file") brain_file = File(name_template="%s_masked", desc="output file from 3dAutomask", argstr='-apply_prefix %s', name_source="in_file") clfrac = traits.Float(desc='sets the clip level fraction' + ' (must be 0.1-0.9). ' + 'A small value will tend to make the mask larger [default = 0.5].', argstr="-clfrac %s") dilate = traits.Int(desc='dilate the mask outwards', argstr="-dilate %s") erode = traits.Int(desc='erode the mask inwards', argstr="-erode %s") class AutomaskOutputSpec(TraitedSpec): out_file = File(desc='mask file', exists=True) brain_file = File(desc='brain file (skull stripped)', exists=True) class Automask(AFNICommand): """Create a brain-only mask of the image using AFNI 3dAutomask command For complete details, see the `3dAutomask Documentation. 
`_ Examples ======== >>> from nipype.interfaces import afni as afni >>> automask = afni.Automask() >>> automask.inputs.in_file = 'functional.nii' >>> automask.inputs.dilate = 1 >>> automask.inputs.outputtype = "NIFTI" >>> automask.cmdline #doctest: +ELLIPSIS '3dAutomask -apply_prefix functional_masked.nii -dilate 1 -prefix functional_mask.nii functional.nii' >>> res = automask.run() # doctest: +SKIP """ _cmd = '3dAutomask' input_spec = AutomaskInputSpec output_spec = AutomaskOutputSpec class VolregInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dvolreg', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_volreg", desc='output image file name', argstr='-prefix %s', name_source="in_file") basefile = File(desc='base file for registration', argstr='-base %s', position=-6, exists=True) zpad = traits.Int(desc='Zeropad around the edges' + ' by \'n\' voxels during rotations', argstr='-zpad %d', position=-5) md1d_file = File(name_template='%s_md.1D', desc='max displacement output file', argstr='-maxdisp1D %s', name_source="in_file", keep_extension=True, position=-4) oned_file = File(name_template='%s.1D', desc='1D movement parameters output file', argstr='-1Dfile %s', name_source="in_file", keep_extension=True) verbose = traits.Bool(desc='more detailed description of the process', argstr='-verbose') timeshift = traits.Bool(desc='time shift to mean slice time offset', argstr='-tshift 0') copyorigin = traits.Bool(desc='copy base file origin coords to output', argstr='-twodup') class VolregOutputSpec(TraitedSpec): out_file = File(desc='registered file', exists=True) md1d_file = File(desc='max displacement info file', exists=True) oned_file = File(desc='movement parameters info file', exists=True) class Volreg(AFNICommand): """Register input volumes to a base volume using AFNI 3dvolreg command For complete details, see the `3dvolreg Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> volreg = afni.Volreg() >>> volreg.inputs.in_file = 'functional.nii' >>> volreg.inputs.args = '-Fourier -twopass' >>> volreg.inputs.zpad = 4 >>> volreg.inputs.outputtype = "NIFTI" >>> volreg.cmdline #doctest: +ELLIPSIS '3dvolreg -Fourier -twopass -1Dfile functional.1D -prefix functional_volreg.nii -zpad 4 -maxdisp1D functional_md.1D functional.nii' >>> res = volreg.run() # doctest: +SKIP """ _cmd = '3dvolreg' input_spec = VolregInputSpec output_spec = VolregOutputSpec class MergeInputSpec(AFNICommandInputSpec): in_files = InputMultiPath( File(desc='input file to 3dmerge', exists=True), argstr='%s', position=-1, mandatory=True, copyfile=False) out_file = File(name_template="%s_merge", desc='output image file name', argstr='-prefix %s', name_source="in_file") doall = traits.Bool(desc='apply options to all sub-bricks in dataset', argstr='-doall') blurfwhm = traits.Int(desc='FWHM blur value (mm)', argstr='-1blur_fwhm %d', units='mm') class Merge(AFNICommand): """Merge or edit volumes using AFNI 3dmerge command For complete details, see the `3dmerge Documentation. 
`_ Examples ======== >>> from nipype.interfaces import afni as afni >>> merge = afni.Merge() >>> merge.inputs.in_files = ['functional.nii', 'functional2.nii'] >>> merge.inputs.blurfwhm = 4 >>> merge.inputs.doall = True >>> merge.inputs.out_file = 'e7.nii' >>> res = merge.run() # doctest: +SKIP """ _cmd = '3dmerge' input_spec = MergeInputSpec output_spec = AFNICommandOutputSpec class CopyInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dcopy', argstr='%s', position=-2, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_copy", desc='output image file name', argstr='-prefix %s', name_source="in_file") class Copy(AFNICommand): """Copies an image of one type to an image of the same or different type using 3dcopy command For complete details, see the `3dcopy Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> copy = afni.Copy() >>> copy.inputs.in_file = 'functional.nii' >>> copy.inputs.out_file = 'new_func.nii' >>> res = copy.run() # doctest: +SKIP """ _cmd = '3dcopy' input_spec = CopyInputSpec output_spec = AFNICommandOutputSpec class FourierInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dFourier', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_fourier", desc='output image file name', argstr='-prefix %s', name_source="in_file") lowpass = traits.Float(desc='lowpass', argstr='-lowpass %f', position=0, mandatory=True) highpass = traits.Float(desc='highpass', argstr='-highpass %f', position=1, mandatory=True) class Fourier(AFNICommand): """Program to lowpass and/or highpass each voxel time series in a dataset, via the FFT For complete details, see the `3dFourier Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> fourier = afni.Fourier() >>> fourier.inputs.in_file = 'functional.nii' >>> fourier.inputs.args = '-retrend' >>> fourier.inputs.highpass = 0.005 >>> fourier.inputs.lowpass = 0.1 >>> res = fourier.run() # doctest: +SKIP """ _cmd = '3dFourier' input_spec = FourierInputSpec output_spec = AFNICommandOutputSpec class BandpassInputSpec(AFNICommandInputSpec): in_file = File( desc='input file to 3dBandpass', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File( name_template='%s_bp', desc='output file from 3dBandpass', argstr='-prefix %s', position=1, name_source='in_file', genfile=True) lowpass = traits.Float( desc='lowpass', argstr='%f', position=-2, mandatory=True) highpass = traits.Float( desc='highpass', argstr='%f', position=-3, mandatory=True) mask = File( desc='mask file', position=2, argstr='-mask %s', exists=True) despike = traits.Bool( argstr='-despike', desc="""Despike each time series before other processing. ++ Hopefully, you don't actually need to do this, which is why it is optional.""") orthogonalize_file = InputMultiPath( File(exists=True), argstr="-ort %s", desc="""Also orthogonalize input to columns in f.1D ++ Multiple '-ort' options are allowed.""") orthogonalize_dset = File( exists=True, argstr="-dsort %s", desc="""Orthogonalize each voxel to the corresponding voxel time series in dataset 'fset', which must have the same spatial and temporal grid structure as the main input dataset. ++ At present, only one '-dsort' option is allowed.""") no_detrend = traits.Bool( argstr='-nodetrend', desc="""Skip the quadratic detrending of the input that occurs before the FFT-based bandpassing. 
++ You would only want to do this if the dataset had been detrended already in some other program.""") tr = traits.Float( argstr="-dt %f", desc="set time step (TR) in sec [default=from dataset header]") nfft = traits.Int( argstr='-nfft %d', desc="set the FFT length [must be a legal value]") normalize = traits.Bool( argstr='-norm', desc="""Make all output time series have L2 norm = 1 ++ i.e., sum of squares = 1""") automask = traits.Bool( argstr='-automask', desc="Create a mask from the input dataset") blur = traits.Float( argstr='-blur %f', desc="""Blur (inside the mask only) with a filter width (FWHM) of 'fff' millimeters.""") localPV = traits.Float( argstr='-localPV %f', desc="""Replace each vector by the local Principal Vector (AKA first singular vector) from a neighborhood of radius 'rrr' millimiters. ++ Note that the PV time series is L2 normalized. ++ This option is mostly for Bob Cox to have fun with.""") notrans = traits.Bool( argstr='-notrans', desc="""Don't check for initial positive transients in the data: ++ The test is a little slow, so skipping it is OK, if you KNOW the data time series are transient-free.""") class Bandpass(AFNICommand): """Program to lowpass and/or highpass each voxel time series in a dataset, offering more/different options than Fourier For complete details, see the `3dBandpass Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> from nipype.testing import example_data >>> bandpass = afni.Bandpass() >>> bandpass.inputs.in_file = example_data('functional.nii') >>> bandpass.inputs.highpass = 0.005 >>> bandpass.inputs.lowpass = 0.1 >>> res = bandpass.run() # doctest: +SKIP """ _cmd = '3dBandpass' input_spec = BandpassInputSpec output_spec = AFNICommandOutputSpec class ZCutUpInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dZcutup', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_zcupup", desc='output image file name', argstr='-prefix %s', name_source="in_file") keep = traits.Str(desc='slice range to keep in output', argstr='-keep %s') class ZCutUp(AFNICommand): """Cut z-slices from a volume using AFNI 3dZcutup command For complete details, see the `3dZcutup Documentation. 
`_ Examples ======== >>> from nipype.interfaces import afni as afni >>> zcutup = afni.ZCutUp() >>> zcutup.inputs.in_file = 'functional.nii' >>> zcutup.inputs.out_file = 'functional_zcutup.nii' >>> zcutup.inputs.keep= '0 10' >>> res = zcutup.run() # doctest: +SKIP """ _cmd = '3dZcutup' input_spec = ZCutUpInputSpec output_spec = AFNICommandOutputSpec class AllineateInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dAllineate', argstr='-source %s', position=-1, mandatory=True, exists=True, copyfile=False) reference = File( exists=True, argstr='-base %s', desc="""file to be used as reference, the first volume will be used if not given the reference will be the first volume of in_file.""") out_file = File( desc='output file from 3dAllineate', argstr='-prefix %s', position=-2, name_source='%s_allineate', genfile=True) out_param_file = File( argstr='-1Dparam_save %s', desc='Save the warp parameters in ASCII (.1D) format.') in_param_file = File( exists=True, argstr='-1Dparam_apply %s', desc="""Read warp parameters from file and apply them to the source dataset, and produce a new dataset""") out_matrix = File( argstr='-1Dmatrix_save %s', desc='Save the transformation matrix for each volume.') in_matrix = File(desc='matrix to align input file', argstr='-1Dmatrix_apply %s', position=-3) _cost_funcs = [ 'leastsq', 'ls', 'mutualinfo', 'mi', 'corratio_mul', 'crM', 'norm_mutualinfo', 'nmi', 'hellinger', 'hel', 'corratio_add', 'crA', 'corratio_uns', 'crU'] cost = traits.Enum( *_cost_funcs, argstr='-cost %s', desc="""Defines the 'cost' function that defines the matching between the source and the base""") _interp_funcs = [ 'nearestneighbour', 'linear', 'cubic', 'quintic', 'wsinc5'] interpolation = traits.Enum( *_interp_funcs[:-1], argstr='-interp %s', desc='Defines interpolation method to use during matching') final_interpolation = traits.Enum( *_interp_funcs, argstr='-final %s', desc='Defines interpolation method used to create the output dataset') # TECHNICAL OPTIONS (used for fine control of the program): nmatch = traits.Int( argstr='-nmatch %d', desc='Use at most n scattered points to match the datasets.') no_pad = traits.Bool( argstr='-nopad', desc='Do not use zero-padding on the base image.') zclip = traits.Bool( argstr='-zclip', desc='Replace negative values in the input datasets (source & base) with zero.') convergence = traits.Float( argstr='-conv %f', desc='Convergence test in millimeters (default 0.05mm).') usetemp = traits.Bool(argstr='-usetemp', desc='temporary file use') check = traits.List( traits.Enum(*_cost_funcs), argstr='-check %s', desc="""After cost functional optimization is done, start at the final parameters and RE-optimize using this new cost functions. If the results are too different, a warning message will be printed. However, the final parameters from the original optimization will be used to create the output dataset.""") # ** PARAMETERS THAT AFFECT THE COST OPTIMIZATION STRATEGY ** one_pass = traits.Bool( argstr='-onepass', desc="""Use only the refining pass -- do not try a coarse resolution pass first. 
Useful if you know that only small amounts of image alignment are needed.""") two_pass = traits.Bool( argstr='-twopass', desc="""Use a two pass alignment strategy for all volumes, searching for a large rotation+shift and then refining the alignment.""") two_blur = traits.Float( argstr='-twoblur', desc='Set the blurring radius for the first pass in mm.') two_first = traits.Bool( argstr='-twofirst', desc="""Use -twopass on the first image to be registered, and then on all subsequent images from the source dataset, use results from the first image's coarse pass to start the fine pass.""") two_best = traits.Int( argstr='-twobest %d', desc="""In the coarse pass, use the best 'bb' set of initial points to search for the starting point for the fine pass. If bb==0, then no search is made for the best starting point, and the identity transformation is used as the starting point. [Default=5; min=0 max=11]""") fine_blur = traits.Float( argstr='-fineblur %f', desc="""Set the blurring radius to use in the fine resolution pass to 'x' mm. A small amount (1-2 mm?) of blurring at the fine step may help with convergence, if there is some problem, especially if the base volume is very noisy. [Default == 0 mm = no blurring at the final alignment pass]""") center_of_mass = traits.Str( argstr='-cmass%s', desc='Use the center-of-mass calculation to bracket the shifts.') autoweight = traits.Str( argstr='-autoweight%s', desc="""Compute a weight function using the 3dAutomask algorithm plus some blurring of the base image.""") automask = traits.Int( argstr='-automask+%d', desc="""Compute a mask function, set a value for dilation or 0.""") autobox = traits.Bool( argstr='-autobox', desc="""Expand the -automask function to enclose a rectangular box that holds the irregular mask.""") nomask = traits.Bool( argstr='-nomask', desc="""Don't compute the autoweight/mask; if -weight is not also used, then every voxel will be counted equally.""") weight_file = File( argstr='-weight %s', exists=True, desc="""Set the weighting for each voxel in the base dataset; larger weights mean that voxel count more in the cost function. Must be defined on the same grid as the base dataset""") out_weight_file = traits.File( argstr='-wtprefix %s', desc="""Write the weight volume to disk as a dataset""") source_mask = File( exists=True, argstr='-source_mask %s', desc='mask the input dataset') source_automask = traits.Int( argstr='-source_automask+%d', desc='Automatically mask the source dataset with dilation or 0.') warp_type = traits.Enum( 'shift_only', 'shift_rotate', 'shift_rotate_scale', 'affine_general', argstr='-warp %s', desc='Set the warp type.') warpfreeze = traits.Bool( argstr='-warpfreeze', desc='Freeze the non-rigid body parameters after first volume.') replacebase = traits.Bool( argstr='-replacebase', desc="""If the source has more than one volume, then after the first volume is aligned to the base""") replacemeth = traits.Enum( *_cost_funcs, argstr='-replacemeth %s', desc="""After first volume is aligned, switch method for later volumes. For use with '-replacebase'.""") epi = traits.Bool( argstr='-EPI', desc="""Treat the source dataset as being composed of warped EPI slices, and the base as comprising anatomically 'true' images. 
Only phase-encoding direction image shearing and scaling will be allowed with this option.""") master = File( exists=True, argstr='-master %s', desc='Write the output dataset on the same grid as this file') newgrid = traits.Float( argstr='-newgrid %f', desc='Write the output dataset using isotropic grid spacing in mm') # Non-linear experimental _nwarp_types = ['bilinear', 'cubic', 'quintic', 'heptic', 'nonic', 'poly3', 'poly5', 'poly7', 'poly9'] # same non-hellenistic nwarp = traits.Enum( *_nwarp_types, argstr='-nwarp %s', desc='Experimental nonlinear warping: bilinear or legendre poly.') _dirs = ['X', 'Y', 'Z', 'I', 'J', 'K'] nwarp_fixmot = traits.List( traits.Enum(*_dirs), argstr='-nwarp_fixmot%s', desc='To fix motion along directions.') nwarp_fixdep = traits.List( traits.Enum(*_dirs), argstr='-nwarp_fixdep%s', desc='To fix non-linear warp dependency along directions.') class AllineateOutputSpec(TraitedSpec): out_file = File(desc='output image file name') matrix = File(desc='matrix to align input file') class Allineate(AFNICommand): """Program to align one dataset (the 'source') to a base dataset For complete details, see the `3dAllineate Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> allineate = afni.Allineate() >>> allineate.inputs.in_file = 'functional.nii' >>> allineate.inputs.out_file= 'functional_allineate.nii' >>> allineate.inputs.in_matrix= 'cmatrix.mat' >>> res = allineate.run() # doctest: +SKIP """ _cmd = '3dAllineate' input_spec = AllineateInputSpec output_spec = AllineateOutputSpec def _format_arg(self, name, trait_spec, value): if name == 'nwarp_fixmot' or name == 'nwarp_fixdep': arg = ' '.join([trait_spec.argstr % v for v in value]) return arg return super(Allineate, self)._format_arg(name, trait_spec, value) def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.out_file): outputs['out_file'] = self._gen_fname(self.inputs.in_file, suffix=self.inputs.suffix) else: outputs['out_file'] = os.path.abspath(self.inputs.out_file) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()[name] class MaskaveInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dmaskave', argstr='%s', position=-2, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_maskave.1D", desc='output image file name', keep_extension=True, argstr="> %s", name_source="in_file", position=-1) mask = File(desc='matrix to align input file', argstr='-mask %s', position=1, exists=True) quiet = traits.Bool(desc='matrix to align input file', argstr='-quiet', position=2) class Maskave(AFNICommand): """Computes average of all voxels in the input dataset which satisfy the criterion in the options list For complete details, see the `3dmaskave Documentation. 
`_ Examples ======== >>> from nipype.interfaces import afni as afni >>> maskave = afni.Maskave() >>> maskave.inputs.in_file = 'functional.nii' >>> maskave.inputs.mask= 'seed_mask.nii' >>> maskave.inputs.quiet= True >>> maskave.cmdline #doctest: +ELLIPSIS '3dmaskave -mask seed_mask.nii -quiet functional.nii > functional_maskave.1D' >>> res = maskave.run() # doctest: +SKIP """ _cmd = '3dmaskave' input_spec = MaskaveInputSpec output_spec = AFNICommandOutputSpec class SkullStripInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dSkullStrip', argstr='-input %s', position=1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_skullstrip", desc='output image file name', argstr='-prefix %s', name_source="in_file") class SkullStrip(AFNICommand): """A program to extract the brain from surrounding tissue from MRI T1-weighted images For complete details, see the `3dSkullStrip Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> skullstrip = afni.SkullStrip() >>> skullstrip.inputs.in_file = 'functional.nii' >>> skullstrip.inputs.args = '-o_ply' >>> res = skullstrip.run() # doctest: +SKIP """ _cmd = '3dSkullStrip' input_spec = SkullStripInputSpec output_spec = AFNICommandOutputSpec class TCatInputSpec(AFNICommandInputSpec): in_files = InputMultiPath( File(exists=True), desc='input file to 3dTcat', argstr=' %s', position=-1, mandatory=True, copyfile=False) out_file = File(name_template="%s_tcat", desc='output image file name', argstr='-prefix %s', name_source="in_file") rlt = traits.Str(desc='options', argstr='-rlt%s', position=1) class TCat(AFNICommand): """Concatenate sub-bricks from input datasets into one big 3D+time dataset For complete details, see the `3dTcat Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> tcat = afni.TCat() >>> tcat.inputs.in_files = ['functional.nii', 'functional2.nii'] >>> tcat.inputs.out_file= 'functional_tcat.nii' >>> tcat.inputs.rlt = '+' >>> res = tcat.run() # doctest: +SKIP """ _cmd = '3dTcat' input_spec = TCatInputSpec output_spec = AFNICommandOutputSpec class FimInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dfim+', argstr=' -input %s', position=1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_fim", desc='output image file name', argstr='-bucket %s', name_source="in_file") ideal_file = File(desc='ideal time series file name', argstr='-ideal_file %s', position=2, mandatory=True, exists=True) fim_thr = traits.Float(desc='fim internal mask threshold value', argstr='-fim_thr %f', position=3) out = traits.Str(desc='Flag to output the specified parameter', argstr='-out %s', position=4) class Fim(AFNICommand): """Program to calculate the cross-correlation of an ideal reference waveform with the measured FMRI time series for each voxel For complete details, see the `3dfim+ Documentation. 
`_ Examples ======== >>> from nipype.interfaces import afni as afni >>> fim = afni.Fim() >>> fim.inputs.in_file = 'functional.nii' >>> fim.inputs.ideal_file= 'seed.1D' >>> fim.inputs.out_file = 'functional_corr.nii' >>> fim.inputs.out = 'Correlation' >>> fim.inputs.fim_thr = 0.0009 >>> res = fim.run() # doctest: +SKIP """ _cmd = '3dfim+' input_spec = FimInputSpec output_spec = AFNICommandOutputSpec class TCorrelateInputSpec(AFNICommandInputSpec): xset = File(desc='input xset', argstr=' %s', position=-2, mandatory=True, exists=True, copyfile=False) yset = File(desc='input yset', argstr=' %s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s_tcorr", desc='output image file name', argstr='-prefix %s', name_source="xset") pearson = traits.Bool(desc='Correlation is the normal' + ' Pearson correlation coefficient', argstr='-pearson', position=1) polort = traits.Int(desc='Remove polynomical trend of order m', argstr='-polort %d', position=2) class TCorrelate(AFNICommand): """Computes the correlation coefficient between corresponding voxel time series in two input 3D+time datasets 'xset' and 'yset' For complete details, see the `3dTcorrelate Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> tcorrelate = afni.TCorrelate() >>> tcorrelate.inputs.xset= 'u_rc1s1_Template.nii' >>> tcorrelate.inputs.yset = 'u_rc1s2_Template.nii' >>> tcorrelate.inputs.out_file = 'functional_tcorrelate.nii.gz' >>> tcorrelate.inputs.polort = -1 >>> tcorrelate.inputs.pearson = True >>> res = tcarrelate.run() # doctest: +SKIP """ _cmd = '3dTcorrelate' input_spec = TCorrelateInputSpec output_spec = AFNICommandOutputSpec class TCorr1DInputSpec(AFNICommandInputSpec): xset = File(desc = '3d+time dataset input', argstr = ' %s', position = -2, mandatory = True, exists = True, copyfile=False) y_1d = File(desc = '1D time series file input', argstr = ' %s', position = -1, mandatory = True, exists = True) out_file = File(desc = 'output filename prefix', name_template='%s_correlation.nii.gz', argstr = '-prefix %s', name_source = 'xset', keep_extension = True) pearson = traits.Bool(desc='Correlation is the normal' + ' Pearson correlation coefficient', argstr=' -pearson', xor=['spearman','quadrant','ktaub'], position=1) spearman = traits.Bool(desc='Correlation is the' + ' Spearman (rank) correlation coefficient', argstr=' -spearman', xor=['pearson','quadrant','ktaub'], position=1) quadrant = traits.Bool(desc='Correlation is the' + ' quadrant correlation coefficient', argstr=' -quadrant', xor=['pearson','spearman','ktaub'], position=1) ktaub = traits.Bool(desc='Correlation is the' + ' Kendall\'s tau_b correlation coefficient', argstr=' -ktaub', xor=['pearson','spearman','quadrant'], position=1) class TCorr1DOutputSpec(TraitedSpec): out_file = File(desc = 'output file containing correlations', exists = True) class TCorr1D(AFNICommand): """Computes the correlation coefficient between each voxel time series in the input 3D+time dataset. For complete details, see the `3dTcorr1D Documentation. 
`_ >>> from nipype.interfaces import afni as afni >>> tcorr1D = afni.TCorr1D() >>> tcorr1D.inputs.xset= 'u_rc1s1_Template.nii' >>> tcorr1D.inputs.y_1d = 'seed.1D' >>> tcorr1D.cmdline '3dTcorr1D -prefix u_rc1s1_Template_correlation.nii.gz u_rc1s1_Template.nii seed.1D' >>> res = tcorr1D.run() # doctest: +SKIP """ _cmd = '3dTcorr1D' input_spec = TCorr1DInputSpec output_spec = TCorr1DOutputSpec class BrickStatInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dmaskave', argstr='%s', position=-1, mandatory=True, exists=True) mask = File(desc='-mask dset = use dset as mask to include/exclude voxels', argstr='-mask %s', position=2, exists=True) min = traits.Bool(desc='print the minimum value in dataset', argstr='-min', position=1) class BrickStatOutputSpec(TraitedSpec): min_val = traits.Float(desc='output') class BrickStat(AFNICommand): """Compute maximum and/or minimum voxel values of an input dataset For complete details, see the `3dBrickStat Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> brickstat = afni.BrickStat() >>> brickstat.inputs.in_file = 'functional.nii' >>> brickstat.inputs.mask = 'skeleton_mask.nii.gz' >>> brickstat.inputs.min = True >>> res = brickstat.run() # doctest: +SKIP """ _cmd = '3dBrickStat' input_spec = BrickStatInputSpec output_spec = BrickStatOutputSpec def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() outfile = os.path.join(os.getcwd(), 'stat_result.json') if runtime is None: try: min_val = load_json(outfile)['stat'] except IOError: return self.run().outputs else: min_val = [] for line in runtime.stdout.split('\n'): if line: values = line.split() if len(values) > 1: min_val.append([float(val) for val in values]) else: min_val.extend([float(val) for val in values]) if len(min_val) == 1: min_val = min_val[0] save_json(outfile, dict(stat=min_val)) outputs.min_val = min_val return outputs class ROIStatsInputSpec(CommandLineInputSpec): in_file = File(desc='input file to 3dROIstats', argstr='%s', position=-1, mandatory=True, exists=True) mask = File(desc='input mask', argstr='-mask %s', position=3, exists=True) mask_f2short = traits.Bool( desc='Tells the program to convert a float mask ' + 'to short integers, by simple rounding.', argstr='-mask_f2short', position=2) quiet = traits.Bool(desc='execute quietly', argstr='-quiet', position=1) terminal_output = traits.Enum('allatonce', desc=('Control terminal output:' '`allatonce` - waits till command is ' 'finished to display output'), nohash=True, mandatory=True, usedefault=True) class ROIStatsOutputSpec(TraitedSpec): stats = File(desc='output tab separated values file', exists=True) class ROIStats(CommandLine): """Display statistics over masked regions For complete details, see the `3dROIstats Documentation. 
`_ Examples ======== >>> from nipype.interfaces import afni as afni >>> roistats = afni.ROIStats() >>> roistats.inputs.in_file = 'functional.nii' >>> roistats.inputs.mask = 'skeleton_mask.nii.gz' >>> roistats.inputs.quiet=True >>> res = roistats.run() # doctest: +SKIP """ _cmd = '3dROIstats' input_spec = ROIStatsInputSpec output_spec = ROIStatsOutputSpec def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() output_filename = "roi_stats.csv" f = open(output_filename, "w") f.write(runtime.stdout) f.close() outputs.stats = os.path.abspath(output_filename) return outputs class CalcInputSpec(AFNICommandInputSpec): in_file_a = File(desc='input file to 3dcalc', argstr='-a %s', position=0, mandatory=True, exists=True) in_file_b = File(desc='operand file to 3dcalc', argstr=' -b %s', position=1, exists=True) in_file_c = File(desc='operand file to 3dcalc', argstr=' -c %s', position=2, exists=True) out_file = File(name_template="%s_calc", desc='output image file name', argstr='-prefix %s', name_source="in_file_a") expr = traits.Str(desc='expr', argstr='-expr "%s"', position=3, mandatory=True) start_idx = traits.Int(desc='start index for in_file_a', requires=['stop_idx']) stop_idx = traits.Int(desc='stop index for in_file_a', requires=['start_idx']) single_idx = traits.Int(desc='volume index for in_file_a') other = File(desc='other options', argstr='') class Calc(AFNICommand): """This program does voxel-by-voxel arithmetic on 3D datasets For complete details, see the `3dcalc Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> calc = afni.Calc() >>> calc.inputs.in_file_a = 'functional.nii' >>> calc.inputs.in_file_b = 'functional2.nii' >>> calc.inputs.expr='a*b' >>> calc.inputs.out_file = 'functional_calc.nii.gz' >>> calc.inputs.outputtype = "NIFTI" >>> calc.cmdline #doctest: +ELLIPSIS '3dcalc -a functional.nii -b functional2.nii -expr "a*b" -prefix functional_calc.nii.gz' """ _cmd = '3dcalc' input_spec = CalcInputSpec output_spec = AFNICommandOutputSpec def _format_arg(self, name, trait_spec, value): if name == 'in_file_a': arg = trait_spec.argstr % value if isdefined(self.inputs.start_idx): arg += '[%d..%d]' % (self.inputs.start_idx, self.inputs.stop_idx) if isdefined(self.inputs.single_idx): arg += '[%d]' % (self.inputs.single_idx) return arg return super(Calc, self)._format_arg(name, trait_spec, value) def _parse_inputs(self, skip=None): """Skip the arguments without argstr metadata """ return super(Calc, self)._parse_inputs( skip=('start_idx', 'stop_idx', 'other')) class BlurInMaskInputSpec(AFNICommandInputSpec): in_file = File( desc='input file to 3dSkullStrip', argstr='-input %s', position=1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template='%s_blur', desc='output to the file', argstr='-prefix %s', name_source='in_file', position=-1) mask = File( desc='Mask dataset, if desired. Blurring will occur only within the mask. Voxels NOT in the mask will be set to zero in the output.', argstr='-mask %s') multimask = File( desc='Multi-mask dataset -- each distinct nonzero value in dataset will be treated as a separate mask for blurring purposes.', argstr='-Mmask %s') automask = traits.Bool( desc='Create an automask from the input dataset.', argstr='-automask') fwhm = traits.Float( desc='fwhm kernel size', argstr='-FWHM %f', mandatory=True) preserve = traits.Bool( desc='Normally, voxels not in the mask will be set to zero in the output. 
If you want the original values in the dataset to be preserved in the output, use this option.', argstr='-preserve') float_out = traits.Bool( desc='Save dataset as floats, no matter what the input data type is.', argstr='-float') options = traits.Str(desc='options', argstr='%s', position=2) class BlurInMask(AFNICommand): """ Blurs a dataset spatially inside a mask. That's all. Experimental. For complete details, see the `3dBlurInMask Documentation. Examples ======== >>> from nipype.interfaces import afni as afni >>> bim = afni.BlurInMask() >>> bim.inputs.in_file = 'functional.nii' >>> bim.inputs.mask = 'mask.nii' >>> bim.inputs.fwhm = 5.0 >>> bim.cmdline #doctest: +ELLIPSIS '3dBlurInMask -input functional.nii -FWHM 5.000000 -mask mask.nii -prefix functional_blur' >>> res = bim.run() # doctest: +SKIP """ _cmd = '3dBlurInMask' input_spec = BlurInMaskInputSpec output_spec = AFNICommandOutputSpec class TCorrMapInputSpec(AFNICommandInputSpec): in_file = File(exists=True, argstr='-input %s', mandatory=True, copyfile=False) seeds = File(exists=True, argstr='-seed %s', xor=('seeds_width')) mask = File(exists=True, argstr='-mask %s') automask = traits.Bool(argstr='-automask') polort = traits.Int(argstr='-polort %d') bandpass = traits.Tuple((traits.Float(), traits.Float()), argstr='-bpass %f %f') regress_out_timeseries = traits.File(exists=True, argstr='-ort %s') blur_fwhm = traits.Float(argstr='-Gblur %f') seeds_width = traits.Float(argstr='-Mseed %f', xor=('seeds')) # outputs mean_file = File(argstr='-Mean %s', suffix='_mean', name_source="in_file") zmean = File(argstr='-Zmean %s', suffix='_zmean', name_source="in_file") qmean = File(argstr='-Qmean %s', suffix='_qmean', name_source="in_file") pmean = File(argstr='-Pmean %s', suffix='_pmean', name_source="in_file") _thresh_opts = ('absolute_threshold', 'var_absolute_threshold', 'var_absolute_threshold_normalize') thresholds = traits.List(traits.Int()) absolute_threshold = File( argstr='-Thresh %f %s', suffix='_thresh', name_source="in_file", xor=_thresh_opts) var_absolute_threshold = File( argstr='-VarThresh %f %f %f %s', suffix='_varthresh', name_source="in_file", xor=_thresh_opts) var_absolute_threshold_normalize = File( argstr='-VarThreshN %f %f %f %s', suffix='_varthreshn', name_source="in_file", xor=_thresh_opts) correlation_maps = File( argstr='-CorrMap %s', name_source="in_file") correlation_maps_masked = File( argstr='-CorrMask %s', name_source="in_file") _expr_opts = ('average_expr', 'average_expr_nonzero', 'sum_expr') expr = traits.Str() average_expr = File( argstr='-Aexpr %s %s', suffix='_aexpr', name_source='in_file', xor=_expr_opts) average_expr_nonzero = File( argstr='-Cexpr %s %s', suffix='_cexpr', name_source='in_file', xor=_expr_opts) sum_expr = File( argstr='-Sexpr %s %s', suffix='_sexpr', name_source='in_file', xor=_expr_opts) histogram_bin_numbers = traits.Int() histogram = File( name_source='in_file', argstr='-Hist %d %s', suffix='_hist') class TCorrMapOutputSpec(TraitedSpec): mean_file = File() zmean = File() qmean = File() pmean = File() absolute_threshold = File() var_absolute_threshold = File() var_absolute_threshold_normalize = File() correlation_maps = File() correlation_maps_masked = File() average_expr = File() average_expr_nonzero = File() sum_expr = File() histogram = File() class TCorrMap(AFNICommand): """ For each voxel time series, computes the correlation between it and all other voxels, and combines this set of values into the output dataset(s) in some way. For complete details, see the `3dTcorrMap Documentation. 
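Note that the expression- and threshold-based outputs take auxiliary values from companion inputs rather than from the output trait itself: ``thresholds`` supplies the cutoffs for ``absolute_threshold`` and the ``var_absolute_threshold*`` outputs, ``expr`` supplies the expression for ``average_expr``, ``average_expr_nonzero`` and ``sum_expr``, and ``histogram_bin_numbers`` supplies the bin count for ``histogram``; these pairs are combined on the command line by ``_format_arg`` below. An illustrative sketch of the expression-based usage (placeholder expression and output file name, not a prescribed workflow):

>>> from nipype.interfaces import afni as afni
>>> tcm_expr = afni.TCorrMap()
>>> tcm_expr.inputs.in_file = 'functional.nii'
>>> tcm_expr.inputs.expr = 'step(r-0.3)'  # placeholder expression string
>>> tcm_expr.inputs.average_expr = 'functional_aexpr.nii'  # placeholder output name
>>> res = tcm_expr.run()  # doctest: +SKIP
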
Examples ======== >>> from nipype.interfaces import afni as afni >>> tcm = afni.TCorrMap() >>> tcm.inputs.in_file = 'functional.nii' >>> tcm.inputs.mask = 'mask.nii' >>> tcm.mean_file = '%s_meancorr.nii' >>> res = tcm.run() # doctest: +SKIP """ _cmd = '3dTcorrMap' input_spec = TCorrMapInputSpec output_spec = TCorrMapOutputSpec _additional_metadata = ['suffix'] def _format_arg(self, name, trait_spec, value): if name in self.inputs._thresh_opts: return trait_spec.argstr % self.inputs.thresholds + [value] elif name in self.inputs._expr_opts: return trait_spec.argstr % (self.inputs.expr, value) elif name == 'histogram': return trait_spec.argstr % (self.inputs.histogram_bin_numbers, value) else: return super(TCorrMap, self)._format_arg(name, trait_spec, value) class AutoboxInputSpec(AFNICommandInputSpec): in_file = File(exists=True, mandatory=True, argstr='-input %s', desc='input file', copyfile=False) padding = traits.Int( argstr='-npad %d', desc='Number of extra voxels to pad on each side of box') out_file = File(argstr="-prefix %s", name_source="in_file") no_clustering = traits.Bool( argstr='-noclust', desc="""Don't do any clustering to find box. Any non-zero voxel will be preserved in the cropped volume. The default method uses some clustering to find the cropping box, and will clip off small isolated blobs.""") class AutoboxOuputSpec(TraitedSpec): # out_file not mandatory x_min = traits.Int() x_max = traits.Int() y_min = traits.Int() y_max = traits.Int() z_min = traits.Int() z_max = traits.Int() out_file = File(desc='output file') class Autobox(AFNICommand): """ Computes size of a box that fits around the volume. Also can be used to crop the volume to that box. For complete details, see the `3dAutobox Documentation. Examples ======== >>> from nipype.interfaces import afni as afni >>> abox = afni.Autobox() >>> abox.inputs.in_file = 'structural.nii' >>> abox.inputs.padding = 5 >>> res = abox.run() # doctest: +SKIP """ _cmd = '3dAutobox' input_spec = AutoboxInputSpec output_spec = AutoboxOuputSpec def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() pattern = 'x=(?P-?\d+)\.\.(?P-?\d+) y=(?P-?\d+)\.\.(?P-?\d+) z=(?P-?\d+)\.\.(?P-?\d+)' for line in runtime.stderr.split('\n'): m = re.search(pattern, line) if m: d = m.groupdict() for k in d.keys(): d[k] = int(d[k]) outputs.set(**d) outputs.set(out_file=self._gen_filename('out_file')) return outputs def _gen_filename(self, name): if name == 'out_file' and (not isdefined(self.inputs.out_file)): return Undefined return super(Autobox, self)._gen_filename(name) class RetroicorInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dretroicor', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(desc='output image file name', argstr='-prefix %s', mandatory=True, position=1) card = File(desc='1D cardiac data file for cardiac correction', argstr='-card %s', position=-2, exists=True) resp = File(desc='1D respiratory waveform data for correction', argstr='-resp %s', position=-3, exists=True) threshold = traits.Int(desc='Threshold for detection of R-wave peaks in input (Make sure it is above the background noise level, Try 3/4 or 4/5 times range plus minimum)', argstr='-threshold %d', position=-4) order = traits.Int(desc='The order of the correction (2 is typical)', argstr='-order %s', position=-5) cardphase = File(desc='Filename for 1D cardiac phase output', argstr='-cardphase %s', position=-6, hash_files=False) respphase = File(desc='Filename for 1D resp phase output', 
argstr='-respphase %s', position=-7, hash_files=False) class Retroicor(AFNICommand): """Performs Retrospective Image Correction for physiological motion effects, using a slightly modified version of the RETROICOR algorithm The durations of the physiological inputs are assumed to equal the duration of the dataset. Any constant sampling rate may be used, but 40 Hz seems to be acceptable. This program's cardiac peak detection algorithm is rather simplistic, so you might try using the scanner's cardiac gating output (transform it to a spike wave if necessary). This program uses slice timing information embedded in the dataset to estimate the proper cardiac/respiratory phase for each slice. It makes sense to run this program before any program that may destroy the slice timings (e.g. 3dvolreg for motion correction). For complete details, see the `3dretroicor Documentation. `_ Examples ======== >>> from nipype.interfaces import afni as afni >>> ret = afni.Retroicor() >>> ret.inputs.in_file = 'functional.nii' >>> ret.inputs.card = 'mask.1D' >>> ret.inputs.resp = 'resp.1D' >>> res = ret.run() # doctest: +SKIP """ _cmd = '3dretroicor' input_spec = RetroicorInputSpec output_spec = AFNICommandOutputSpec class AFNItoNIFTIInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dAFNItoNIFTI', argstr='%s', position=-1, mandatory=True, exists=True, copyfile=False) out_file = File(name_template="%s.nii", desc='output image file name', argstr='-prefix %s', name_source="in_file") hash_files = False class AFNItoNIFTI(AFNICommand): """Changes AFNI format files to NIFTI format using 3dAFNItoNIFTI see AFNI Documentation: this can also convert 2D or 1D data, which you can numpy.squeeze() to remove extra dimensions Examples ======== >>> from nipype.interfaces import afni as afni >>> a2n = afni.AFNItoNIFTI() >>> a2n.inputs.in_file = 'afni_output.3D' >>> a2n.inputs.out_file = 'afni_output.nii' >>> a2n.cmdline '3dAFNItoNIFTI -prefix afni_output.nii afni_output.3D' """ _cmd = '3dAFNItoNIFTI' input_spec = AFNItoNIFTIInputSpec output_spec = AFNICommandOutputSpec def _overload_extension(self, value): path, base, ext = split_filename(value) if ext.lower() not in [".1d", ".nii.gz", ".1D"]: ext = ext + ".nii" return os.path.join(path, base + ext) def _gen_filename(self, name): return os.path.abspath(super(AFNItoNIFTI, self)._gen_filename(name)) nipype-0.9.2/nipype/interfaces/afni/setup.py000066400000000000000000000007111227300005300210730ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('afni', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/afni/tests/000077500000000000000000000000001227300005300205245ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_AFNICommand.py000066400000000000000000000013721227300005300252040ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.base import AFNICommand def test_AFNICommand_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), out_file=dict(argstr='-prefix %s', 
name_source=['in_file'], name_template='%s_afni', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = AFNICommand.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py000066400000000000000000000021761227300005300250450ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import AFNItoNIFTI def test_AFNItoNIFTI_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s.nii', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = AFNItoNIFTI.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_AFNItoNIFTI_outputs(): output_map = dict(out_file=dict(), ) outputs = AFNItoNIFTI.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Allineate.py000066400000000000000000000060001227300005300250570ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Allineate def test_Allineate_inputs(): input_map = dict(args=dict(argstr='%s', ), autobox=dict(argstr='-autobox', ), automask=dict(argstr='-automask+%d', ), autoweight=dict(argstr='-autoweight%s', ), center_of_mass=dict(argstr='-cmass%s', ), check=dict(argstr='-check %s', ), convergence=dict(argstr='-conv %f', ), cost=dict(argstr='-cost %s', ), environ=dict(nohash=True, usedefault=True, ), epi=dict(argstr='-EPI', ), final_interpolation=dict(argstr='-final %s', ), fine_blur=dict(argstr='-fineblur %f', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-source %s', copyfile=False, mandatory=True, position=-1, ), in_matrix=dict(argstr='-1Dmatrix_apply %s', position=-3, ), in_param_file=dict(argstr='-1Dparam_apply %s', ), interpolation=dict(argstr='-interp %s', ), master=dict(argstr='-master %s', ), newgrid=dict(argstr='-newgrid %f', ), nmatch=dict(argstr='-nmatch %d', ), no_pad=dict(argstr='-nopad', ), nomask=dict(argstr='-nomask', ), nwarp=dict(argstr='-nwarp %s', ), nwarp_fixdep=dict(argstr='-nwarp_fixdep%s', ), nwarp_fixmot=dict(argstr='-nwarp_fixmot%s', ), one_pass=dict(argstr='-onepass', ), out_file=dict(argstr='-prefix %s', genfile=True, name_source='%s_allineate', position=-2, ), out_matrix=dict(argstr='-1Dmatrix_save %s', ), out_param_file=dict(argstr='-1Dparam_save %s', ), out_weight_file=dict(argstr='-wtprefix %s', ), outputtype=dict(), reference=dict(argstr='-base %s', ), replacebase=dict(argstr='-replacebase', ), replacemeth=dict(argstr='-replacemeth %s', ), source_automask=dict(argstr='-source_automask+%d', ), source_mask=dict(argstr='-source_mask %s', ), terminal_output=dict(mandatory=True, nohash=True, ), two_best=dict(argstr='-twobest %d', ), two_blur=dict(argstr='-twoblur', ), two_first=dict(argstr='-twofirst', ), two_pass=dict(argstr='-twopass', ), 
usetemp=dict(argstr='-usetemp', ), warp_type=dict(argstr='-warp %s', ), warpfreeze=dict(argstr='-warpfreeze', ), weight_file=dict(argstr='-weight %s', ), zclip=dict(argstr='-zclip', ), ) inputs = Allineate.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Allineate_outputs(): output_map = dict(matrix=dict(), out_file=dict(), ) outputs = Allineate.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py000066400000000000000000000027041227300005300261250ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import AutoTcorrelate def test_AutoTcorrelate_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), eta2=dict(argstr='-eta2', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), mask=dict(argstr='-mask %s', ), mask_only_targets=dict(argstr='-mask_only_targets', xor=['mask_source'], ), mask_source=dict(argstr='-mask_source %s', xor=['mask_only_targets'], ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_similarity_matrix.1D', ), outputtype=dict(), polort=dict(argstr='-polort %d', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = AutoTcorrelate.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_AutoTcorrelate_outputs(): output_map = dict(out_file=dict(), ) outputs = AutoTcorrelate.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Autobox.py000066400000000000000000000024141227300005300246070ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Autobox def test_Autobox_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', copyfile=False, mandatory=True, ), no_clustering=dict(argstr='-noclust', ), out_file=dict(argstr='-prefix %s', name_source='in_file', ), outputtype=dict(), padding=dict(argstr='-npad %d', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Autobox.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Autobox_outputs(): output_map = dict(out_file=dict(), x_max=dict(), x_min=dict(), y_max=dict(), y_min=dict(), z_max=dict(), z_min=dict(), ) outputs = Autobox.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Automask.py000066400000000000000000000025711227300005300247560ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from 
nipype.interfaces.afni.preprocess import Automask def test_Automask_inputs(): input_map = dict(args=dict(argstr='%s', ), brain_file=dict(argstr='-apply_prefix %s', name_source='in_file', name_template='%s_masked', ), clfrac=dict(argstr='-clfrac %s', ), dilate=dict(argstr='-dilate %s', ), environ=dict(nohash=True, usedefault=True, ), erode=dict(argstr='-erode %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_mask', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Automask.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Automask_outputs(): output_map = dict(brain_file=dict(), out_file=dict(), ) outputs = Automask.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Bandpass.py000066400000000000000000000035101227300005300247170ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Bandpass def test_Bandpass_inputs(): input_map = dict(args=dict(argstr='%s', ), automask=dict(argstr='-automask', ), blur=dict(argstr='-blur %f', ), despike=dict(argstr='-despike', ), environ=dict(nohash=True, usedefault=True, ), highpass=dict(argstr='%f', mandatory=True, position=-3, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), localPV=dict(argstr='-localPV %f', ), lowpass=dict(argstr='%f', mandatory=True, position=-2, ), mask=dict(argstr='-mask %s', position=2, ), nfft=dict(argstr='-nfft %d', ), no_detrend=dict(argstr='-nodetrend', ), normalize=dict(argstr='-norm', ), notrans=dict(argstr='-notrans', ), orthogonalize_dset=dict(argstr='-dsort %s', ), orthogonalize_file=dict(argstr='-ort %s', ), out_file=dict(argstr='-prefix %s', genfile=True, name_source='in_file', name_template='%s_bp', position=1, ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), tr=dict(argstr='-dt %f', ), ) inputs = Bandpass.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Bandpass_outputs(): output_map = dict(out_file=dict(), ) outputs = Bandpass.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_BlurInMask.py000066400000000000000000000027351227300005300252030ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import BlurInMask def test_BlurInMask_inputs(): input_map = dict(args=dict(argstr='%s', ), automask=dict(argstr='-automask', ), environ=dict(nohash=True, usedefault=True, ), float_out=dict(argstr='-float', ), fwhm=dict(argstr='-FWHM %f', mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', copyfile=False, mandatory=True, position=1, ), mask=dict(argstr='-mask %s', ), multimask=dict(argstr='-Mmask %s', ), 
options=dict(argstr='%s', position=2, ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_blur', position=-1, ), outputtype=dict(), preserve=dict(argstr='-preserve', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = BlurInMask.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BlurInMask_outputs(): output_map = dict(out_file=dict(), ) outputs = BlurInMask.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_BrickStat.py000066400000000000000000000023151227300005300250540ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import BrickStat def test_BrickStat_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-1, ), mask=dict(argstr='-mask %s', position=2, ), min=dict(argstr='-min', position=1, ), out_file=dict(argstr='-prefix %s', name_source=['in_file'], name_template='%s_afni', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = BrickStat.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BrickStat_outputs(): output_map = dict(min_val=dict(), ) outputs = BrickStat.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Calc.py000066400000000000000000000026541227300005300240360ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Calc def test_Calc_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), expr=dict(argstr='-expr "%s"', mandatory=True, position=3, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file_a=dict(argstr='-a %s', mandatory=True, position=0, ), in_file_b=dict(argstr=' -b %s', position=1, ), in_file_c=dict(argstr=' -c %s', position=2, ), other=dict(argstr='', ), out_file=dict(argstr='-prefix %s', name_source='in_file_a', name_template='%s_calc', ), outputtype=dict(), single_idx=dict(), start_idx=dict(requires=['stop_idx'], ), stop_idx=dict(requires=['start_idx'], ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Calc.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Calc_outputs(): output_map = dict(out_file=dict(), ) outputs = Calc.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Copy.py000066400000000000000000000021341227300005300240770ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Copy def test_Copy_inputs(): 
input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-2, ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_copy', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Copy.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Copy_outputs(): output_map = dict(out_file=dict(), ) outputs = Copy.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Despike.py000066400000000000000000000021561227300005300245550ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Despike def test_Despike_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_despike', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Despike.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Despike_outputs(): output_map = dict(out_file=dict(), ) outputs = Despike.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Detrend.py000066400000000000000000000021561227300005300245560ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Detrend def test_Detrend_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_detrend', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Detrend.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Detrend_outputs(): output_map = dict(out_file=dict(), ) outputs = Detrend.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Fim.py000066400000000000000000000024511227300005300237020ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Fim def test_Fim_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fim_thr=dict(argstr='-fim_thr %f', position=3, ), ideal_file=dict(argstr='-ideal_file %s', mandatory=True, position=2, ), 
ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr=' -input %s', copyfile=False, mandatory=True, position=1, ), out=dict(argstr='-out %s', position=4, ), out_file=dict(argstr='-bucket %s', name_source='in_file', name_template='%s_fim', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Fim.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Fim_outputs(): output_map = dict(out_file=dict(), ) outputs = Fim.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Fourier.py000066400000000000000000000024241227300005300246020ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Fourier def test_Fourier_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), highpass=dict(argstr='-highpass %f', mandatory=True, position=1, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), lowpass=dict(argstr='-lowpass %f', mandatory=True, position=0, ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_fourier', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Fourier.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Fourier_outputs(): output_map = dict(out_file=dict(), ) outputs = Fourier.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Maskave.py000066400000000000000000000024041227300005300245540ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Maskave def test_Maskave_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-2, ), mask=dict(argstr='-mask %s', position=1, ), out_file=dict(argstr='> %s', keep_extension=True, name_source='in_file', name_template='%s_maskave.1D', position=-1, ), outputtype=dict(), quiet=dict(argstr='-quiet', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Maskave.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Maskave_outputs(): output_map = dict(out_file=dict(), ) outputs = Maskave.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Merge.py000066400000000000000000000023141227300005300242240ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Merge def test_Merge_inputs(): 
input_map = dict(args=dict(argstr='%s', ), blurfwhm=dict(argstr='-1blur_fwhm %d', units='mm', ), doall=dict(argstr='-doall', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_merge', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Merge.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Merge_outputs(): output_map = dict(out_file=dict(), ) outputs = Merge.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_ROIStats.py000066400000000000000000000022451227300005300246400ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import ROIStats def test_ROIStats_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-1, ), mask=dict(argstr='-mask %s', position=3, ), mask_f2short=dict(argstr='-mask_f2short', position=2, ), quiet=dict(argstr='-quiet', position=1, ), terminal_output=dict(mandatory=True, nohash=True, usedefault=True, ), ) inputs = ROIStats.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ROIStats_outputs(): output_map = dict(stats=dict(), ) outputs = ROIStats.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Refit.py000066400000000000000000000022341227300005300242370ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Refit def test_Refit_inputs(): input_map = dict(args=dict(argstr='%s', ), deoblique=dict(argstr='-deoblique', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=True, mandatory=True, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), xorigin=dict(argstr='-xorigin %s', ), yorigin=dict(argstr='-yorigin %s', ), zorigin=dict(argstr='-zorigin %s', ), ) inputs = Refit.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Refit_outputs(): output_map = dict(out_file=dict(), ) outputs = Refit.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Resample.py000066400000000000000000000024761227300005300247460ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Resample def test_Resample_inputs(): input_map = dict(args=dict(argstr='%s', ), 
environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inset %s', copyfile=False, mandatory=True, position=-1, ), master=dict(argstr='-master %s', ), orientation=dict(argstr='-orient %s', ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_resample', ), outputtype=dict(), resample_mode=dict(argstr='-rmode %s', ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_size=dict(argstr='-dxyz %f %f %f', ), ) inputs = Resample.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Resample_outputs(): output_map = dict(out_file=dict(), ) outputs = Resample.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Retroicor.py000066400000000000000000000030031227300005300251310ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Retroicor def test_Retroicor_inputs(): input_map = dict(args=dict(argstr='%s', ), card=dict(argstr='-card %s', position=-2, ), cardphase=dict(argstr='-cardphase %s', hash_files=False, position=-6, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), order=dict(argstr='-order %s', position=-5, ), out_file=dict(argstr='-prefix %s', mandatory=True, position=1, ), outputtype=dict(), resp=dict(argstr='-resp %s', position=-3, ), respphase=dict(argstr='-respphase %s', hash_files=False, position=-7, ), terminal_output=dict(mandatory=True, nohash=True, ), threshold=dict(argstr='-threshold %d', position=-4, ), ) inputs = Retroicor.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Retroicor_outputs(): output_map = dict(out_file=dict(), ) outputs = Retroicor.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_SkullStrip.py000066400000000000000000000022061227300005300253010ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import SkullStrip def test_SkullStrip_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', copyfile=False, mandatory=True, position=1, ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_skullstrip', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SkullStrip.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SkullStrip_outputs(): output_map = dict(out_file=dict(), ) outputs = SkullStrip.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_TCat.py000066400000000000000000000022231227300005300240170ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import TCat def test_TCat_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr=' %s', copyfile=False, mandatory=True, position=-1, ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_tcat', ), outputtype=dict(), rlt=dict(argstr='-rlt%s', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = TCat.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TCat_outputs(): output_map = dict(out_file=dict(), ) outputs = TCat.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_TCorr1D.py000066400000000000000000000031611227300005300244040ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import TCorr1D def test_TCorr1D_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), ktaub=dict(argstr=' -ktaub', position=1, xor=['pearson', 'spearman', 'quadrant'], ), out_file=dict(argstr='-prefix %s', keep_extension=True, name_source='xset', name_template='%s_correlation.nii.gz', ), outputtype=dict(), pearson=dict(argstr=' -pearson', position=1, xor=['spearman', 'quadrant', 'ktaub'], ), quadrant=dict(argstr=' -quadrant', position=1, xor=['pearson', 'spearman', 'ktaub'], ), spearman=dict(argstr=' -spearman', position=1, xor=['pearson', 'quadrant', 'ktaub'], ), terminal_output=dict(mandatory=True, nohash=True, ), xset=dict(argstr=' %s', copyfile=False, mandatory=True, position=-2, ), y_1d=dict(argstr=' %s', mandatory=True, position=-1, ), ) inputs = TCorr1D.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TCorr1D_outputs(): output_map = dict(out_file=dict(), ) outputs = TCorr1D.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_TCorrMap.py000066400000000000000000000072001227300005300246530ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import TCorrMap def test_TCorrMap_inputs(): input_map = dict(absolute_threshold=dict(argstr='-Thresh %f %s', name_source='in_file', suffix='_thresh', xor=('absolute_threshold', 'var_absolute_threshold', 'var_absolute_threshold_normalize'), ), args=dict(argstr='%s', ), automask=dict(argstr='-automask', ), average_expr=dict(argstr='-Aexpr %s %s', name_source='in_file', suffix='_aexpr', xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), ), average_expr_nonzero=dict(argstr='-Cexpr %s %s', name_source='in_file', suffix='_cexpr', xor=('average_expr', 'average_expr_nonzero', 
'sum_expr'), ), bandpass=dict(argstr='-bpass %f %f', ), blur_fwhm=dict(argstr='-Gblur %f', ), correlation_maps=dict(argstr='-CorrMap %s', name_source='in_file', ), correlation_maps_masked=dict(argstr='-CorrMask %s', name_source='in_file', ), environ=dict(nohash=True, usedefault=True, ), expr=dict(), histogram=dict(argstr='-Hist %d %s', name_source='in_file', suffix='_hist', ), histogram_bin_numbers=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', copyfile=False, mandatory=True, ), mask=dict(argstr='-mask %s', ), mean_file=dict(argstr='-Mean %s', name_source='in_file', suffix='_mean', ), out_file=dict(argstr='-prefix %s', name_source=['in_file'], name_template='%s_afni', ), outputtype=dict(), pmean=dict(argstr='-Pmean %s', name_source='in_file', suffix='_pmean', ), polort=dict(argstr='-polort %d', ), qmean=dict(argstr='-Qmean %s', name_source='in_file', suffix='_qmean', ), regress_out_timeseries=dict(argstr='-ort %s', ), seeds=dict(argstr='-seed %s', xor='seeds_width', ), seeds_width=dict(argstr='-Mseed %f', xor='seeds', ), sum_expr=dict(argstr='-Sexpr %s %s', name_source='in_file', suffix='_sexpr', xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), ), terminal_output=dict(mandatory=True, nohash=True, ), thresholds=dict(), var_absolute_threshold=dict(argstr='-VarThresh %f %f %f %s', name_source='in_file', suffix='_varthresh', xor=('absolute_threshold', 'var_absolute_threshold', 'var_absolute_threshold_normalize'), ), var_absolute_threshold_normalize=dict(argstr='-VarThreshN %f %f %f %s', name_source='in_file', suffix='_varthreshn', xor=('absolute_threshold', 'var_absolute_threshold', 'var_absolute_threshold_normalize'), ), zmean=dict(argstr='-Zmean %s', name_source='in_file', suffix='_zmean', ), ) inputs = TCorrMap.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TCorrMap_outputs(): output_map = dict(absolute_threshold=dict(), average_expr=dict(), average_expr_nonzero=dict(), correlation_maps=dict(), correlation_maps_masked=dict(), histogram=dict(), mean_file=dict(), pmean=dict(), qmean=dict(), sum_expr=dict(), var_absolute_threshold=dict(), var_absolute_threshold_normalize=dict(), zmean=dict(), ) outputs = TCorrMap.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_TCorrelate.py000066400000000000000000000025111227300005300252300ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import TCorrelate def test_TCorrelate_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), out_file=dict(argstr='-prefix %s', name_source='xset', name_template='%s_tcorr', ), outputtype=dict(), pearson=dict(argstr='-pearson', position=1, ), polort=dict(argstr='-polort %d', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), xset=dict(argstr=' %s', copyfile=False, mandatory=True, position=-2, ), yset=dict(argstr=' %s', copyfile=False, mandatory=True, position=-1, ), ) inputs = TCorrelate.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def 
test_TCorrelate_outputs(): output_map = dict(out_file=dict(), ) outputs = TCorrelate.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_TShift.py000066400000000000000000000027241227300005300243730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import TShift def test_TShift_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore=dict(argstr='-ignore %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), interp=dict(argstr='-%s', ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_tshift', ), outputtype=dict(), rlt=dict(argstr='-rlt', ), rltplus=dict(argstr='-rlt+', ), terminal_output=dict(mandatory=True, nohash=True, ), tpattern=dict(argstr='-tpattern %s', ), tr=dict(argstr='-TR %s', ), tslice=dict(argstr='-slice %s', xor=['tzero'], ), tzero=dict(argstr='-tzero %s', xor=['tslice'], ), ) inputs = TShift.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TShift_outputs(): output_map = dict(out_file=dict(), ) outputs = TShift.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_TStat.py000066400000000000000000000022571227300005300242320ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import TStat def test_TStat_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), mask=dict(argstr='-mask %s', ), options=dict(argstr='%s', ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_tstat', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = TStat.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TStat_outputs(): output_map = dict(out_file=dict(), ) outputs = TStat.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_To3D.py000066400000000000000000000025151227300005300237410ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import To3D def test_To3D_inputs(): input_map = dict(args=dict(argstr='%s', ), assumemosaic=dict(argstr='-assume_dicom_mosaic', ), datatype=dict(argstr='-datum %s', ), environ=dict(nohash=True, usedefault=True, ), filetype=dict(argstr='-%s', ), funcparams=dict(argstr='-time:zt %s alt+z2', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_folder=dict(argstr='%s/*.dcm', mandatory=True, position=-1, ), out_file=dict(argstr='-prefix %s', 
name_source=['in_folder'], name_template='%s', ), outputtype=dict(), skipoutliers=dict(argstr='-skip_outliers', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = To3D.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_To3D_outputs(): output_map = dict(out_file=dict(), ) outputs = To3D.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Volreg.py000066400000000000000000000032431227300005300244250ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Volreg def test_Volreg_inputs(): input_map = dict(args=dict(argstr='%s', ), basefile=dict(argstr='-base %s', position=-6, ), copyorigin=dict(argstr='-twodup', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), md1d_file=dict(argstr='-maxdisp1D %s', keep_extension=True, name_source='in_file', name_template='%s_md.1D', position=-4, ), oned_file=dict(argstr='-1Dfile %s', keep_extension=True, name_source='in_file', name_template='%s.1D', ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_volreg', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), timeshift=dict(argstr='-tshift 0', ), verbose=dict(argstr='-verbose', ), zpad=dict(argstr='-zpad %d', position=-5, ), ) inputs = Volreg.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Volreg_outputs(): output_map = dict(md1d_file=dict(), oned_file=dict(), out_file=dict(), ) outputs = Volreg.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_Warp.py000066400000000000000000000026161227300005300241030ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Warp def test_Warp_inputs(): input_map = dict(args=dict(argstr='%s', ), deoblique=dict(argstr='-deoblique', ), environ=dict(nohash=True, usedefault=True, ), gridset=dict(argstr='-gridset %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), interp=dict(argstr='-%s', ), matparent=dict(argstr='-matparent %s', ), mni2tta=dict(argstr='-mni2tta', ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_warp', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), tta2mni=dict(argstr='-tta2mni', ), zpad=dict(argstr='-zpad %d', ), ) inputs = Warp.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Warp_outputs(): output_map = dict(out_file=dict(), ) outputs = Warp.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
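# Illustrative sketch (not produced by tools/checkspecs.py): every test module above
# follows the same nose generator pattern -- compare each trait's metadata against the
# hand-written interface spec. The guarded snippet below performs the same check
# directly for a single trait of the Warp interface, reusing only names already
# imported in this file; it is a convenience for ad-hoc debugging, not part of the suite.
if __name__ == '__main__':
    _inputs = Warp.input_spec()
    assert getattr(_inputs.traits()['out_file'], 'name_template') == '%s_warp'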
nipype-0.9.2/nipype/interfaces/afni/tests/test_auto_ZCutUp.py000066400000000000000000000022201227300005300243570ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import ZCutUp def test_ZCutUp_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), keep=dict(argstr='-keep %s', ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_zcupup', ), outputtype=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ZCutUp.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ZCutUp_outputs(): output_map = dict(out_file=dict(), ) outputs = ZCutUp.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/000077500000000000000000000000001227300005300174125ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/ants/__init__.py000066400000000000000000000010341227300005300215210ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Top-level namespace for ants.""" # Registration programs from .registration import ANTS, Registration # Resampling Programs from resampling import ApplyTransforms, WarpImageMultiTransform, WarpTimeSeriesImageMultiTransform # Segmentation Programs from .segmentation import Atropos, N4BiasFieldCorrection # Utility Programs from .utils import AverageAffineTransform, AverageImages, MultiplyImages, JacobianDeterminant nipype-0.9.2/nipype/interfaces/ants/base.py000066400000000000000000000067261227300005300207050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The ants module provides basic functions for interfacing with ANTS tools.""" # Local imports from ..base import (CommandLine, CommandLineInputSpec, traits, isdefined) from ... import logging logger = logging.getLogger('interface') # -Using -1 gives primary responsibility to ITKv4 to do the correct # thread limiting. # -Using 1 takes a very conservative approach to avoid overloading # the computer (when running MultiProc) by forcing everything to # single threaded. This can be a severe penalty for registration # performance. LOCAL_DEFAULT_NUMBER_OF_THREADS=-1 # -Using NSLOTS has the same behavior as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS # as long as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS is not set. Otherwise # ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS takes precedence. # This behavior means that if the user explicitly specifies # num_threads, that value is respected no matter what SGE tries to limit.
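# A rough usage sketch of the policy above, as implemented by
# ANTSCommand._num_threads_update below (illustrative only; 'ANTS' stands in for any
# concrete ANTSCommand subclass):
#
#     reg = ANTS()
#     reg.inputs.num_threads = 4    # -> reg.inputs.environ['NSLOTS'] == '4'
#     reg.inputs.num_threads = -1   # -> NSLOTS and ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS
#                                   #    are dropped from environ, deferring to ITKv4/SGE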
PREFERED_ITKv4_THREAD_LIMIT_VARIABLE='NSLOTS' ALT_ITKv4_THREAD_LIMIT_VARIABLE='ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS' class ANTSCommandInputSpec(CommandLineInputSpec): """Base Input Specification for all ANTS Commands """ num_threads = traits.Int(LOCAL_DEFAULT_NUMBER_OF_THREADS, usedefault=True, nohash=True, desc="Number of ITK threads to use") class ANTSCommand(CommandLine): """Base class for ANTS interfaces """ input_spec = ANTSCommandInputSpec _num_threads = LOCAL_DEFAULT_NUMBER_OF_THREADS def __init__(self, **inputs): super(ANTSCommand, self).__init__(**inputs) self.inputs.on_trait_change(self._num_threads_update, 'num_threads') if not isdefined(self.inputs.num_threads): self.inputs.num_threads = self._num_threads else: self._num_threads_update() def _num_threads_update(self): self._num_threads = self.inputs.num_threads ## ONLY SET THE ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS if requested ## by the end user. The default setting did not allow for ## overwriting the default values. ## In ITKv4 (the version used for all ANTS programs), ITK respects ## the SGE controlled $NSLOTS environmental variable. ## If the user specifies -1, then that indicates that the system ## default behavior should be the one specified by ITKv4 rules ## (i.e. respect SGE $NSLOTS or environmental variables of threads, or ## user environmental settings) if ( self.inputs.num_threads == -1 ): if ( ALT_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ ): del self.inputs.environ[ALT_ITKv4_THREAD_LIMIT_VARIABLE] if ( PREFERED_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ ): del self.inputs.environ[PREFERED_ITKv4_THREAD_LIMIT_VARIABLE] else: self.inputs.environ.update({PREFERED_ITKv4_THREAD_LIMIT_VARIABLE: '%s' % self.inputs.num_threads}) @classmethod def set_default_num_threads(cls, num_threads): """Set the default number of threads for ITK calls This method is used to set the default number of ITK threads for all the ANTS interfaces. However, setting this will not update the number of threads for any existing instances. For these, assign the .inputs.num_threads """ cls._num_threads = num_threads nipype-0.9.2/nipype/interfaces/ants/legacy.py000066400000000000000000000311741227300005300212360ustar00rootroot00000000000000## NOTE: This implementation has been superseded by the antsApplyTransforms ## implementation that more closely follows the structure and capabilities ## of the antsApplyTransforms program. This implementation is here ## for backwards compatibility.
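## A minimal sketch of the preferred replacement import (ApplyTransforms is re-exported
## from this package's __init__ and defined in resampling.py):
##
##     from nipype.interfaces.ants import ApplyTransforms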
"""ANTS Apply Transforms interface Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ # Local imports from ..base import (TraitedSpec, File, traits, InputMultiPath, isdefined) from ...utils.filemanip import split_filename from .base import ANTSCommand, ANTSCommandInputSpec import os from glob import glob from nipype.interfaces.base import OutputMultiPath class antsIntroductionInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='-d %d', usedefault=True, desc='image dimension (2 or 3)', position=1) reference_image = File(exists=True, argstr='-r %s', desc='template file to warp to', mandatory=True, copyfile=True) input_image = File(exists=True, argstr='-i %s', desc='input image to warp to template', mandatory=True, copyfile=False) force_proceed = traits.Bool(argstr='-f 1', desc=('force script to proceed even if headers ' 'may be incompatible')) inverse_warp_template_labels = traits.Bool(argstr='-l', desc=('Applies inverse warp to the template labels ' 'to estimate label positions in target space (use ' 'for template-based segmentation)')) max_iterations = traits.List(traits.Int, argstr='-m %s', sep='x', desc=('maximum number of iterations (must be ' 'list of integers in the form [J,K,L...]: ' 'J = coarsest resolution iterations, K = ' 'middle resolution interations, L = fine ' 'resolution iterations')) bias_field_correction = traits.Bool(argstr='-n 1', desc=('Applies bias field correction to moving ' 'image')) similarity_metric = traits.Enum('PR', 'CC', 'MI', 'MSQ', argstr='-s %s', desc=('Type of similartiy metric used for registration ' '(CC = cross correlation, MI = mutual information, ' 'PR = probability mapping, MSQ = mean square difference)')) transformation_model = traits.Enum('GR', 'EL', 'SY', 'S2', 'EX', 'DD', 'RI', 'RA', argstr='-t %s', usedefault=True, desc=('Type of transofmration model used for registration ' '(EL = elastic transformation model, SY = SyN with time, ' 'arbitrary number of time points, S2 = SyN with time ' 'optimized for 2 time points, GR = greedy SyN, EX = ' 'exponential, DD = diffeomorphic demons style exponential ' 'mapping, RI = purely rigid, RA = affine rigid')) out_prefix = traits.Str('ants_', argstr='-o %s', usedefault=True, desc=('Prefix that is prepended to all output ' 'files (default = ants_)')) quality_check = traits.Bool(argstr='-q 1', desc='Perform a quality check of the result') class antsIntroductionOutputSpec(TraitedSpec): affine_transformation = File(exists=True, desc='affine (prefix_Affine.txt)') warp_field = File(exists=True, desc='warp field (prefix_Warp.nii)') inverse_warp_field = File(exists=True, desc='inverse warp field (prefix_InverseWarp.nii)') input_file = File(exists=True, desc='input image (prefix_repaired.nii)') output_file = File(exists=True, desc='output image (prefix_deformed.nii)') class antsIntroduction(ANTSCommand): """Uses ANTS to generate matrices to warp data from one space to another. 
Examples -------- >>> from nipype.interfaces.ants.legacy import antsIntroduction >>> warp = antsIntroduction() >>> warp.inputs.reference_image = 'Template_6.nii' >>> warp.inputs.input_image = 'structural.nii' >>> warp.inputs.max_iterations = [30,90,20] >>> warp.cmdline 'antsIntroduction.sh -d 3 -i structural.nii -m 30x90x20 -o ants_ -r Template_6.nii -t GR' """ _cmd = 'antsIntroduction.sh' input_spec = antsIntroductionInputSpec output_spec = antsIntroductionOutputSpec def _list_outputs(self): outputs = self._outputs().get() outputs['affine_transformation'] = os.path.join(os.getcwd(), self.inputs.out_prefix + 'Affine.txt') outputs['warp_field'] = os.path.join(os.getcwd(), self.inputs.out_prefix + 'Warp.nii.gz') outputs['inverse_warp_field'] = os.path.join(os.getcwd(), self.inputs.out_prefix + 'InverseWarp.nii.gz') outputs['input_file'] = os.path.join(os.getcwd(), self.inputs.out_prefix + 'repaired.nii.gz') outputs['output_file'] = os.path.join(os.getcwd(), self.inputs.out_prefix + 'deformed.nii.gz') return outputs ## How do we make a pass through so that GenWarpFields is just an alias for antsIntroduction ? class GenWarpFields(antsIntroduction): pass class buildtemplateparallelInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='-d %d', usedefault=True, desc='image dimension (2 or 3)', position=1) out_prefix = traits.Str('antsTMPL_', argstr='-o %s', usedefault=True, desc=('Prefix that is prepended to all output ' 'files (default = antsTMPL_)')) in_files = traits.List(File(exists=True), mandatory=True, desc='list of images to generate template from', argstr='%s', position=-1) parallelization = traits.Enum(0, 1, 2, argstr='-c %d', usedefault=True, desc=('control for parallel processing (0 = ' 'serial, 1 = use PBS, 2 = use PEXEC, 3 = ' 'use Apple XGrid')) gradient_step_size = traits.Float(argstr='-g %f', desc=('smaller magnitude results in ' 'more cautious steps (default = ' '.25)')) iteration_limit = traits.Int(4, argstr='-i %d', usedefault=True, desc='iterations of template construction') num_cores = traits.Int(argstr='-j %d', requires=['parallelization'], desc=('Requires parallelization = 2 (PEXEC). ' 'Sets number of cpu cores to use')) max_iterations = traits.List(traits.Int, argstr='-m %s', sep='x', desc=('maximum number of iterations (must be ' 'list of integers in the form [J,K,L...]: ' 'J = coarsest resolution iterations, K = ' 'middle resolution interations, L = fine ' 'resolution iterations')) bias_field_correction = traits.Bool(argstr='-n 1', desc=('Applies bias field correction to moving ' 'image')) rigid_body_registration = traits.Bool(argstr='-r 1', desc=('registers inputs before creating template ' '(useful if no initial template available)')) similarity_metric = traits.Enum('PR', 'CC', 'MI', 'MSQ', argstr='-s %s', desc=('Type of similartiy metric used for registration ' '(CC = cross correlation, MI = mutual information, ' 'PR = probability mapping, MSQ = mean square difference)')) transformation_model = traits.Enum('GR', 'EL', 'SY', 'S2', 'EX', 'DD', argstr='-t %s', usedefault=True, desc=('Type of transofmration model used for registration ' '(EL = elastic transformation model, SY = SyN with time, ' 'arbitrary number of time points, S2 = SyN with time ' 'optimized for 2 time points, GR = greedy SyN, EX = ' 'exponential, DD = diffeomorphic demons style exponential ' 'mapping')) use_first_as_target = traits.Bool(desc=('uses first volume as target of ' 'all inputs. 
When not used, an ' 'unbiased average image is used ' 'to start.')) class buildtemplateparallelOutputSpec(TraitedSpec): final_template_file = File(exists=True, desc='final ANTS template') template_files = OutputMultiPath(File(exists=True), desc='Templates from different stages of iteration') subject_outfiles = OutputMultiPath(File(exists=True), desc=('Outputs for each input image. Includes warp ' 'field, inverse warp, Affine, original image ' '(repaired) and warped image (deformed)')) class buildtemplateparallel(ANTSCommand): """Generate a optimal average template .. warning:: This can take a VERY long time to complete Examples -------- >>> from nipype.interfaces.ants.legacy import buildtemplateparallel >>> tmpl = buildtemplateparallel() >>> tmpl.inputs.in_files = ['T1.nii', 'structural.nii'] >>> tmpl.inputs.max_iterations = [30, 90, 20] >>> tmpl.cmdline 'buildtemplateparallel.sh -d 3 -i 4 -m 30x90x20 -o antsTMPL_ -c 0 -t GR T1.nii structural.nii' """ _cmd = 'buildtemplateparallel.sh' input_spec = buildtemplateparallelInputSpec output_spec = buildtemplateparallelOutputSpec def _format_arg(self, opt, spec, val): if opt == 'num_cores': if self.inputs.parallelization == 2: return '-j ' + str(val) else: return '' if opt == 'in_files': if self.inputs.use_first_as_target: start = '-z ' else: start = '' return start + ' '.join(name for name in val) return super(buildtemplateparallel, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['template_files'] = [] for i in range(len(glob(os.path.realpath('*iteration*')))): temp = os.path.realpath('%s_iteration_%d/%stemplate.nii.gz' % (self.inputs.transformation_model, i, self.inputs.out_prefix)) os.rename(temp, os.path.realpath('%s_iteration_%d/%stemplate_i%d.nii.gz' % (self.inputs.transformation_model, i, self.inputs.out_prefix, i))) file_ = ('%s_iteration_%d/%stemplate_i%d.nii.gz' % (self.inputs.transformation_model, i, self.inputs.out_prefix, i)) outputs['template_files'].append(os.path.realpath(file_)) outputs['final_template_file'] = \ os.path.realpath('%stemplate.nii.gz' % self.inputs.out_prefix) outputs['subject_outfiles'] = [] for filename in self.inputs.in_files: _, base, _ = split_filename(filename) temp = glob(os.path.realpath('%s%s*' % (self.inputs.out_prefix, base))) for file_ in temp: outputs['subject_outfiles'].append(file_) return outputs nipype-0.9.2/nipype/interfaces/ants/registration.py000066400000000000000000001157121227300005300225050ustar00rootroot00000000000000"""The ants module provides basic functions for interfacing with ants functions. 
Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from ..base import (TraitedSpec, File, traits) from .base import ANTSCommand, ANTSCommandInputSpec import os from nipype.interfaces.base import InputMultiPath from nipype.interfaces.traits_extension import isdefined class ANTSInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, position=1, desc='image dimension (2 or 3)') fixed_image = InputMultiPath(File(exists=True), mandatory=True, desc=('image to apply transformation to (generally a coregistered ' 'functional)')) moving_image = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, desc=('image to apply transformation to (generally a coregistered ' 'functional)')) metric = traits.List(traits.Enum('CC', 'MI', 'SMI', 'PR', 'SSD', 'MSQ', 'PSE'), mandatory=True, desc='') metric_weight = traits.List(traits.Float(), requires=['metric'], desc='') radius = traits.List(traits.Int(), requires=['metric'], desc='') output_transform_prefix = traits.Str('out', usedefault=True, argstr='--output-naming %s', mandatory=True, desc='') transformation_model = traits.Enum('Diff', 'Elast', 'Exp', 'Greedy Exp', 'SyN', argstr='%s', mandatory=True, desc='') gradient_step_length = traits.Float( requires=['transformation_model'], desc='') number_of_time_steps = traits.Float( requires=['gradient_step_length'], desc='') delta_time = traits.Float(requires=['number_of_time_steps'], desc='') symmetry_type = traits.Float(requires=['delta_time'], desc='') use_histogram_matching = traits.Bool( argstr='%s', default=True, usedefault=True) number_of_iterations = traits.List( traits.Int(), argstr='--number-of-iterations %s', sep='x') smoothing_sigmas = traits.List( traits.Int(), argstr='--gaussian-smoothing-sigmas %s', sep='x') subsampling_factors = traits.List( traits.Int(), argstr='--subsampling-factors %s', sep='x') affine_gradient_descent_option = traits.List(traits.Float(), argstr='%s') mi_option = traits.List(traits.Int(), argstr='--MI-option %s', sep='x') regularization = traits.Enum('Gauss', 'DMFFD', argstr='%s', desc='') regularization_gradient_field_sigma = traits.Float( requires=['regularization'], desc='') regularization_deformation_field_sigma = traits.Float( requires=['regularization'], desc='') number_of_affine_iterations = traits.List( traits.Int(), argstr='--number-of-affine-iterations %s', sep='x') class ANTSOutputSpec(TraitedSpec): affine_transform = File(exists=True, desc='Affine transform file') warp_transform = File(exists=True, desc='Warping deformation field') inverse_warp_transform = File( exists=True, desc='Inverse warping deformation field') metaheader = File(exists=True, desc='VTK metaheader .mhd file') metaheader_raw = File(exists=True, desc='VTK metaheader .raw file') class ANTS(ANTSCommand): """ Examples -------- >>> from nipype.interfaces.ants import ANTS >>> ants = ANTS() >>> ants.inputs.dimension = 3 >>> ants.inputs.output_transform_prefix = 'MY' >>> ants.inputs.metric = ['CC'] >>> ants.inputs.fixed_image = ['T1.nii'] >>> ants.inputs.moving_image = ['resting.nii'] >>> ants.inputs.metric_weight = [1.0] >>> ants.inputs.radius = [5] >>> ants.inputs.transformation_model = 'SyN' >>> ants.inputs.gradient_step_length = 0.25 >>> ants.inputs.number_of_iterations = [50, 35, 15] >>> ants.inputs.use_histogram_matching = True >>> ants.inputs.mi_option = [32, 16000] >>> 
ants.inputs.regularization = 'Gauss' >>> ants.inputs.regularization_gradient_field_sigma = 3 >>> ants.inputs.regularization_deformation_field_sigma = 0 >>> ants.inputs.number_of_affine_iterations = [10000,10000,10000,10000,10000] >>> ants.cmdline 'ANTS 3 --MI-option 32x16000 --image-metric CC[ T1.nii, resting.nii, 1, 5 ] --number-of-affine-iterations 10000x10000x10000x10000x10000 --number-of-iterations 50x35x15 --output-naming MY --regularization Gauss[3.0,0.0] --transformation-model SyN[0.25] --use-Histogram-Matching 1' """ _cmd = 'ANTS' input_spec = ANTSInputSpec output_spec = ANTSOutputSpec def _image_metric_constructor(self): retval = [] intensityBased = ['CC', 'MI', 'SMI', 'PR', 'SSD', 'MSQ'] pointSetBased = ['PSE', 'JTB'] for ii in range(len(self.inputs.moving_image)): if self.inputs.metric[ii] in intensityBased: retval.append( '--image-metric %s[ %s, %s, %g, %d ]' % (self.inputs.metric[ii], self.inputs.fixed_image[ii], self.inputs.moving_image[ii], self.inputs.metric_weight[ii], self.inputs.radius[ii])) elif self.inputs.metric[ii] == pointSetBased: pass # retval.append('--image-metric %s[%s, %s, ...'.format(self.inputs.metric[ii], self.inputs.fixed_image[ii], self.inputs.moving_image[ii], ...)) return ' '.join(retval) def _transformation_constructor(self): model = self.inputs.transformation_model stepLength = self.inputs.gradient_step_length timeStep = self.inputs.number_of_time_steps deltaTime = self.inputs.delta_time symmetryType = self.inputs.symmetry_type retval = ['--transformation-model %s' % model] parameters = [] for elem in (stepLength, timeStep, deltaTime, symmetryType): if not elem is traits.Undefined: parameters.append('%#.2g' % elem) if len(parameters) > 0: if len(parameters) > 1: parameters = ','.join(parameters) else: parameters = ''.join(parameters) retval.append('[%s]' % parameters) return ''.join(retval) def _regularization_constructor(self): return '--regularization {0}[{1},{2}]'.format(self.inputs.regularization, self.inputs.regularization_gradient_field_sigma, self.inputs.regularization_deformation_field_sigma) def _affine_gradient_descent_option_constructor(self): retval = ['--affine-gradient-descent-option'] values = self.inputs.affine_gradient_descent_option defaults = [0.1, 0.5, 1.e-4, 1.e-4] for ii in range(len(defaults)): try: defaults[ii] = values[ii] except IndexError: break stringList = [('%g' % defaults[index]) for index in range(4)] parameters = 'x'.join(stringList) retval.append(parameters) return ' '.join(retval) def _format_arg(self, opt, spec, val): if opt == 'moving_image': return self._image_metric_constructor() elif opt == 'transformation_model': return self._transformation_constructor() elif opt == 'regularization': return self._regularization_constructor() elif opt == 'affine_gradient_descent_option': return self._affine_gradient_descent_option_constructor() elif opt == 'use_histogram_matching': if self.inputs.use_histogram_matching: return '--use-Histogram-Matching 1' else: return '--use-Histogram-Matching 0' return super(ANTS, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['affine_transform'] = os.path.abspath( self.inputs.output_transform_prefix + 'Affine.txt') outputs['warp_transform'] = os.path.abspath( self.inputs.output_transform_prefix + 'Warp.nii.gz') outputs['inverse_warp_transform'] = os.path.abspath( self.inputs.output_transform_prefix + 'InverseWarp.nii.gz') #outputs['metaheader'] = os.path.abspath(self.inputs.output_transform_prefix + 'velocity.mhd') #outputs['metaheader_raw'] 
= os.path.abspath(self.inputs.output_transform_prefix + 'velocity.raw') return outputs class RegistrationInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='--dimensionality %d', usedefault=True, desc='image dimension (2 or 3)') fixed_image = InputMultiPath(File(exists=True), mandatory=True, desc='image to apply transformation to (generally a coregistered functional)') fixed_image_mask = File(argstr='%s', exists=True, desc='mask used to limit registration region') moving_image = InputMultiPath(File(exists=True), mandatory=True, desc='image to apply transformation to (generally a coregistered functional)') moving_image_mask = File(requires=['fixed_image_mask'], exists=True, desc='') initial_moving_transform = File(argstr='%s', exists=True, desc='', xor=['initial_moving_transform_com']) invert_initial_moving_transform = traits.Bool( default=False, requires=["initial_moving_transform"], desc='', xor=['initial_moving_transform_com']) initial_moving_transform_com = traits.Enum(0, 1, 2, argstr='%s', default=0, xor=['initial_moving_transform'], desc="Use center of mass for moving transform") metric_item_trait = traits.Enum("CC", "MeanSquares", "Demons", "GC", "MI", "Mattes") metric_stage_trait = traits.Either( metric_item_trait, traits.List(metric_item_trait)) metric = traits.List(metric_stage_trait, mandatory=True, desc='the metric(s) to use for each stage. ' 'Note that multiple metrics per stage are not supported ' 'in ANTS 1.9.1 and earlier.') metric_weight_item_trait = traits.Float(1.0) metric_weight_stage_trait = traits.Either( metric_weight_item_trait, traits.List(metric_weight_item_trait)) metric_weight = traits.List( metric_weight_stage_trait, value=[1.0], usedefault=True, requires=['metric'], mandatory=True, desc='the metric weight(s) for each stage. 
' 'The weights must sum to 1 per stage.') radius_bins_item_trait = traits.Int(5) radius_bins_stage_trait = traits.Either( radius_bins_item_trait, traits.List(radius_bins_item_trait)) radius_or_number_of_bins = traits.List( radius_bins_stage_trait, value=[5], usedefault=True, requires=['metric_weight'], desc='the number of bins in each stage for the MI and Mattes metric, ' 'the radius for other metrics') sampling_strategy_item_trait = traits.Enum("None", "Regular", "Random", None) sampling_strategy_stage_trait = traits.Either( sampling_strategy_item_trait, traits.List(sampling_strategy_item_trait)) sampling_strategy = traits.List( trait=sampling_strategy_stage_trait, requires=['metric_weight'], desc='the metric sampling strategy (strategies) for each stage') sampling_percentage_item_trait = traits.Either(traits.Range(low=0.0, high=1.0), None) sampling_percentage_stage_trait = traits.Either( sampling_percentage_item_trait, traits.List(sampling_percentage_item_trait)) sampling_percentage = traits.List( trait=sampling_percentage_stage_trait, requires=['sampling_strategy'], desc="the metric sampling percentage(s) to use for each stage") use_estimate_learning_rate_once = traits.List(traits.Bool(), desc='') use_histogram_matching = traits.Either(traits.Bool, traits.List(traits.Bool(argstr='%s')), default=True, usedefault=True) interpolation = traits.Enum( 'Linear', 'NearestNeighbor', 'CosineWindowedSinc', 'WelchWindowedSinc', 'HammingWindowedSinc', 'LanczosWindowedSinc', 'BSpline', # 'MultiLabel', # 'Gaussian', # 'BSpline', argstr='%s', usedefault=True) #MultiLabel[,] #Gaussian[,] #BSpline[] write_composite_transform = traits.Bool(argstr='--write-composite-transform %d', default=False, usedefault=True, desc='') collapse_output_transforms = traits.Bool( argstr='--collapse-output-transforms %d', default=True, usedefault=True, # This should be true for explicit completeness desc=('Collapse output transforms. Specifically, enabling this option ' 'combines all adjacent linear transforms and composes all ' 'adjacent displacement field transforms before writing the ' 'results to disk.')) transforms = traits.List(traits.Enum('Rigid', 'Affine', 'CompositeAffine', 'Similarity', 'Translation', 'BSpline', 'GaussianDisplacementField', 'TimeVaryingVelocityField', 'TimeVaryingBSplineVelocityField', 'SyN', 'BSplineSyN', 'Exponential', 'BSplineExponential'), argstr='%s', mandatory=True) # TODO: transform_parameters currently supports rigid, affine, composite affine, translation, bspline, gaussian displacement field (gdf), and SyN -----ONLY-----! 
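# Illustrative per-transform parameter tuples (example values only; the Affine and SyN
# tuples match the class doctests below, the BSplineSyN integers are hypothetical):
#     'Rigid' / 'Affine' / 'Translation':    (2.0,)            # gradient step
#     'GaussianDisplacementField' / 'SyN':   (0.25, 3.0, 0.0)  # step, update-field sigma, total-field sigma
#     'BSplineSyN':                          (0.25, 26, 0, 3)  # step plus three integer B-spline settings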
transform_parameters = traits.List(traits.Either(traits.Float(), traits.Tuple(traits.Float()), traits.Tuple(traits.Float(), # gdf & syn traits.Float(), traits.Float()), traits.Tuple(traits.Float(), # BSplineSyn traits.Int(), traits.Int(), traits.Int()))) # Convergence flags number_of_iterations = traits.List(traits.List(traits.Int())) smoothing_sigmas = traits.List(traits.List(traits.Float()), mandatory=True) sigma_units = traits.List(traits.Enum('mm', 'vox'), requires=['smoothing_sigmas'], desc="units for smoothing sigmas") shrink_factors = traits.List(traits.List(traits.Int()), mandatory=True) convergence_threshold = traits.List(trait=traits.Float(), value=[1e-6], minlen=1, requires=['number_of_iterations'], usedefault=True) convergence_window_size = traits.List(trait=traits.Int(), value=[10], minlen=1, requires=['convergence_threshold'], usedefault=True) # Output flags output_transform_prefix = traits.Str( "transform", usedefault=True, argstr="%s", desc="") output_warped_image = traits.Either( traits.Bool, File(), hash_files=False, desc="") output_inverse_warped_image = traits.Either(traits.Bool, File(), hash_files=False, requires=['output_warped_image'], desc="") winsorize_upper_quantile = traits.Range(low=0.0, high=1.0, value=1.0, argstr='%s', usedefault=True, desc="The Upper quantile to clip image ranges") winsorize_lower_quantile = traits.Range(low=0.0, high=1.0, value=0.0, argstr='%s', usedefault=True, desc="The Lower quantile to clip image ranges") collapse_linear_transforms_to_fixed_image_header = traits.Bool( argstr='%s', default=False, usedefault=True, desc='') class RegistrationOutputSpec(TraitedSpec): forward_transforms = traits.List( File(exists=True), desc='List of output transforms for forward registration') reverse_transforms = traits.List( File(exists=True), desc='List of output transforms for reverse registration') forward_invert_flags = traits.List(traits.Bool( ), desc='List of flags corresponding to the forward transforms') reverse_invert_flags = traits.List(traits.Bool( ), desc='List of flags corresponding to the reverse transforms') composite_transform = traits.List(File(exists=True), desc='Composite transform file') inverse_composite_transform = traits.List( File(exists=True), desc='Inverse composite transform file') warped_image = File(desc="Outputs warped image") inverse_warped_image = File(desc="Outputs the inverse of the warped image") class Registration(ANTSCommand): """ Examples -------- >>> import copy >>> from nipype.interfaces.ants import Registration >>> reg = Registration() >>> reg.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] >>> reg.inputs.moving_image = ['moving1.nii', 'moving2.nii'] >>> reg.inputs.output_transform_prefix = "output_" >>> reg.inputs.initial_moving_transform = 'trans.mat' >>> reg.inputs.invert_initial_moving_transform = True >>> reg.inputs.transforms = ['Affine', 'SyN'] >>> reg.inputs.transform_parameters = [(2.0,), (0.25, 3.0, 0.0)] >>> reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]] >>> reg.inputs.dimension = 3 >>> reg.inputs.write_composite_transform = True >>> reg.inputs.collapse_output_transforms = False >>> reg.inputs.metric = ['Mattes']*2 >>> reg.inputs.metric_weight = [1]*2 # Default (value ignored currently by ANTs) >>> reg.inputs.radius_or_number_of_bins = [32]*2 >>> reg.inputs.sampling_strategy = ['Random', None] >>> reg.inputs.sampling_percentage = [0.05, None] >>> reg.inputs.convergence_threshold = [1.e-8, 1.e-9] >>> reg.inputs.convergence_window_size = [20]*2 >>> reg.inputs.smoothing_sigmas = [[1,0], 
[2,1,0]] >>> reg.inputs.sigma_units = ['vox'] * 2 >>> reg.inputs.shrink_factors = [[2,1], [3,2,1]] >>> reg.inputs.use_estimate_learning_rate_once = [True, True] >>> reg.inputs.use_histogram_matching = [True, True] # This is the default >>> reg.inputs.output_warped_image = 'output_warped_image.nii.gz' >>> reg1 = copy.deepcopy(reg) >>> reg1.inputs.winsorize_lower_quantile = 0.025 >>> reg1.inputs.collapse_linear_transforms_to_fixed_image_header = False >>> reg1.cmdline 'antsRegistration --collapse-linear-transforms-to-fixed-image-header 0 --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1' >>> reg1.run() #doctest: +SKIP >>> reg2 = copy.deepcopy(reg) >>> reg2.inputs.winsorize_upper_quantile = 0.975 >>> reg2.cmdline 'antsRegistration --collapse-linear-transforms-to-fixed-image-header 0 --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1' >>> reg3 = copy.deepcopy(reg) >>> reg3.inputs.winsorize_lower_quantile = 0.025 >>> reg3.inputs.winsorize_upper_quantile = 0.975 >>> reg3.cmdline 'antsRegistration --collapse-linear-transforms-to-fixed-image-header 0 --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1' >>> # Test collapse transforms flag >>> reg4 = copy.deepcopy(reg) >>> reg4.inputs.collapse_output_transforms = True >>> outputs = reg4._list_outputs() >>> print outputs #doctest: +ELLIPSIS {'reverse_invert_flags': [True, False], 'inverse_composite_transform': ['.../nipype/testing/data/output_InverseComposite.h5'], 'warped_image': '.../nipype/testing/data/output_warped_image.nii.gz', 'inverse_warped_image': , 
'forward_invert_flags': [False, False], 'reverse_transforms': ['.../nipype/testing/data/output_0GenericAffine.mat', '.../nipype/testing/data/output_1InverseWarp.nii.gz'], 'composite_transform': ['.../nipype/testing/data/output_Composite.h5'], 'forward_transforms': ['.../nipype/testing/data/output_0GenericAffine.mat', '.../nipype/testing/data/output_1Warp.nii.gz']} >>> reg4.aggregate_outputs() #doctest: +SKIP >>> # Test multiple metrics per stage >>> reg5 = copy.deepcopy(reg) >>> reg5.inputs.metric = ['CC', ['CC', 'Mattes']] >>> reg5.inputs.metric_weight = [1, [.5]*2] >>> reg5.inputs.radius_or_number_of_bins = [4, [32]*2] >>> reg5.inputs.sampling_strategy = ['Random', None] # use default strategy in second stage >>> reg5.inputs.sampling_percentage = [0.05, [0.05, 0.10]] >>> reg5.cmdline 'antsRegistration --collapse-linear-transforms-to-fixed-image-header 0 --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric CC[ fixed1.nii, moving1.nii, 1, 4, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric CC[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' """ DEF_SAMPLING_STRATEGY = 'None' """The default sampling stratey argument.""" _cmd = 'antsRegistration' input_spec = RegistrationInputSpec output_spec = RegistrationOutputSpec _quantilesDone = False def _formatMetric(self, index): """ Format the antsRegistration -m metric argument(s). Parameters ---------- index: the stage index """ # The common fixed image. fixed = self.inputs.fixed_image[0] # The common moving image. moving = self.inputs.moving_image[0] # The metric name input for the current stage. name_input = self.inputs.metric[index] # The stage-specific input dictionary. stage_inputs = dict( metric=name_input, weight=self.inputs.metric_weight[index], radius_or_bins=self.inputs.radius_or_number_of_bins[index], optional=self.inputs.radius_or_number_of_bins[index] ) # The optional sampling strategy and percentage. if (isdefined(self.inputs.sampling_strategy) and self.inputs.sampling_strategy): sampling_strategy = self.inputs.sampling_strategy[index] if sampling_strategy: stage_inputs['sampling_strategy'] = sampling_strategy sampling_percentage = self.inputs.sampling_percentage if (isdefined(self.inputs.sampling_percentage) and self.inputs.sampling_percentage): sampling_percentage = self.inputs.sampling_percentage[index] if sampling_percentage: stage_inputs['sampling_percentage'] = sampling_percentage # Make a list of metric specifications, one per -m command line # argument for the current stage. # If there are multiple inputs for this stage, then convert the # dictionary of list inputs into a list of metric specifications. # Otherwise, make a singleton list of the metric specification # from the non-list inputs. 
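# Worked example of that conversion, mirroring the reg5 doctest above (sampling keys,
# when present, are handled the same way): with
#     name_input   = ['CC', 'Mattes']
#     stage_inputs = {'metric': ['CC', 'Mattes'], 'weight': [0.5, 0.5],
#                     'radius_or_bins': [32, 32], 'optional': [32, 32]}
# the list comprehension below yields one specification per '--metric' argument:
#     [{'metric': 'CC', 'weight': 0.5, 'radius_or_bins': 32, 'optional': 32},
#      {'metric': 'Mattes', 'weight': 0.5, 'radius_or_bins': 32, 'optional': 32}]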
if isinstance(name_input, list): items = stage_inputs.items() indexes = range(0, len(name_input)) # dict-comprehension only works with python 2.7 and up #specs = [{k: v[i] for k, v in items} for i in indexes] specs = [dict([(k, v[i]) for k,v in items]) for i in indexes] else: specs = [stage_inputs] # Format the --metric command line metric arguments, one per specification. return [self._formatMetricArgument(fixed, moving, **spec) for spec in specs] def _formatMetricArgument(self, fixed, moving, **kwargs): retval = '%s[ %s, %s, %g, %d' % (kwargs['metric'], fixed, moving, kwargs['weight'], kwargs['radius_or_bins']) # The optional sampling strategy. if kwargs.has_key('sampling_strategy'): sampling_strategy = kwargs['sampling_strategy'] elif kwargs.has_key('sampling_percentage'): # The sampling percentage is specified but not the # sampling strategy. Use the default strategy. sampling_strategy = Registration.DEF_SAMPLING_STRATEGY else: sampling_strategy = None # Format the optional sampling arguments. if sampling_strategy: retval += ', %s' % sampling_strategy if kwargs.has_key('sampling_percentage'): retval += ', %g' % kwargs['sampling_percentage'] retval += ' ]' return retval def _formatTransform(self, index): retval = [] retval.append('%s[ ' % self.inputs.transforms[index]) parameters = ', '.join([str( element) for element in self.inputs.transform_parameters[index]]) retval.append('%s' % parameters) retval.append(' ]') return "".join(retval) def _formatRegistration(self): retval = [] for ii in range(len(self.inputs.transforms)): retval.append('--transform %s' % (self._formatTransform(ii))) for metric in self._formatMetric(ii): retval.append('--metric %s' % metric) retval.append('--convergence %s' % self._formatConvergence(ii)) if isdefined(self.inputs.sigma_units): retval.append('--smoothing-sigmas %s%s' % (self._antsJoinList(self.inputs.smoothing_sigmas[ii]), self.inputs.sigma_units[ii])) else: retval.append('--smoothing-sigmas %s' % self._antsJoinList(self.inputs.smoothing_sigmas[ii])) retval.append('--shrink-factors %s' % self._antsJoinList(self.inputs.shrink_factors[ii])) if isdefined(self.inputs.use_estimate_learning_rate_once): retval.append('--use-estimate-learning-rate-once %d' % self.inputs.use_estimate_learning_rate_once[ii]) if isdefined(self.inputs.use_histogram_matching): # use_histogram_matching is either a common flag for all transforms # or a list of transform-specific flags if isinstance(self.inputs.use_histogram_matching, bool): histval = self.inputs.use_histogram_matching else: histval = self.inputs.use_histogram_matching[ii] retval.append('--use-histogram-matching %d' % histval) return " ".join(retval) def _antsJoinList(self, antsList): return "x".join([str(i) for i in antsList]) def _get_outputfilenames(self, inverse=False): output_filename = None if not inverse: if isdefined(self.inputs.output_warped_image) and \ self.inputs.output_warped_image: output_filename = self.inputs.output_warped_image if isinstance(output_filename, bool): output_filename = '%s_Warped.nii.gz' % self.inputs.output_transform_prefix else: output_filename = output_filename return output_filename inv_output_filename = None if isdefined(self.inputs.output_inverse_warped_image) and \ self.inputs.output_inverse_warped_image: inv_output_filename = self.inputs.output_inverse_warped_image if isinstance(inv_output_filename, bool): inv_output_filename = '%s_InverseWarped.nii.gz' % self.inputs.output_transform_prefix else: inv_output_filename = inv_output_filename return inv_output_filename def 
_formatConvergence(self, ii): convergence_iter = self._antsJoinList( self.inputs.number_of_iterations[ii]) if len(self.inputs.convergence_threshold) > ii: convergence_value = self.inputs.convergence_threshold[ii] else: convergence_value = self.inputs.convergence_threshold[0] if len(self.inputs.convergence_window_size) > ii: convergence_ws = self.inputs.convergence_window_size[ii] else: convergence_ws = self.inputs.convergence_window_size[0] return '[ %s, %g, %d ]' % (convergence_iter, convergence_value, convergence_ws) def _formatWinsorizeImageIntensities(self): assert(self.inputs.winsorize_upper_quantile > self.inputs.winsorize_lower_quantile), "Upper bound MUST be more than lower bound: %g > %g" \ % (self.inputs.winsorize_upper_quantile, self.inputs.winsorize_lower_quantile) self._quantilesDone = True return '--winsorize-image-intensities [ %s, %s ]' % (self.inputs.winsorize_lower_quantile, self.inputs.winsorize_upper_quantile) def _formatCollapseLinearTransformsToFixedImageHeader(self): if self.inputs.collapse_linear_transforms_to_fixed_image_header: return '--collapse-linear-transforms-to-fixed-image-header 1' else: return '--collapse-linear-transforms-to-fixed-image-header 0' def _format_arg(self, opt, spec, val): if opt == 'fixed_image_mask': if isdefined(self.inputs.moving_image_mask): return '--masks [ %s, %s ]' % (self.inputs.fixed_image_mask, self.inputs.moving_image_mask) else: return '--masks %s' % self.inputs.fixed_image_mask elif opt == 'transforms': return self._formatRegistration() elif opt == 'initial_moving_transform': try: doInvertTransform = int(self.inputs.invert_initial_moving_transform) except: doInvertTransform = 0 ## Just do the default behavior return '--initial-moving-transform [ %s, %d ]' % (self.inputs.initial_moving_transform, doInvertTransform) elif opt == 'initial_moving_transform_com': try: doCenterOfMassInit = int(self.inputs.initial_moving_transform_com) except: doCenterOfMassInit = 0 ## Just do the default behavior return '--initial-moving-transform [ %s, %s, %d ]' % (self.inputs.fixed_image[0], self.inputs.moving_image[0], doCenterOfMassInit) elif opt == 'interpolation': # TODO: handle multilabel, gaussian, and bspline options return '--interpolation %s' % self.inputs.interpolation elif opt == 'output_transform_prefix': out_filename = self._get_outputfilenames(inverse=False) inv_out_filename = self._get_outputfilenames(inverse=True) if out_filename and inv_out_filename: return '--output [ %s, %s, %s ]' % (self.inputs.output_transform_prefix, out_filename, inv_out_filename) elif out_filename: return '--output [ %s, %s ]' % (self.inputs.output_transform_prefix, out_filename) else: return '--output %s' % self.inputs.output_transform_prefix elif opt == 'winsorize_upper_quantile' or opt == 'winsorize_lower_quantile': if not self._quantilesDone: return self._formatWinsorizeImageIntensities() return '' # Must return something for argstr! 
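# Note: both winsorize quantile traits map onto the single
# '--winsorize-image-intensities' flag; whichever of the two _format_arg visits first
# emits the combined argument via _formatWinsorizeImageIntensities (guarded by
# _quantilesDone), and the other contributes only the empty string returned above.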
elif opt == 'collapse_linear_transforms_to_fixed_image_header': return self._formatCollapseLinearTransformsToFixedImageHeader() return super(Registration, self)._format_arg(opt, spec, val) def _outputFileNames(self, prefix, count, transform, inverse=False): self.lowDimensionalTransformMap = {'Rigid': 'Rigid.mat', 'Affine': 'Affine.mat', 'GenericAffine': 'GenericAffine.mat', 'CompositeAffine': 'Affine.mat', 'Similarity': 'Similarity.mat', 'Translation': 'Translation.mat', 'BSpline': 'BSpline.txt', 'Initial': 'DerivedInitialMovingTranslation.mat'} if transform in self.lowDimensionalTransformMap.keys(): suffix = self.lowDimensionalTransformMap[transform] inverse_mode = inverse else: inverse_mode = False # These are not analytically invertable if inverse: suffix = 'InverseWarp.nii.gz' else: suffix = 'Warp.nii.gz' return '%s%d%s' % (prefix, count, suffix), inverse_mode def _list_outputs(self): outputs = self._outputs().get() outputs['forward_transforms'] = [] outputs['forward_invert_flags'] = [] outputs['reverse_transforms'] = [] outputs['reverse_invert_flags'] = [] if not self.inputs.collapse_output_transforms: transformCount = 0 if isdefined(self.inputs.initial_moving_transform): outputs['forward_transforms'].append( self.inputs.initial_moving_transform) outputs['forward_invert_flags'].append( self.inputs.invert_initial_moving_transform) outputs['reverse_transforms'].insert( 0, self.inputs.initial_moving_transform) outputs['reverse_invert_flags'].insert(0, not self.inputs.invert_initial_moving_transform) # Prepend transformCount += 1 elif isdefined(self.inputs.initial_moving_transform_com): #forwardFileName, _ = self._outputFileNames(self.inputs.output_transform_prefix, # transformCount, # 'Initial') #outputs['forward_transforms'].append(forwardFileName) transformCount += 1 for count in range(len(self.inputs.transforms)): forwardFileName, forwardInverseMode = self._outputFileNames(self.inputs.output_transform_prefix, transformCount, self.inputs.transforms[count]) reverseFileName, reverseInverseMode = self._outputFileNames(self.inputs.output_transform_prefix, transformCount, self.inputs.transforms[count], True) outputs['forward_transforms'].append( os.path.abspath(forwardFileName)) outputs['forward_invert_flags'].append(forwardInverseMode) outputs['reverse_transforms'].insert( 0, os.path.abspath(reverseFileName)) outputs[ 'reverse_invert_flags'].insert(0, reverseInverseMode) transformCount += 1 else: transformCount = 0 for transform in ['GenericAffine', 'SyN']: # Only files returned by collapse_output_transforms forwardFileName, forwardInverseMode = self._outputFileNames(self.inputs.output_transform_prefix, transformCount, transform) reverseFileName, reverseInverseMode = self._outputFileNames(self.inputs.output_transform_prefix, transformCount, transform, True) outputs['forward_transforms'].append( os.path.abspath(forwardFileName)) outputs['forward_invert_flags'].append(forwardInverseMode) outputs['reverse_transforms'].append( os.path.abspath(reverseFileName)) outputs['reverse_invert_flags'].append(reverseInverseMode) transformCount += 1 if self.inputs.write_composite_transform: fileName = self.inputs.output_transform_prefix + 'Composite.h5' outputs['composite_transform'] = [os.path.abspath(fileName)] fileName = self.inputs.output_transform_prefix + \ 'InverseComposite.h5' outputs['inverse_composite_transform'] = [ os.path.abspath(fileName)] out_filename = self._get_outputfilenames(inverse=False) inv_out_filename = self._get_outputfilenames(inverse=True) if out_filename: 
outputs['warped_image'] = os.path.abspath(out_filename) if inv_out_filename: outputs['inverse_warped_image'] = os.path.abspath(inv_out_filename) return outputs nipype-0.9.2/nipype/interfaces/ants/resampling.py000066400000000000000000000400571227300005300221330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft = python sts = 4 ts = 4 sw = 4 et: """ANTS Apply Transforms interface Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os from .base import ANTSCommand, ANTSCommandInputSpec from ..base import (TraitedSpec, File, traits, isdefined) from ...utils.filemanip import split_filename from nipype.interfaces.base import InputMultiPath class WarpTimeSeriesImageMultiTransformInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(4, 3, argstr='%d', usedefault=True, desc='image dimension (3 or 4)', position=1) input_image = File(argstr='%s', mandatory=True, copyfile=True, desc=('image to apply transformation to (generally a ' 'coregistered functional)')) out_postfix = traits.Str('_wtsimt', argstr='%s', usedefault=True, desc=('Postfix that is prepended to all output ' 'files (default = _wtsimt)')) reference_image = File(argstr='-R %s', xor=['tightest_box'], desc='reference image space that you wish to warp INTO') tightest_box = traits.Bool(argstr='--tightest-bounding-box', desc=('computes tightest bounding box (overrided by ' 'reference_image if given)'), xor=['reference_image']) reslice_by_header = traits.Bool(argstr='--reslice-by-header', desc=('Uses orientation matrix and origin encoded in ' 'reference image file header. Not typically used ' 'with additional transforms')) use_nearest = traits.Bool(argstr='--use-NN', desc='Use nearest neighbor interpolation') use_bspline = traits.Bool(argstr='--use-Bspline', desc='Use 3rd order B-Spline interpolation') transformation_series = InputMultiPath(File(exists=True), argstr='%s', desc='transformation file(s) to be applied', mandatory=True, copyfile=False) invert_affine = traits.List(traits.Int, desc=('List of Affine transformations to invert. 
' 'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines ' 'found in transformation_series')) class WarpTimeSeriesImageMultiTransformOutputSpec(TraitedSpec): output_image = File(exists=True, desc='Warped image') class WarpTimeSeriesImageMultiTransform(ANTSCommand): """Warps a time-series from one space to another Examples -------- >>> from nipype.interfaces.ants import WarpTimeSeriesImageMultiTransform >>> wtsimt = WarpTimeSeriesImageMultiTransform() >>> wtsimt.inputs.input_image = 'resting.nii' >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] >>> wtsimt.cmdline 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' """ _cmd = 'WarpTimeSeriesImageMultiTransform' input_spec = WarpTimeSeriesImageMultiTransformInputSpec output_spec = WarpTimeSeriesImageMultiTransformOutputSpec def _format_arg(self, opt, spec, val): if opt == 'out_postfix': _, name, ext = split_filename( os.path.abspath(self.inputs.input_image)) return name + val + ext if opt == 'transformation_series': series = [] affine_counter = 0 for transformation in val: if 'Affine' in transformation and \ isdefined(self.inputs.invert_affine): affine_counter += 1 if affine_counter in self.inputs.invert_affine: series += ['-i'], series += [transformation] return ' '.join(series) return super(WarpTimeSeriesImageMultiTransform, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() _, name, ext = split_filename(os.path.abspath(self.inputs.input_image)) outputs['output_image'] = os.path.join(os.getcwd(), ''.join((name, self.inputs.out_postfix, ext))) return outputs def _run_interface(self, runtime): runtime = super(WarpTimeSeriesImageMultiTransform, self)._run_interface(runtime, correct_return_codes = [0,1]) if "100 % complete" not in runtime.stdout: self.raise_exception(runtime) return runtime class WarpImageMultiTransformInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='%d', usedefault=True, desc='image dimension (2 or 3)', position=1) input_image = File(argstr='%s', mandatory=True, desc=('image to apply transformation to (generally a ' 'coregistered functional)'), position=2) output_image = File(genfile=True, hash_files=False, argstr='%s', desc=('name of the output warped image'), position = 3, xor=['out_postfix']) out_postfix = File("_wimt", usedefault=True, hash_files=False, desc=('Postfix that is prepended to all output ' 'files (default = _wimt)'), xor=['output_image']) reference_image = File(argstr='-R %s', xor=['tightest_box'], desc='reference image space that you wish to warp INTO') tightest_box = traits.Bool(argstr='--tightest-bounding-box', desc=('computes tightest bounding box (overrided by ' 'reference_image if given)'), xor=['reference_image']) reslice_by_header = traits.Bool(argstr='--reslice-by-header', desc=('Uses orientation matrix and origin encoded in ' 'reference image file header. Not typically used ' 'with additional transforms')) use_nearest = traits.Bool(argstr='--use-NN', desc='Use nearest neighbor interpolation') use_bspline = traits.Bool(argstr='--use-Bspline', desc='Use 3rd order B-Spline interpolation') transformation_series = InputMultiPath(File(exists=True), argstr='%s', desc='transformation file(s) to be applied', mandatory=True) invert_affine = traits.List(traits.Int, desc=('List of Affine transformations to invert.' 
'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines ' 'found in transformation_series. Note that indexing ' 'starts with 1 and does not include warp fields. Affine ' 'transformations are distinguished ' 'from warp fields by the word "affine" included in their filenames.')) class WarpImageMultiTransformOutputSpec(TraitedSpec): output_image = File(exists=True, desc='Warped image') class WarpImageMultiTransform(ANTSCommand): """Warps an image from one space to another Examples -------- >>> from nipype.interfaces.ants import WarpImageMultiTransform >>> wimt = WarpImageMultiTransform() >>> wimt.inputs.input_image = 'structural.nii' >>> wimt.inputs.reference_image = 'ants_deformed.nii.gz' >>> wimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] >>> wimt.cmdline 'WarpImageMultiTransform 3 structural.nii structural_wimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' >>> wimt = WarpImageMultiTransform() >>> wimt.inputs.input_image = 'diffusion_weighted.nii' >>> wimt.inputs.reference_image = 'functional.nii' >>> wimt.inputs.transformation_series = ['func2anat_coreg_Affine.txt','func2anat_InverseWarp.nii.gz','dwi2anat_Warp.nii.gz','dwi2anat_coreg_Affine.txt'] >>> wimt.inputs.invert_affine = [1] >>> wimt.cmdline 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii -i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' """ _cmd = 'WarpImageMultiTransform' input_spec = WarpImageMultiTransformInputSpec output_spec = WarpImageMultiTransformOutputSpec def _gen_filename(self, name): if name == 'output_image': _, name, ext = split_filename( os.path.abspath(self.inputs.input_image)) return ''.join((name, self.inputs.out_postfix, ext)) return None def _format_arg(self, opt, spec, val): if opt == 'transformation_series': series = [] affine_counter = 0 for transformation in val: if "affine" in transformation.lower() and \ isdefined(self.inputs.invert_affine): affine_counter += 1 if affine_counter in self.inputs.invert_affine: series += '-i', series += [transformation] return ' '.join(series) return super(WarpImageMultiTransform, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.output_image): outputs['output_image'] = os.path.abspath(self.inputs.output_image) else: outputs['output_image'] = os.path.abspath( self._gen_filename('output_image')) return outputs class ApplyTransformsInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(2, 3, 4, argstr='--dimensionality %d', desc=('This option forces the image to be treated ' 'as a specified-dimensional image. 
If not ' 'specified, antsWarp tries to infer the ' 'dimensionality from the input image.')) input_image_type = traits.Enum(0, 1, 2, 3, argstr='--input-image-type %d', desc=('Option specifying the input image ' 'type of scalar (default), vector, ' 'tensor, or time series.')) input_image = File(argstr='--input %s', mandatory=True, desc=('image to apply transformation to (generally a ' 'coregistered functional)'), exists=True) output_image = traits.Str(argstr='--output %s', desc=('output file name'), genfile=True, hash_files=False) out_postfix = traits.Str("_trans", usedefault=True, desc=('Postfix that is appended to all output ' 'files (default = _trans)')) reference_image = File(argstr='--reference-image %s', mandatory=True, desc='reference image space that you wish to warp INTO', exists=True) interpolation = traits.Enum('Linear', 'NearestNeighbor', 'CosineWindowedSinc', 'WelchWindowedSinc', 'HammingWindowedSinc', 'LanczosWindowedSinc', 'MultiLabel', 'Gaussian', 'BSpline', argstr='%s', usedefault=True) # TODO: Implement these options for multilabel, gaussian, and bspline # interpolation_sigma = traits.Float(requires=['interpolation']) # interpolation_alpha = traits.Float(requires=['interpolation_sigma']) # bspline_order = traits.Int(3, requires=['interpolation']) transforms = InputMultiPath( File(exists=True), argstr='%s', mandatory=True, desc=('')) invert_transform_flags = InputMultiPath(traits.Bool()) default_value = traits.Float( 0.0, argstr='--default-value %d', usedefault=True) print_out_composite_warp_file = traits.Enum( 0, 1, requires=["output_image"], desc=('')) # TODO: Change to boolean class ApplyTransformsOutputSpec(TraitedSpec): output_image = File(exists=True, desc='Warped image') class ApplyTransforms(ANTSCommand): """ApplyTransforms, applied to an input image, transforms it according to a reference image and a transform (or a set of transforms). 
Examples -------- >>> from nipype.interfaces.ants import ApplyTransforms >>> at = ApplyTransforms() >>> at.inputs.dimension = 3 >>> at.inputs.input_image = 'moving1.nii' >>> at.inputs.reference_image = 'fixed1.nii' >>> at.inputs.output_image = 'deformed_moving1.nii' >>> at.inputs.interpolation = 'Linear' >>> at.inputs.default_value = 0 >>> at.inputs.transforms = ['trans.mat', 'ants_Warp.nii.gz'] >>> at.inputs.invert_transform_flags = [False, False] >>> at.cmdline 'antsApplyTransforms --default-value 0 --dimensionality 3 --input moving1.nii --interpolation Linear --output deformed_moving1.nii --reference-image fixed1.nii --transform [trans.mat,0] --transform [ants_Warp.nii.gz,0]' """ _cmd = 'antsApplyTransforms' input_spec = ApplyTransformsInputSpec output_spec = ApplyTransformsOutputSpec def _gen_filename(self, name): if name == 'output_image': output = self.inputs.output_image if not isdefined(output): _, name, ext = split_filename(self.inputs.input_image) output = name + self.inputs.out_postfix + ext return output return None def _getTransformFileNames(self): retval = [] for ii in range(len(self.inputs.transforms)): if isdefined(self.inputs.invert_transform_flags): if len(self.inputs.transforms) == len(self.inputs.invert_transform_flags): invert_code = 1 if self.inputs.invert_transform_flags[ ii] else 0 retval.append("--transform [%s,%d]" % (self.inputs.transforms[ii], invert_code)) else: raise Exception("ERROR: The useInverse list must have the same number of entries as the transformsFileName list.") else: retval.append("--transform %s" % self.inputs.transforms[ii]) return " ".join(retval) def _getOutputWarpedFileName(self): if isdefined(self.inputs.print_out_composite_warp_file): return "--output [%s,%s]" % (self._gen_filename("output_image"), self.inputs.print_out_composite_warp_file) else: return "--output %s" % (self._gen_filename("output_image")) def _format_arg(self, opt, spec, val): if opt == "output_image": return self._getOutputWarpedFileName() elif opt == "transforms": return self._getTransformFileNames() elif opt == 'interpolation': # TODO: handle multilabel, gaussian, and bspline options return '--interpolation %s' % self.inputs.interpolation return super(ApplyTransforms, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['output_image'] = os.path.abspath( self._gen_filename('output_image')) return outputs nipype-0.9.2/nipype/interfaces/ants/segmentation.py000066400000000000000000000263711227300005300224720ustar00rootroot00000000000000"""The ants module provides basic functions for interfacing with ants functions. 
Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from ..base import (TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined) from ...utils.filemanip import split_filename from .base import ANTSCommand, ANTSCommandInputSpec import os from ...utils.filemanip import copyfile class AtroposInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, 4, argstr='--image-dimensionality %d', usedefault=True, desc='image dimension (2, 3, or 4)') intensity_images = InputMultiPath(File(exists=True), argstr="--intensity-image %s...", mandatory=True) mask_image = File(exists=True, argstr='--mask-image %s', mandatory=True) initialization = traits.Enum('Random', 'Otsu', 'KMeans', 'PriorProbabilityImages', 'PriorLabelImage', argstr="%s", requires=['number_of_tissue_classes'], mandatory=True) prior_probability_images = InputMultiPath(File(exists=True)) number_of_tissue_classes = traits.Int(mandatory=True) prior_weighting = traits.Float() prior_probability_threshold = traits.Float(requires=['prior_weighting']) likelihood_model = traits.Str(argstr="--likelihood-model %s") mrf_smoothing_factor = traits.Float(argstr="%s") mrf_radius = traits.List(traits.Int(), requires=['mrf_smoothing_factor']) icm_use_synchronous_update = traits.Bool(argstr="%s") maximum_number_of_icm_terations = traits.Int(requires=['icm_use_synchronous_update']) n_iterations = traits.Int(argstr="%s") convergence_threshold = traits.Float(requires=['n_iterations']) posterior_formulation = traits.Str(argstr="%s") use_mixture_model_proportions = traits.Bool(requires=['posterior_formulation']) out_classified_image_name = File(argstr="%s", genfile=True, hash_files=False) save_posteriors = traits.Bool() output_posteriors_name_template = traits.Str('POSTERIOR_%02d.nii.gz', usedefault=True) class AtroposOutputSpec(TraitedSpec): classified_image = File(exists=True) posteriors = OutputMultiPath(File(exists=True)) class Atropos(ANTSCommand): """A finite mixture modeling (FMM) segmentation approach with possibilities for specifying prior constraints. These prior constraints include the specification of a prior label image, prior probability images (one for each class), and/or an MRF prior to enforce spatial smoothing of the labels. Similar algorithms include FAST and SPM. 
Examples -------- >>> from nipype.interfaces.ants import Atropos >>> at = Atropos() >>> at.inputs.dimension = 3 >>> at.inputs.intensity_images = 'structural.nii' >>> at.inputs.mask_image = 'mask.nii' >>> at.inputs.initialization = 'PriorProbabilityImages' >>> at.inputs.prior_probability_images = ['rc1s1.nii', 'rc1s2.nii'] >>> at.inputs.number_of_tissue_classes = 2 >>> at.inputs.prior_weighting = 0.8 >>> at.inputs.prior_probability_threshold = 0.0000001 >>> at.inputs.likelihood_model = 'Gaussian' >>> at.inputs.mrf_smoothing_factor = 0.2 >>> at.inputs.mrf_radius = [1, 1, 1] >>> at.inputs.icm_use_synchronous_update = True >>> at.inputs.maximum_number_of_icm_terations = 1 >>> at.inputs.n_iterations = 5 >>> at.inputs.convergence_threshold = 0.000001 >>> at.inputs.posterior_formulation = 'Socrates' >>> at.inputs.use_mixture_model_proportions = True >>> at.inputs.save_posteriors = True >>> at.cmdline 'Atropos --image-dimensionality 3 --icm [1,1] --initialization PriorProbabilityImages[2,priors/priorProbImages%02d.nii,0.8,1e-07] --intensity-image structural.nii --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1]' """ input_spec = AtroposInputSpec output_spec = AtroposOutputSpec _cmd = 'Atropos' def _format_arg(self, opt, spec, val): if opt == 'initialization': retval = "--initialization %s[%d" % (val, self.inputs.number_of_tissue_classes) if val == "PriorProbabilityImages": _, _, ext = split_filename(self.inputs.prior_probability_images[0]) retval += ",priors/priorProbImages%02d" + ext + ",%g" % self.inputs.prior_weighting if isdefined(self.inputs.prior_probability_threshold): retval += ",%g" % self.inputs.prior_probability_threshold return retval + "]" if opt == 'mrf_smoothing_factor': retval = "--mrf [%g" % val if isdefined(self.inputs.mrf_radius): retval += ",%s" % 'x'.join([str(s) for s in self.inputs.mrf_radius]) return retval + "]" if opt == "icm_use_synchronous_update": retval = "--icm [%d" % val if isdefined(self.inputs.maximum_number_of_icm_terations): retval += ",%g" % self.inputs.maximum_number_of_icm_terations return retval + "]" if opt == "n_iterations": retval = "--convergence [%d" % val if isdefined(self.inputs.convergence_threshold): retval += ",%g" % self.inputs.convergence_threshold return retval + "]" if opt == "posterior_formulation": retval = "--posterior-formulation %s" % val if isdefined(self.inputs.use_mixture_model_proportions): retval += "[%d]" % self.inputs.use_mixture_model_proportions return retval if opt == "out_classified_image_name": retval = "--output [%s" % val if isdefined(self.inputs.save_posteriors): retval += ",%s" % self.inputs.output_posteriors_name_template return retval + "]" return super(ANTSCommand, self)._format_arg(opt, spec, val) def _run_interface(self, runtime): if self.inputs.initialization == "PriorProbabilityImages": priors_directory = os.path.join(os.getcwd(), "priors") if not os.path.exists(priors_directory): os.makedirs(priors_directory) _, _, ext = split_filename(self.inputs.prior_probability_images[0]) for i, f in enumerate(self.inputs.prior_probability_images): target = os.path.join(priors_directory, 'priorProbImages%02d' % (i + 1) + ext) if not (os.path.exists(target) and os.path.realpath(target) == os.path.abspath(f)): copyfile(os.path.abspath(f), os.path.join(priors_directory, 'priorProbImages%02d' % (i + 1) + ext)) runtime = super(Atropos, self)._run_interface(runtime) return runtime def _gen_filename(self, 
name): if name == 'out_classified_image_name': output = self.inputs.out_classified_image_name if not isdefined(output): _, name, ext = split_filename(self.inputs.intensity_images[0]) output = name + '_labeled' + ext return output return None def _list_outputs(self): outputs = self._outputs().get() outputs['classified_image'] = os.path.abspath(self._gen_filename('out_classified_image_name')) if isdefined(self.inputs.save_posteriors) and self.inputs.save_posteriors: outputs['posteriors'] = [] for i in range(self.inputs.number_of_tissue_classes): outputs['posteriors'].append(os.path.abspath(self.inputs.output_posteriors_name_template % (i + 1))) return outputs class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='--image-dimension %d', usedefault=True, desc='image dimension (2 or 3)') input_image = File(argstr='--input-image %s', mandatory=True, desc=('image to apply N4 bias field correction to')) mask_image = File(argstr='--mask-image %s') output_image = traits.Str(argstr='--output %s', desc=('output file name'), genfile=True, hash_files=False) bspline_fitting_distance = traits.Float(argstr="--bsline-fitting [%g]") shrink_factor = traits.Int(argstr="--shrink-factor %d") n_iterations = traits.List(traits.Int(), argstr="--convergence [ %s", sep="x", requires=['convergence_threshold'], position=1) convergence_threshold = traits.Float(argstr=",%g]", requires=['n_iterations'], position=2) class N4BiasFieldCorrectionOutputSpec(TraitedSpec): output_image = File(exists=True, desc='Bias-corrected image') class N4BiasFieldCorrection(ANTSCommand): """N4 is a variant of the popular N3 (nonparametric nonuniform normalization) retrospective bias correction algorithm. Based on the assumption that the corruption of the low frequency bias field can be modeled as a convolution of the intensity histogram by a Gaussian, the basic algorithmic protocol is to iterate between deconvolving the intensity histogram by a Gaussian, remapping the intensities, and then spatially smoothing this result by a B-spline modeling of the bias field itself. The modifications from and improvements obtained over the original N3 algorithm are described in the following paper: N. Tustison et al., N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging, 29(6):1310-1320, June 2010. 
Examples -------- >>> from nipype.interfaces.ants import N4BiasFieldCorrection >>> n4 = N4BiasFieldCorrection() >>> n4.inputs.dimension = 3 >>> n4.inputs.input_image = 'structural.nii' >>> n4.inputs.bspline_fitting_distance = 300 >>> n4.inputs.shrink_factor = 3 >>> n4.inputs.n_iterations = [50,50,30,20] >>> n4.inputs.convergence_threshold = 1e-6 >>> n4.cmdline 'N4BiasFieldCorrection --convergence [ 50x50x30x20 ,1e-06] --bsline-fitting [300] --image-dimension 3 --input-image structural.nii --output structural_corrected.nii --shrink-factor 3' """ _cmd = 'N4BiasFieldCorrection' input_spec = N4BiasFieldCorrectionInputSpec output_spec = N4BiasFieldCorrectionOutputSpec def _gen_filename(self, name): if name == 'output_image': output = self.inputs.output_image if not isdefined(output): _, name, ext = split_filename(self.inputs.input_image) output = name + '_corrected' + ext return output return None def _list_outputs(self): outputs = self._outputs().get() outputs['output_image'] = os.path.abspath(self._gen_filename('output_image')) return outputsnipype-0.9.2/nipype/interfaces/ants/setup.py000066400000000000000000000006521227300005300211270ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('ants', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/ants/tests/000077500000000000000000000000001227300005300205545ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_ANTS.py000066400000000000000000000050521227300005300237640ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.registration import ANTS def test_ANTS_inputs(): input_map = dict(affine_gradient_descent_option=dict(argstr='%s', ), args=dict(argstr='%s', ), delta_time=dict(requires=['number_of_time_steps'], ), dimension=dict(argstr='%d', position=1, usedefault=False, ), environ=dict(nohash=True, usedefault=True, ), fixed_image=dict(mandatory=True, ), gradient_step_length=dict(requires=['transformation_model'], ), ignore_exception=dict(nohash=True, usedefault=True, ), metric=dict(mandatory=True, ), metric_weight=dict(requires=['metric'], ), mi_option=dict(argstr='--MI-option %s', sep='x', ), moving_image=dict(argstr='%s', mandatory=True, ), num_threads=dict(nohash=True, usedefault=True, ), number_of_affine_iterations=dict(argstr='--number-of-affine-iterations %s', sep='x', ), number_of_iterations=dict(argstr='--number-of-iterations %s', sep='x', ), number_of_time_steps=dict(requires=['gradient_step_length'], ), output_transform_prefix=dict(argstr='--output-naming %s', mandatory=True, usedefault=True, ), radius=dict(requires=['metric'], ), regularization=dict(argstr='%s', ), regularization_deformation_field_sigma=dict(requires=['regularization'], ), regularization_gradient_field_sigma=dict(requires=['regularization'], ), smoothing_sigmas=dict(argstr='--gaussian-smoothing-sigmas %s', sep='x', ), subsampling_factors=dict(argstr='--subsampling-factors %s', sep='x', ), symmetry_type=dict(requires=['delta_time'], ), terminal_output=dict(mandatory=True, nohash=True, ), transformation_model=dict(argstr='%s', mandatory=True, ), use_histogram_matching=dict(argstr='%s', usedefault=True, 
), ) inputs = ANTS.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ANTS_outputs(): output_map = dict(affine_transform=dict(), inverse_warp_transform=dict(), metaheader=dict(), metaheader_raw=dict(), warp_transform=dict(), ) outputs = ANTS.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py000066400000000000000000000012711227300005300252620ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.base import ANTSCommand def test_ANTSCommand_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), num_threads=dict(nohash=True, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ANTSCommand.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py000066400000000000000000000033131227300005300263610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.resampling import ApplyTransforms def test_ApplyTransforms_inputs(): input_map = dict(args=dict(argstr='%s', ), default_value=dict(argstr='--default-value %d', usedefault=True, ), dimension=dict(argstr='--dimensionality %d', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), input_image=dict(argstr='--input %s', mandatory=True, ), input_image_type=dict(argstr='--input-image-type %d', ), interpolation=dict(argstr='%s', usedefault=True, ), invert_transform_flags=dict(), num_threads=dict(nohash=True, usedefault=True, ), out_postfix=dict(usedefault=True, ), output_image=dict(argstr='--output %s', genfile=True, hash_files=False, ), print_out_composite_warp_file=dict(requires=['output_image'], ), reference_image=dict(argstr='--reference-image %s', mandatory=True, ), terminal_output=dict(mandatory=True, nohash=True, ), transforms=dict(argstr='%s', mandatory=True, ), ) inputs = ApplyTransforms.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ApplyTransforms_outputs(): output_map = dict(output_image=dict(), ) outputs = ApplyTransforms.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_Atropos.py000066400000000000000000000043771227300005300246570ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.segmentation import Atropos def test_Atropos_inputs(): input_map = dict(args=dict(argstr='%s', ), convergence_threshold=dict(requires=['n_iterations'], ), dimension=dict(argstr='--image-dimensionality %d', usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), icm_use_synchronous_update=dict(argstr='%s', ), ignore_exception=dict(nohash=True, 
usedefault=True, ), initialization=dict(argstr='%s', mandatory=True, requires=['number_of_tissue_classes'], ), intensity_images=dict(argstr='--intensity-image %s...', mandatory=True, ), likelihood_model=dict(argstr='--likelihood-model %s', ), mask_image=dict(argstr='--mask-image %s', mandatory=True, ), maximum_number_of_icm_terations=dict(requires=['icm_use_synchronous_update'], ), mrf_radius=dict(requires=['mrf_smoothing_factor'], ), mrf_smoothing_factor=dict(argstr='%s', ), n_iterations=dict(argstr='%s', ), num_threads=dict(nohash=True, usedefault=True, ), number_of_tissue_classes=dict(mandatory=True, ), out_classified_image_name=dict(argstr='%s', genfile=True, hash_files=False, ), output_posteriors_name_template=dict(usedefault=True, ), posterior_formulation=dict(argstr='%s', ), prior_probability_images=dict(), prior_probability_threshold=dict(requires=['prior_weighting'], ), prior_weighting=dict(), save_posteriors=dict(), terminal_output=dict(mandatory=True, nohash=True, ), use_mixture_model_proportions=dict(requires=['posterior_formulation'], ), ) inputs = Atropos.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Atropos_outputs(): output_map = dict(classified_image=dict(), posteriors=dict(), ) outputs = Atropos.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py000066400000000000000000000024421227300005300275760ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.utils import AverageAffineTransform def test_AverageAffineTransform_inputs(): input_map = dict(args=dict(argstr='%s', ), dimension=dict(argstr='%d', mandatory=True, position=0, usedefault=False, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), num_threads=dict(nohash=True, usedefault=True, ), output_affine_transform=dict(argstr='%s', mandatory=True, position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), transforms=dict(argstr='%s', mandatory=True, position=3, ), ) inputs = AverageAffineTransform.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_AverageAffineTransform_outputs(): output_map = dict(affine_transform=dict(), ) outputs = AverageAffineTransform.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_AverageImages.py000066400000000000000000000024761227300005300257260ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.utils import AverageImages def test_AverageImages_inputs(): input_map = dict(args=dict(argstr='%s', ), dimension=dict(argstr='%d', mandatory=True, position=0, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), images=dict(argstr='%s', mandatory=True, position=3, ), normalize=dict(argstr='%d', mandatory=True, position=2, ), num_threads=dict(nohash=True, usedefault=True, ), output_average_image=dict(argstr='%s', 
hash_files=False, position=1, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = AverageImages.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_AverageImages_outputs(): output_map = dict(output_average_image=dict(), ) outputs = AverageImages.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py000066400000000000000000000034251227300005300257130ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.legacy import GenWarpFields def test_GenWarpFields_inputs(): input_map = dict(args=dict(argstr='%s', ), bias_field_correction=dict(argstr='-n 1', ), dimension=dict(argstr='-d %d', position=1, usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), force_proceed=dict(argstr='-f 1', ), ignore_exception=dict(nohash=True, usedefault=True, ), input_image=dict(argstr='-i %s', copyfile=False, mandatory=True, ), inverse_warp_template_labels=dict(argstr='-l', ), max_iterations=dict(argstr='-m %s', sep='x', ), num_threads=dict(nohash=True, usedefault=True, ), out_prefix=dict(argstr='-o %s', usedefault=True, ), quality_check=dict(argstr='-q 1', ), reference_image=dict(argstr='-r %s', copyfile=True, mandatory=True, ), similarity_metric=dict(argstr='-s %s', ), terminal_output=dict(mandatory=True, nohash=True, ), transformation_model=dict(argstr='-t %s', usedefault=True, ), ) inputs = GenWarpFields.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_GenWarpFields_outputs(): output_map = dict(affine_transformation=dict(), input_file=dict(), inverse_warp_field=dict(), output_file=dict(), warp_field=dict(), ) outputs = GenWarpFields.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_JacobianDeterminant.py000066400000000000000000000030211227300005300271120ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.utils import JacobianDeterminant def test_JacobianDeterminant_inputs(): input_map = dict(args=dict(argstr='%s', ), dimension=dict(argstr='%d', mandatory=True, position=0, usedefault=False, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), norm_by_total=dict(argstr='%d', position=5, ), num_threads=dict(nohash=True, usedefault=True, ), output_prefix=dict(argstr='%s', genfile=True, hash_files=False, position=2, ), projection_vector=dict(argstr='%s', position=6, sep='x', ), template_mask=dict(argstr='%s', position=4, ), terminal_output=dict(mandatory=True, nohash=True, ), use_log=dict(argstr='%d', position=3, ), warp_file=dict(argstr='%s', mandatory=True, position=1, ), ) inputs = JacobianDeterminant.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_JacobianDeterminant_outputs(): output_map = dict(jacobian_image=dict(), ) outputs = 
JacobianDeterminant.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py000066400000000000000000000025121227300005300261620ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.utils import MultiplyImages def test_MultiplyImages_inputs(): input_map = dict(args=dict(argstr='%s', ), dimension=dict(argstr='%d', mandatory=True, position=0, usedefault=False, ), environ=dict(nohash=True, usedefault=True, ), first_input=dict(argstr='%s', mandatory=True, position=1, ), ignore_exception=dict(nohash=True, usedefault=True, ), num_threads=dict(nohash=True, usedefault=True, ), output_product_image=dict(argstr='%s', mandatory=True, position=3, ), second_input=dict(argstr='%s', mandatory=True, position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MultiplyImages.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MultiplyImages_outputs(): output_map = dict(output_product_image=dict(), ) outputs = MultiplyImages.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py000066400000000000000000000032521227300005300272730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.segmentation import N4BiasFieldCorrection def test_N4BiasFieldCorrection_inputs(): input_map = dict(args=dict(argstr='%s', ), bspline_fitting_distance=dict(argstr='--bsline-fitting [%g]', ), convergence_threshold=dict(argstr=',%g]', position=2, requires=['n_iterations'], ), dimension=dict(argstr='--image-dimension %d', usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), input_image=dict(argstr='--input-image %s', mandatory=True, ), mask_image=dict(argstr='--mask-image %s', ), n_iterations=dict(argstr='--convergence [ %s', position=1, requires=['convergence_threshold'], sep='x', ), num_threads=dict(nohash=True, usedefault=True, ), output_image=dict(argstr='--output %s', genfile=True, hash_files=False, ), shrink_factor=dict(argstr='--shrink-factor %d', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = N4BiasFieldCorrection.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_N4BiasFieldCorrection_outputs(): output_map = dict(output_image=dict(), ) outputs = N4BiasFieldCorrection.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_Registration.py000066400000000000000000000074571227300005300257040ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.registration import Registration def test_Registration_inputs(): input_map = dict(args=dict(argstr='%s', ), 
collapse_linear_transforms_to_fixed_image_header=dict(argstr='%s', usedefault=True, ), collapse_output_transforms=dict(argstr='--collapse-output-transforms %d', usedefault=True, ), convergence_threshold=dict(requires=['number_of_iterations'], usedefault=True, ), convergence_window_size=dict(requires=['convergence_threshold'], usedefault=True, ), dimension=dict(argstr='--dimensionality %d', usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), fixed_image=dict(mandatory=True, ), fixed_image_mask=dict(argstr='%s', ), ignore_exception=dict(nohash=True, usedefault=True, ), initial_moving_transform=dict(argstr='%s', xor=['initial_moving_transform_com'], ), initial_moving_transform_com=dict(argstr='%s', xor=['initial_moving_transform'], ), interpolation=dict(argstr='%s', usedefault=True, ), invert_initial_moving_transform=dict(requires=['initial_moving_transform'], xor=['initial_moving_transform_com'], ), metric=dict(mandatory=True, ), metric_item_trait=dict(), metric_stage_trait=dict(), metric_weight=dict(mandatory=True, requires=['metric'], usedefault=True, ), metric_weight_item_trait=dict(), metric_weight_stage_trait=dict(), moving_image=dict(mandatory=True, ), moving_image_mask=dict(requires=['fixed_image_mask'], ), num_threads=dict(nohash=True, usedefault=True, ), number_of_iterations=dict(), output_inverse_warped_image=dict(hash_files=False, requires=['output_warped_image'], ), output_transform_prefix=dict(argstr='%s', usedefault=True, ), output_warped_image=dict(hash_files=False, ), radius_bins_item_trait=dict(), radius_bins_stage_trait=dict(), radius_or_number_of_bins=dict(requires=['metric_weight'], usedefault=True, ), sampling_percentage=dict(requires=['sampling_strategy'], ), sampling_percentage_item_trait=dict(), sampling_percentage_stage_trait=dict(), sampling_strategy=dict(requires=['metric_weight'], ), sampling_strategy_item_trait=dict(), sampling_strategy_stage_trait=dict(), shrink_factors=dict(mandatory=True, ), sigma_units=dict(requires=['smoothing_sigmas'], ), smoothing_sigmas=dict(mandatory=True, ), terminal_output=dict(mandatory=True, nohash=True, ), transform_parameters=dict(), transforms=dict(argstr='%s', mandatory=True, ), use_estimate_learning_rate_once=dict(), use_histogram_matching=dict(usedefault=True, ), winsorize_lower_quantile=dict(argstr='%s', usedefault=True, ), winsorize_upper_quantile=dict(argstr='%s', usedefault=True, ), write_composite_transform=dict(argstr='--write-composite-transform %d', usedefault=True, ), ) inputs = Registration.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Registration_outputs(): output_map = dict(composite_transform=dict(), forward_invert_flags=dict(), forward_transforms=dict(), inverse_composite_transform=dict(), inverse_warped_image=dict(), reverse_invert_flags=dict(), reverse_transforms=dict(), warped_image=dict(), ) outputs = Registration.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py000066400000000000000000000034751227300005300300110ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.resampling import WarpImageMultiTransform def test_WarpImageMultiTransform_inputs(): input_map = dict(args=dict(argstr='%s', ), 
dimension=dict(argstr='%d', position=1, usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), input_image=dict(argstr='%s', mandatory=True, position=2, ), invert_affine=dict(), num_threads=dict(nohash=True, usedefault=True, ), out_postfix=dict(hash_files=False, usedefault=True, xor=['output_image'], ), output_image=dict(argstr='%s', genfile=True, hash_files=False, position=3, xor=['out_postfix'], ), reference_image=dict(argstr='-R %s', xor=['tightest_box'], ), reslice_by_header=dict(argstr='--reslice-by-header', ), terminal_output=dict(mandatory=True, nohash=True, ), tightest_box=dict(argstr='--tightest-bounding-box', xor=['reference_image'], ), transformation_series=dict(argstr='%s', mandatory=True, ), use_bspline=dict(argstr='--use-Bspline', ), use_nearest=dict(argstr='--use-NN', ), ) inputs = WarpImageMultiTransform.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_WarpImageMultiTransform_outputs(): output_map = dict(output_image=dict(), ) outputs = WarpImageMultiTransform.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py000066400000000000000000000033541227300005300317770ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.resampling import WarpTimeSeriesImageMultiTransform def test_WarpTimeSeriesImageMultiTransform_inputs(): input_map = dict(args=dict(argstr='%s', ), dimension=dict(argstr='%d', position=1, usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), input_image=dict(argstr='%s', copyfile=True, mandatory=True, ), invert_affine=dict(), num_threads=dict(nohash=True, usedefault=True, ), out_postfix=dict(argstr='%s', usedefault=True, ), reference_image=dict(argstr='-R %s', xor=['tightest_box'], ), reslice_by_header=dict(argstr='--reslice-by-header', ), terminal_output=dict(mandatory=True, nohash=True, ), tightest_box=dict(argstr='--tightest-bounding-box', xor=['reference_image'], ), transformation_series=dict(argstr='%s', copyfile=False, mandatory=True, ), use_bspline=dict(argstr='--use-Bspline', ), use_nearest=dict(argstr='--use-NN', ), ) inputs = WarpTimeSeriesImageMultiTransform.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_WarpTimeSeriesImageMultiTransform_outputs(): output_map = dict(output_image=dict(), ) outputs = WarpTimeSeriesImageMultiTransform.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py000066400000000000000000000034441227300005300265710ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.legacy import antsIntroduction def test_antsIntroduction_inputs(): input_map = dict(args=dict(argstr='%s', ), bias_field_correction=dict(argstr='-n 1', ), dimension=dict(argstr='-d %d', position=1, usedefault=True, ), environ=dict(nohash=True, 
usedefault=True, ), force_proceed=dict(argstr='-f 1', ), ignore_exception=dict(nohash=True, usedefault=True, ), input_image=dict(argstr='-i %s', copyfile=False, mandatory=True, ), inverse_warp_template_labels=dict(argstr='-l', ), max_iterations=dict(argstr='-m %s', sep='x', ), num_threads=dict(nohash=True, usedefault=True, ), out_prefix=dict(argstr='-o %s', usedefault=True, ), quality_check=dict(argstr='-q 1', ), reference_image=dict(argstr='-r %s', copyfile=True, mandatory=True, ), similarity_metric=dict(argstr='-s %s', ), terminal_output=dict(mandatory=True, nohash=True, ), transformation_model=dict(argstr='-t %s', usedefault=True, ), ) inputs = antsIntroduction.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_antsIntroduction_outputs(): output_map = dict(affine_transformation=dict(), input_file=dict(), inverse_warp_field=dict(), output_file=dict(), warp_field=dict(), ) outputs = antsIntroduction.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py000066400000000000000000000035721227300005300275740ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.ants.legacy import buildtemplateparallel def test_buildtemplateparallel_inputs(): input_map = dict(args=dict(argstr='%s', ), bias_field_correction=dict(argstr='-n 1', ), dimension=dict(argstr='-d %d', position=1, usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), gradient_step_size=dict(argstr='-g %f', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr='%s', mandatory=True, position=-1, ), iteration_limit=dict(argstr='-i %d', usedefault=True, ), max_iterations=dict(argstr='-m %s', sep='x', ), num_cores=dict(argstr='-j %d', requires=['parallelization'], ), num_threads=dict(nohash=True, usedefault=True, ), out_prefix=dict(argstr='-o %s', usedefault=True, ), parallelization=dict(argstr='-c %d', usedefault=True, ), rigid_body_registration=dict(argstr='-r 1', ), similarity_metric=dict(argstr='-s %s', ), terminal_output=dict(mandatory=True, nohash=True, ), transformation_model=dict(argstr='-t %s', usedefault=True, ), use_first_as_target=dict(), ) inputs = buildtemplateparallel.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_buildtemplateparallel_outputs(): output_map = dict(final_template_file=dict(), subject_outfiles=dict(), template_files=dict(), ) outputs = buildtemplateparallel.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/ants/utils.py000066400000000000000000000175421227300005300211350ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft = python sts = 4 ts = 4 sw = 4 et: """ANTS Apply Transforms interface Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os from .base import ANTSCommand, 
ANTSCommandInputSpec from ..base import (TraitedSpec, File, traits, isdefined) from ...utils.filemanip import split_filename from nipype.interfaces.base import InputMultiPath class AverageAffineTransformInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True, position=0, desc='image dimension (2 or 3)') output_affine_transform = File(argstr='%s', mandatory=True, position=1, desc='Outputfname.txt: the name of the resulting transform.') transforms = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=3, desc=('transforms to average')) class AverageAffineTransformOutputSpec(TraitedSpec): affine_transform = File(exists=True, desc='average transform file') class AverageAffineTransform(ANTSCommand): """ Examples -------- >>> from nipype.interfaces.ants import AverageAffineTransform >>> avg = AverageAffineTransform() >>> avg.inputs.dimension = 3 >>> avg.inputs.transforms = ['trans.mat', 'func_to_struct.mat'] >>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat' >>> avg.cmdline 'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat' """ _cmd = 'AverageAffineTransform' input_spec = AverageAffineTransformInputSpec output_spec = AverageAffineTransformOutputSpec def _format_arg(self, opt, spec, val): return super(AverageAffineTransform, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['affine_transform'] = os.path.abspath( self.inputs.output_affine_transform) return outputs class AverageImagesInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='%d', mandatory=True, position=0, desc='image dimension (2 or 3)') output_average_image = File("average.nii", argstr='%s', position=1, desc='the name of the resulting image.', usedefault=True, hash_files=False) normalize = traits.Bool(argstr="%d", mandatory=True, position=2, desc='Normalize: if true, the 2nd image' + ' is divided by its mean. 
This will select the largest image to average into.') images = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=3, desc=('image to apply transformation to (generally a coregistered functional)')) class AverageImagesOutputSpec(TraitedSpec): output_average_image = File(exists=True, desc='average image file') class AverageImages(ANTSCommand): """ Examples -------- >>> from nipype.interfaces.ants import AverageImages >>> avg = AverageImages() >>> avg.inputs.dimension = 3 >>> avg.inputs.output_average_image = "average.nii.gz" >>> avg.inputs.normalize = True >>> avg.inputs.images = ['rc1s1.nii', 'rc1s1.nii'] >>> avg.cmdline 'AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii' """ _cmd = 'AverageImages' input_spec = AverageImagesInputSpec output_spec = AverageImagesOutputSpec def _format_arg(self, opt, spec, val): return super(AverageImages, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['output_average_image'] = os.path.realpath( self.inputs.output_average_image) return outputs class MultiplyImagesInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True, position=0, desc='image dimension (2 or 3)') first_input = File( argstr='%s', exists=True, mandatory=True, position=1, desc='image 1') second_input = traits.Either(File(exists=True), traits.Float, argstr='%s', mandatory=True, position=2, desc='image 2 or multiplication weight') output_product_image = File(argstr='%s', mandatory=True, position=3, desc='Outputfname.nii.gz: the name of the resulting image.') class MultiplyImagesOutputSpec(TraitedSpec): output_product_image = File(exists=True, desc='average image file') class MultiplyImages(ANTSCommand): """ Examples -------- >>> from nipype.interfaces.ants import MultiplyImages >>> test = MultiplyImages() >>> test.inputs.dimension = 3 >>> test.inputs.first_input = 'moving2.nii' >>> test.inputs.second_input = 0.25 >>> test.inputs.output_product_image = "out.nii" >>> test.cmdline 'MultiplyImages 3 moving2.nii 0.25 out.nii' """ _cmd = 'MultiplyImages' input_spec = MultiplyImagesInputSpec output_spec = MultiplyImagesOutputSpec def _format_arg(self, opt, spec, val): return super(MultiplyImages, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['output_product_image'] = os.path.abspath( self.inputs.output_product_image) return outputs class JacobianDeterminantInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True, position=0, desc='image dimension (2 or 3)') warp_file = File(argstr='%s', exists=True, mandatory=True, position=1, desc='input warp file') output_prefix = File(argstr='%s', genfile=True, hash_files=False, position=2, desc=('prefix of the output image filename: ' 'PREFIX(log)jacobian.nii.gz')) use_log = traits.Enum(0, 1, argstr='%d', position=3, desc='log transform the jacobian determinant') template_mask = File(argstr='%s', exists=True, position=4, desc='template mask to adjust for head size') norm_by_total = traits.Enum(0, 1, argstr='%d', position=5, desc=('normalize jacobian by total in mask to ' 'adjust for head size')) projection_vector = traits.List(traits.Float(), argstr='%s', sep='x', position=6, desc='vector to project warp against') class JacobianDeterminantOutputSpec(TraitedSpec): jacobian_image = File(exists=True, desc='(log transformed) jacobian image') class JacobianDeterminant(ANTSCommand): """ Examples -------- >>> from nipype.interfaces.ants import 
JacobianDeterminant >>> jacobian = JacobianDeterminant() >>> jacobian.inputs.dimension = 3 >>> jacobian.inputs.warp_file = 'ants_Warp.nii.gz' >>> jacobian.inputs.output_prefix = 'Sub001_' >>> jacobian.inputs.use_log = 1 >>> jacobian.cmdline 'ANTSJacobian 3 ants_Warp.nii.gz Sub001_ 1' """ _cmd = 'ANTSJacobian' input_spec = JacobianDeterminantInputSpec output_spec = JacobianDeterminantOutputSpec def _gen_filename(self, name): if name == 'output_prefix': output = self.inputs.output_prefix if not isdefined(output): _, name, ext = split_filename(self.inputs.warp_file) output = name + '_' return output return None def _list_outputs(self): outputs = self._outputs().get() if self.inputs.use_log == 1: outputs['jacobian_image'] = os.path.abspath( self._gen_filename('output_prefix') + 'logjacobian.nii.gz') else: outputs['jacobian_image'] = os.path.abspath( self._gen_filename('output_prefix') + 'jacobian.nii.gz') return outputs nipype-0.9.2/nipype/interfaces/base.py000066400000000000000000001735061227300005300177430ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package contains interfaces for using existing functionality in other packages Examples FSL, matlab/SPM, afni Requires Packages to be installed """ from ConfigParser import NoOptionError from copy import deepcopy import datetime import errno import os import re import platform from socket import getfqdn from string import Template import select import subprocess import sys from textwrap import wrap from datetime import datetime as dt from dateutil.parser import parse as parseutc from warnings import warn from .traits_extension import (traits, Undefined, TraitDictObject, TraitListObject, TraitError, isdefined, File, Directory, has_metadata) from ..utils.filemanip import (md5, hash_infile, FileNotFoundError, hash_timestamp, save_json, split_filename) from ..utils.misc import is_container, trim, str2bool from ..utils.provenance import write_provenance from .. import config, logging, LooseVersion from .. import __version__ nipype_version = LooseVersion(__version__) iflogger = logging.getLogger('interface') __docformat__ = 'restructuredtext' def load_template(name): """Load a template from the script_templates directory Parameters ---------- name : str The name of the file to load Returns ------- template : string.Template """ full_fname = os.path.join(os.path.dirname(__file__), 'script_templates', name) template_file = open(full_fname) template = Template(template_file.read()) template_file.close() return template class Bunch(object): """Dictionary-like class that provides attribute-style access to its items. A `Bunch` is a simple container that stores its items as class attributes. Internally all items are stored in a dictionary and the class exposes several of the dictionary methods. Examples -------- >>> from nipype.interfaces.base import Bunch >>> inputs = Bunch(infile='subj.nii', fwhm=6.0, register_to_mean=True) >>> inputs Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=True) >>> inputs.register_to_mean = False >>> inputs Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=False) Notes ----- The Bunch pattern came from the Python Cookbook: .. [1] A. Martelli, D. Hudgeon, "Collecting a Bunch of Named Items", Python Cookbook, 2nd Ed, Chapter 4.18, 2005. 
""" def __init__(self, *args, **kwargs): self.__dict__.update(*args, **kwargs) def update(self, *args, **kwargs): """update existing attribute, or create new attribute Note: update is very much like HasTraits.set""" self.__dict__.update(*args, **kwargs) def items(self): """iterates over bunch attributes as key, value pairs""" return self.__dict__.items() def iteritems(self): """iterates over bunch attributes as key, value pairs""" warn('iteritems is deprecated, use items instead') return self.items() def get(self, *args): '''Support dictionary get() functionality ''' return self.__dict__.get(*args) def set(self, **kwargs): '''Support dictionary get() functionality ''' return self.__dict__.update(**kwargs) def dictcopy(self): """returns a deep copy of existing Bunch as a dictionary""" return deepcopy(self.__dict__) def __repr__(self): """representation of the sorted Bunch as a string Currently, this string representation of the `inputs` Bunch of interfaces is hashed to determine if the process' dirty-bit needs setting or not. Till that mechanism changes, only alter this after careful consideration. """ outstr = ['Bunch('] first = True for k, v in sorted(self.items()): if not first: outstr.append(', ') outstr.append('%s=%r' % (k, v)) first = False outstr.append(')') return ''.join(outstr) def _hash_infile(self, adict, key): # Inject file hashes into adict[key] stuff = adict[key] if not is_container(stuff): stuff = [stuff] file_list = [] for afile in stuff: if os.path.isfile(afile): md5obj = md5() fp = file(afile, 'rb') while True: data = fp.read(8192) if not data: break md5obj.update(data) fp.close() md5hex = md5obj.hexdigest() else: md5hex = None file_list.append((afile, md5hex)) return file_list def _get_bunch_hash(self): """Return a dictionary of our items with hashes for each file. Searches through dictionary items and if an item is a file, it calculates the md5 hash of the file contents and stores the file name and hash value as the new key value. However, the overall bunch hash is calculated only on the hash value of a file. The path and name of the file are not used in the overall hash calculation. Returns ------- dict_withhash : dict Copy of our dictionary with the new file hashes included with each file. hashvalue : str The md5 hash value of the `dict_withhash` """ infile_list = [] for key, val in self.items(): if is_container(val): # XXX - SG this probably doesn't catch numpy arrays # containing embedded file names either. if isinstance(val, dict): # XXX - SG should traverse dicts, but ignoring for now item = None else: if len(val) == 0: raise AttributeError('%s attribute is empty' % key) item = val[0] else: item = val try: if os.path.isfile(item): infile_list.append(key) except TypeError: # `item` is not a file or string. continue dict_withhash = self.dictcopy() dict_nofilename = self.dictcopy() for item in infile_list: dict_withhash[item] = self._hash_infile(dict_withhash, item) dict_nofilename[item] = [val[1] for val in dict_withhash[item]] # Sort the items of the dictionary, before hashing the string # representation so we get a predictable order of the # dictionary. 
sorted_dict = str(sorted(dict_nofilename.items())) return (dict_withhash, md5(sorted_dict).hexdigest()) def __pretty__(self, p, cycle): '''Support for the pretty module pretty is included in ipython.externals for ipython > 0.10''' if cycle: p.text('Bunch(...)') else: p.begin_group(6, 'Bunch(') first = True for k, v in sorted(self.items()): if not first: p.text(',') p.breakable() p.text(k + '=') p.pretty(v) first = False p.end_group(6, ')') class InterfaceResult(object): """Object that contains the results of running a particular Interface. Attributes ---------- version : version of this Interface result object (a readonly property) interface : class type A copy of the `Interface` class that was run to generate this result. inputs : a traits free representation of the inputs outputs : Bunch An `Interface` specific Bunch that contains all possible files that are generated by the interface. The `outputs` are used as the `inputs` to another node when interfaces are used in the pipeline. runtime : Bunch Contains attributes that describe the runtime environment when the `Interface` was run. Contains the attributes: * cmdline : The command line string that was executed * cwd : The directory the ``cmdline`` was executed in. * stdout : The output of running the ``cmdline``. * stderr : Any error messages output from running ``cmdline``. * returncode : The code returned from running the ``cmdline``. """ def __init__(self, interface, runtime, inputs=None, outputs=None, provenance=None): self._version = 2.0 self.interface = interface self.runtime = runtime self.inputs = inputs self.outputs = outputs self.provenance = provenance @property def version(self): return self._version class BaseTraitedSpec(traits.HasTraits): """Provide a few methods necessary to support nipype interface api The inputs attribute of interfaces call certain methods that are not available in traits.HasTraits. These are provided here. new metadata: * usedefault : set this to True if the default value of the trait should be used. Unless this is set, the attributes are set to traits.Undefined new attribute: * get_hashval : returns a tuple containing the state of the trait as a dict and hashvalue corresponding to dict. XXX Reconsider this in the long run, but it seems like the best solution to move forward on the refactoring. """ def __init__(self, **kwargs): """ Initialize handlers and inputs""" # NOTE: In python 2.6, object.__init__ no longer accepts input # arguments. HasTraits does not define an __init__ and # therefore these args were being ignored. #super(TraitedSpec, self).__init__(*args, **kwargs) super(BaseTraitedSpec, self).__init__(**kwargs) traits.push_exception_handler(reraise_exceptions=True) undefined_traits = {} for trait in self.copyable_trait_names(): if not self.traits()[trait].usedefault: undefined_traits[trait] = Undefined self.trait_set(trait_change_notify=False, **undefined_traits) self._generate_handlers() self.set(**kwargs) def items(self): """ Name, trait generator for user modifiable traits """ for name in sorted(self.copyable_trait_names()): yield name, self.traits()[name] def __repr__(self): """ Return a well-formatted representation of the traits """ outstr = [] for name, value in sorted(self.trait_get().items()): outstr.append('%s = %s' % (name, value)) return '\n' + '\n'.join(outstr) + '\n' def _generate_handlers(self): """Find all traits with the 'xor' metadata and attach an event handler to them. 
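Handlers are also attached here for traits carrying 'requires' and 'deprecated' metadata (see _requires_warn and _deprecated_warn below). 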
""" has_xor = dict(xor=lambda t: t is not None) xors = self.trait_names(**has_xor) for elem in xors: self.on_trait_change(self._xor_warn, elem) has_requires = dict(requires=lambda t: t is not None) requires = self.trait_names(**has_requires) for elem in requires: self.on_trait_change(self._requires_warn, elem) has_deprecation = dict(deprecated=lambda t: t is not None) deprecated = self.trait_names(**has_deprecation) for elem in deprecated: self.on_trait_change(self._deprecated_warn, elem) def _xor_warn(self, obj, name, old, new): """ Generates warnings for xor traits """ if isdefined(new): trait_spec = self.traits()[name] # for each xor, set to default_value for trait_name in trait_spec.xor: if trait_name == name: # skip ourself continue if isdefined(getattr(self, trait_name)): self.trait_set(trait_change_notify=False, **{'%s' % name: Undefined}) msg = ('Input "%s" is mutually exclusive with input "%s", ' 'which is already set') % (name, trait_name) raise IOError(msg) def _requires_warn(self, obj, name, old, new): """Part of the xor behavior """ if isdefined(new): trait_spec = self.traits()[name] msg = None for trait_name in trait_spec.requires: if not isdefined(getattr(self, trait_name)): if not msg: msg = 'Input %s requires inputs: %s' \ % (name, ', '.join(trait_spec.requires)) if msg: warn(msg) def _deprecated_warn(self, obj, name, old, new): """Checks if a user assigns a value to a deprecated trait """ if isdefined(new): trait_spec = self.traits()[name] msg1 = ('Input %s in interface %s is deprecated.' % (name, self.__class__.__name__.split('InputSpec')[0])) msg2 = ('Will be removed or raise an error as of release %s' % trait_spec.deprecated) if trait_spec.new_name: if trait_spec.new_name not in self.copyable_trait_names(): raise TraitError(msg1 + ' Replacement trait %s not found' % trait_spec.new_name) msg3 = 'It has been replaced by %s.' % trait_spec.new_name else: msg3 = '' msg = ' '.join((msg1, msg2, msg3)) if LooseVersion(str(trait_spec.deprecated)) < nipype_version: raise TraitError(msg) else: warn(msg) if trait_spec.new_name: warn('Unsetting %s and setting %s.' % (name, trait_spec.new_name)) self.trait_set(trait_change_notify=False, **{'%s' % name: Undefined, '%s' % trait_spec.new_name: new}) def _hash_infile(self, adict, key): """ Inject file hashes into adict[key]""" stuff = adict[key] if not is_container(stuff): stuff = [stuff] file_list = [] for afile in stuff: if is_container(afile): hashlist = self._hash_infile({'infiles': afile}, 'infiles') hash = [val[1] for val in hashlist] else: if config.get('execution', 'hash_method').lower() == 'timestamp': hash = hash_timestamp(afile) elif config.get('execution', 'hash_method').lower() == 'content': hash = hash_infile(afile) else: raise Exception("Unknown hash method: %s" % config.get('execution', 'hash_method')) file_list.append((afile, hash)) return file_list def get(self, **kwargs): """ Returns traited class as a dict Augments the trait get function to return a dictionary without notification handles """ out = super(BaseTraitedSpec, self).get(**kwargs) out = self._clean_container(out, Undefined) return out def get_traitsfree(self, **kwargs): """ Returns traited class as a dict Augments the trait get function to return a dictionary without any traits. 
The dictionary does not contain any attributes that were Undefined """ out = super(BaseTraitedSpec, self).get(**kwargs) out = self._clean_container(out, skipundefined=True) return out def _clean_container(self, object, undefinedval=None, skipundefined=False): """Convert a traited obejct into a pure python representation. """ if isinstance(object, TraitDictObject) or isinstance(object, dict): out = {} for key, val in object.items(): if isdefined(val): out[key] = self._clean_container(val, undefinedval) else: if not skipundefined: out[key] = undefinedval elif (isinstance(object, TraitListObject) or isinstance(object, list) or isinstance(object, tuple)): out = [] for val in object: if isdefined(val): out.append(self._clean_container(val, undefinedval)) else: if not skipundefined: out.append(undefinedval) else: out.append(None) if isinstance(object, tuple): out = tuple(out) else: if isdefined(object): out = object else: if not skipundefined: out = undefinedval return out def get_hashval(self, hash_method=None): """Return a dictionary of our items with hashes for each file. Searches through dictionary items and if an item is a file, it calculates the md5 hash of the file contents and stores the file name and hash value as the new key value. However, the overall bunch hash is calculated only on the hash value of a file. The path and name of the file are not used in the overall hash calculation. Returns ------- dict_withhash : dict Copy of our dictionary with the new file hashes included with each file. hashvalue : str The md5 hash value of the traited spec """ dict_withhash = {} dict_nofilename = {} for name, val in sorted(self.get().items()): if isdefined(val): trait = self.trait(name) if has_metadata(trait.trait_type, "nohash", True): continue hash_files = (not has_metadata(trait.trait_type, "hash_files", False) and not has_metadata(trait.trait_type, "name_source")) dict_nofilename[name] = \ self._get_sorteddict(val, hash_method=hash_method, hash_files=hash_files) dict_withhash[name] = \ self._get_sorteddict(val, True, hash_method=hash_method, hash_files=hash_files) return (dict_withhash, md5(str(dict_nofilename)).hexdigest()) def _get_sorteddict(self, object, dictwithhash=False, hash_method=None, hash_files=True): if isinstance(object, dict): out = {} for key, val in sorted(object.items()): if isdefined(val): out[key] = \ self._get_sorteddict(val, dictwithhash, hash_method=hash_method, hash_files=hash_files) elif isinstance(object, (list, tuple)): out = [] for val in object: if isdefined(val): out.append(self._get_sorteddict(val, dictwithhash, hash_method=hash_method, hash_files=hash_files)) if isinstance(object, tuple): out = tuple(out) else: if isdefined(object): if (hash_files and isinstance(object, str) and os.path.isfile(object)): if hash_method is None: hash_method = config.get('execution', 'hash_method') if hash_method.lower() == 'timestamp': hash = hash_timestamp(object) elif hash_method.lower() == 'content': hash = hash_infile(object) else: raise Exception("Unknown hash method: %s" % hash_method) if dictwithhash: out = (object, hash) else: out = hash elif isinstance(object, float): out = '%.10f' % object else: out = object return out class DynamicTraitedSpec(BaseTraitedSpec): """ A subclass to handle dynamic traits This class is a workaround for add_traits and clone_traits not functioning well together. 
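    A minimal sketch of adding a trait at run time (illustrative only, uses
    the standard HasTraits ``add_trait`` API)::

        >>> spec = DynamicTraitedSpec()  # doctest: +SKIP
        >>> spec.add_trait('moving_image', File(exists=False))  # doctest: +SKIP
        >>> spec.moving_image = 'functional.nii'  # doctest: +SKIP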
""" def __deepcopy__(self, memo): """ bug in deepcopy for HasTraits results in weird cloning behavior for added traits """ id_self = id(self) if id_self in memo: return memo[id_self] dup_dict = deepcopy(self.get(), memo) # access all keys for key in self.copyable_trait_names(): _ = getattr(self, key) # clone once dup = self.clone_traits(memo=memo) for key in self.copyable_trait_names(): try: _ = getattr(dup, key) except: pass # clone twice dup = self.clone_traits(memo=memo) dup.set(**dup_dict) return dup class TraitedSpec(BaseTraitedSpec): """ Create a subclass with strict traits. This is used in 90% of the cases. """ _ = traits.Disallow class Interface(object): """This is an abstract definition for Interface objects. It provides no functionality. It defines the necessary attributes and methods all Interface objects should have. """ input_spec = None # A traited input specification output_spec = None # A traited output specification _can_resume = False # defines if the interface can reuse partial results # after interruption @property def can_resume(self): return self._can_resume _always_run = False # should the interface be always run even if the # inputs were not changed? @property def always_run(self): return self._always_run def __init__(self, **inputs): """Initialize command with given args and inputs.""" raise NotImplementedError @classmethod def help(cls): """ Prints class help""" raise NotImplementedError @classmethod def _inputs_help(cls): """ Prints inputs help""" raise NotImplementedError @classmethod def _outputs_help(cls): """ Prints outputs help""" raise NotImplementedError @classmethod def _outputs(cls): """ Initializes outputs""" raise NotImplementedError @property def version(self): raise NotImplementedError def run(self): """Execute the command.""" raise NotImplementedError def aggregate_outputs(self, runtime=None, needed_outputs=None): """Called to populate outputs""" raise NotImplementedError def _list_outputs(self): """ List expected outputs""" raise NotImplementedError def _get_filecopy_info(self): """ Provides information about file inputs to copy or link to cwd. Necessary for pipeline operation """ raise NotImplementedError class BaseInterfaceInputSpec(TraitedSpec): ignore_exception = traits.Bool(False, desc="Print an error message instead \ of throwing an exception in case the interface fails to run", usedefault=True, nohash=True) class BaseInterface(Interface): """Implements common interface functionality. Implements ---------- * Initializes inputs/outputs from input_spec/output_spec * Provides help based on input_spec and output_spec * Checks for mandatory inputs before running an interface * Runs an interface and returns results * Determines which inputs should be copied or linked to cwd This class does not implement aggregate_outputs, input_spec or output_spec. These should be defined by derived classes. This class cannot be instantiated. 
""" input_spec = BaseInterfaceInputSpec _version = None _additional_metadata = [] def __init__(self, **inputs): if not self.input_spec: raise Exception('No input_spec in class: %s' % self.__class__.__name__) self.inputs = self.input_spec(**inputs) @classmethod def help(cls, returnhelp=False): """ Prints class help """ if cls.__doc__: #docstring = cls.__doc__.split('\n') #docstring = [trim(line, '') for line in docstring] docstring = trim(cls.__doc__).split('\n') + [''] else: docstring = [''] allhelp = '\n'.join(docstring + cls._inputs_help() + [''] + cls._outputs_help() + ['']) if returnhelp: return allhelp else: print(allhelp) @classmethod def _get_trait_desc(self, inputs, name, spec): desc = spec.desc xor = spec.xor requires = spec.requires manhelpstr = ['\t%s' % name] try: setattr(inputs, name, None) except TraitError as excp: def_val = '' if getattr(spec, 'usedefault'): def_arg = getattr(spec, 'default_value')()[1] def_val = ', nipype default value: %s' % str(def_arg) line = "(%s%s)" % (excp.info, def_val) manhelpstr = wrap(line, 70, initial_indent=manhelpstr[0]+': ', subsequent_indent='\t\t ') if desc: for line in desc.split('\n'): line = re.sub("\s+", " ", line) manhelpstr += wrap(line, 70, initial_indent='\t\t', subsequent_indent='\t\t') if xor: line = '%s' % ', '.join(xor) manhelpstr += wrap(line, 70, initial_indent='\t\tmutually_exclusive: ', subsequent_indent='\t\t ') if requires: others = [field for field in requires if field != name] line = '%s' % ', '.join(others) manhelpstr += wrap(line, 70, initial_indent='\t\trequires: ', subsequent_indent='\t\t ') return manhelpstr @classmethod def _inputs_help(cls): """ Prints description for input parameters """ helpstr = ['Inputs::'] inputs = cls.input_spec() if len(inputs.traits(transient=None).items()) == 0: helpstr += ['', '\tNone'] return helpstr manhelpstr = ['', '\t[Mandatory]'] mandatory_items = inputs.traits(mandatory=True) for name, spec in sorted(mandatory_items.items()): manhelpstr += cls._get_trait_desc(inputs, name, spec) opthelpstr = ['', '\t[Optional]'] for name, spec in sorted(inputs.traits(transient=None).items()): if spec in mandatory_items: continue opthelpstr += cls._get_trait_desc(inputs, name, spec) if manhelpstr: helpstr += manhelpstr if opthelpstr: helpstr += opthelpstr return helpstr @classmethod def _outputs_help(cls): """ Prints description for output parameters """ helpstr = ['Outputs::', ''] if cls.output_spec: outputs = cls.output_spec() for name, spec in sorted(outputs.traits(transient=None).items()): helpstr += cls._get_trait_desc(outputs, name, spec) if len(helpstr) == 2: helpstr += ['\tNone'] return helpstr def _outputs(self): """ Returns a bunch containing output fields for the class """ outputs = None if self.output_spec: outputs = self.output_spec() return outputs @classmethod def _get_filecopy_info(cls): """ Provides information about file inputs to copy or link to cwd. Necessary for pipeline operation """ info = [] if cls.input_spec is None: return info metadata = dict(copyfile=lambda t: t is not None) for name, spec in sorted(cls.input_spec().traits(**metadata).items()): info.append(dict(key=name, copy=spec.copyfile)) return info def _check_requires(self, spec, name, value): """ check if required inputs are satisfied """ if spec.requires: values = [not isdefined(getattr(self.inputs, field)) for field in spec.requires] if any(values) and isdefined(value): msg = ("%s requires a value for input '%s' because one of %s " "is set. 
For a list of required inputs, see %s.help()" % (self.__class__.__name__, name, ', '.join(spec.requires), self.__class__.__name__)) raise ValueError(msg) def _check_xor(self, spec, name, value): """ check if mutually exclusive inputs are satisfied """ if spec.xor: values = [isdefined(getattr(self.inputs, field)) for field in spec.xor] if not any(values) and not isdefined(value): msg = ("%s requires a value for one of the inputs '%s'. " "For a list of required inputs, see %s.help()" % (self.__class__.__name__, ', '.join(spec.xor), self.__class__.__name__)) raise ValueError(msg) def _check_mandatory_inputs(self): """ Raises an exception if a mandatory input is Undefined """ for name, spec in self.inputs.traits(mandatory=True).items(): value = getattr(self.inputs, name) self._check_xor(spec, name, value) if not isdefined(value) and spec.xor is None: msg = ("%s requires a value for input '%s'. " "For a list of required inputs, see %s.help()" % (self.__class__.__name__, name, self.__class__.__name__)) raise ValueError(msg) if isdefined(value): self._check_requires(spec, name, value) for name, spec in self.inputs.traits(mandatory=None, transient=None).items(): self._check_requires(spec, name, getattr(self.inputs, name)) def _check_version_requirements(self, trait_object, raise_exception=True): """ Raises an exception on version mismatch """ unavailable_traits = [] version = LooseVersion(str(self.version)) if not version: return # check minimum version check = dict(min_ver=lambda t: t is not None) names = trait_object.trait_names(**check) for name in names: min_ver = LooseVersion(str(trait_object.traits()[name].min_ver)) if min_ver > version: unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): continue if raise_exception: raise Exception('Trait %s (%s) (version %s < required %s)' % (name, self.__class__.__name__, version, min_ver)) check = dict(max_ver=lambda t: t is not None) names = trait_object.trait_names(**check) for name in names: max_ver = LooseVersion(str(trait_object.traits()[name].max_ver)) if max_ver < version: unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): continue if raise_exception: raise Exception('Trait %s (%s) (version %s > required %s)' % (name, self.__class__.__name__, version, max_ver)) return unavailable_traits def _run_interface(self, runtime): """ Core function that executes interface """ raise NotImplementedError def run(self, **inputs): """Execute this interface. This interface will not raise an exception if runtime.returncode is non-zero. Parameters ---------- inputs : allows the interface settings to be updated Returns ------- results : an InterfaceResult object containing a copy of the instance that was executed, provenance information and, if successful, results """ self.inputs.set(**inputs) self._check_mandatory_inputs() self._check_version_requirements(self.inputs) interface = self.__class__ # initialize provenance tracking env = deepcopy(os.environ.data) runtime = Bunch(cwd=os.getcwd(), returncode=None, duration=None, environ=env, startTime=dt.isoformat(dt.utcnow()), endTime=None, platform=platform.platform(), hostname=getfqdn(), version=self.version) try: runtime = self._run_interface(runtime) outputs = self.aggregate_outputs(runtime) runtime.endTime = dt.isoformat(dt.utcnow()) timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime) runtime.duration = timediff.days * 86400 + timediff.seconds + \ timediff.microseconds/100000. 
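            # Success path: package the interface, runtime and collected
            # outputs into an InterfaceResult; provenance is attached below
            # when 'write_provenance' is enabled in the execution config.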
results = InterfaceResult(interface, runtime, inputs=self.inputs.get_traitsfree(), outputs=outputs) prov_record = None if str2bool(config.get('execution', 'write_provenance')): prov_record = write_provenance(results) results.provenance = prov_record except Exception, e: runtime.endTime = dt.isoformat(dt.utcnow()) timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime) runtime.duration = timediff.days * 86400 + timediff.seconds + \ timediff.microseconds/100000. if len(e.args) == 0: e.args = ("") message = "\nInterface %s failed to run." % self.__class__.__name__ if config.has_option('logging', 'interface_level') and \ config.get('logging', 'interface_level').lower() == 'debug': inputs_str = "Inputs:" + str(self.inputs) + "\n" else: inputs_str = '' if len(e.args) == 1 and isinstance(e.args[0], str): e.args = (e.args[0] + " ".join([message, inputs_str]),) else: e.args += (message, ) if inputs_str != '': e.args += (inputs_str, ) #exception raising inhibition for special cases import traceback runtime.traceback = traceback.format_exc() runtime.traceback_args = e.args inputs = None try: inputs = self.inputs.get_traitsfree() except Exception, e: pass results = InterfaceResult(interface, runtime, inputs=inputs) prov_record = None if str2bool(config.get('execution', 'write_provenance')): try: prov_record = write_provenance(results) except Exception: prov_record = None results.provenance = prov_record if hasattr(self.inputs, 'ignore_exception') and \ isdefined(self.inputs.ignore_exception) and \ self.inputs.ignore_exception: pass else: raise return results def _list_outputs(self): """ List the expected outputs """ if self.output_spec: raise NotImplementedError else: return None def aggregate_outputs(self, runtime=None, needed_outputs=None): """ Collate expected outputs and check for existence """ predicted_outputs = self._list_outputs() outputs = self._outputs() if predicted_outputs: _unavailable_outputs = [] if outputs: _unavailable_outputs = \ self._check_version_requirements(self._outputs()) for key, val in predicted_outputs.items(): if needed_outputs and key not in needed_outputs: continue if key in _unavailable_outputs: raise KeyError(('Output trait %s not available in version ' '%s of interface %s. Please inform ' 'developers.') % (key, self.version, self.__class__.__name__)) try: setattr(outputs, key, val) _ = getattr(outputs, key) except TraitError, error: if hasattr(error, 'info') and \ error.info.startswith("an existing"): msg = ("File/Directory '%s' not found for %s output " "'%s'." % (val, self.__class__.__name__, key)) raise FileNotFoundError(msg) else: raise error return outputs @property def version(self): if self._version is None: if str2bool(config.get('execution', 'stop_on_unknown_version')): raise ValueError('Interface %s has no version information' % self.__class__.__name__) return self._version class Stream(object): """Function to capture stdout and stderr streams with timestamps stackoverflow.com/questions/4984549/merge-and-sync-stdout-and-stderr/5188359 """ def __init__(self, name, impl): self._name = name self._impl = impl self._buf = '' self._rows = [] self._lastidx = 0 def fileno(self): "Pass-through for file descriptor." return self._impl.fileno() def read(self, drain=0): "Read from the file descriptor. If 'drain' set, read until EOF." 
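        # A single _read() call suffices for normal polling; when 'drain' is
        # set we keep reading until the descriptor reports EOF (None).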
while self._read(drain) is not None: if not drain: break def _read(self, drain): "Read from the file descriptor" fd = self.fileno() buf = os.read(fd, 4096) if not buf and not self._buf: return None if '\n' not in buf: if not drain: self._buf += buf return [] # prepend any data previously read, then split into lines and format buf = self._buf + buf if '\n' in buf: tmp, rest = buf.rsplit('\n', 1) else: tmp = buf rest = None self._buf = rest now = datetime.datetime.now().isoformat() rows = tmp.split('\n') self._rows += [(now, '%s %s:%s' % (self._name, now, r), r) for r in rows] for idx in range(self._lastidx, len(self._rows)): iflogger.info(self._rows[idx][1]) self._lastidx = len(self._rows) def run_command(runtime, output=None, timeout=0.01): """Run a command, read stdout and stderr, prefix with timestamp. The returned runtime contains a merged stdout+stderr log with timestamps """ PIPE = subprocess.PIPE proc = subprocess.Popen(runtime.cmdline, stdout=PIPE, stderr=PIPE, shell=True, cwd=runtime.cwd, env=runtime.environ) result = {} errfile = os.path.join(runtime.cwd, 'stderr.nipype') outfile = os.path.join(runtime.cwd, 'stdout.nipype') if output == 'stream': streams = [Stream('stdout', proc.stdout), Stream('stderr', proc.stderr)] def _process(drain=0): try: res = select.select(streams, [], [], timeout) except select.error, e: iflogger.info(str(e)) if e[0] == errno.EINTR: return else: raise else: for stream in res[0]: stream.read(drain) while proc.returncode is None: proc.poll() _process() _process(drain=1) # collect results, merge and return result = {} temp = [] for stream in streams: rows = stream._rows temp += rows result[stream._name] = [r[2] for r in rows] temp.sort() result['merged'] = [r[1] for r in temp] if output == 'allatonce': stdout, stderr = proc.communicate() result['stdout'] = stdout.split('\n') result['stderr'] = stderr.split('\n') result['merged'] = '' if output == 'file': stderr = open(errfile, 'wt') stdout = open(outfile, 'wt') proc = subprocess.Popen(runtime.cmdline, stdout=stdout, stderr=stderr, shell=True, cwd=runtime.cwd, env=runtime.environ) ret_code = proc.wait() stderr.flush() stdout.flush() result['stdout'] = [line.strip() for line in open(outfile).readlines()] result['stderr'] = [line.strip() for line in open(errfile).readlines()] result['merged'] = '' if output == 'none': proc.communicate() result['stdout'] = [] result['stderr'] = [] result['merged'] = '' runtime.stderr = '\n'.join(result['stderr']) runtime.stdout = '\n'.join(result['stdout']) runtime.merged = result['merged'] runtime.returncode = proc.returncode return runtime def get_dependencies(name, environ): """Return library dependencies of a dynamically linked executable Uses otool on darwin, ldd on linux. Currently doesn't support windows. 
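    Illustrative call (output is the raw text produced by otool/ldd)::

        >>> get_dependencies('ls', os.environ)  # doctest: +SKIP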
""" PIPE = subprocess.PIPE if sys.platform == 'darwin': proc = subprocess.Popen('otool -L `which %s`' % name, stdout=PIPE, stderr=PIPE, shell=True, env=environ) elif 'linux' in sys.platform: proc = subprocess.Popen('ldd `which %s`' % name, stdout=PIPE, stderr=PIPE, shell=True, env=environ) else: return 'Platform %s not supported' % sys.platform o, e = proc.communicate() return o.rstrip() class CommandLineInputSpec(BaseInterfaceInputSpec): args = traits.Str(argstr='%s', desc='Additional parameters to the command') environ = traits.DictStrStr(desc='Environment variables', usedefault=True, nohash=True) terminal_output = traits.Enum('stream', 'allatonce', 'file', 'none', desc=('Control terminal output: `stream` - ' 'displays to terminal immediately, ' '`allatonce` - waits till command is ' 'finished to display output, `file` - ' 'writes output to file, `none` - output' ' is ignored'), nohash=True, mandatory=True) class CommandLine(BaseInterface): """Implements functionality to interact with command line programs class must be instantiated with a command argument Parameters ---------- command : string define base immutable `command` you wish to run args : string, optional optional arguments passed to base `command` Examples -------- >>> from nipype.interfaces.base import CommandLine >>> cli = CommandLine(command='ls', environ={'DISPLAY': ':1'}) >>> cli.inputs.args = '-al' >>> cli.cmdline 'ls -al' >>> cli.inputs.trait_get() # doctest: +NORMALIZE_WHITESPACE {'ignore_exception': False, 'terminal_output': 'stream', 'environ': {'DISPLAY': ':1'}, 'args': '-al'} >>> cli.inputs.get_hashval() ({'args': '-al'}, 'a2f45e04a34630c5f33a75ea2a533cdd') """ input_spec = CommandLineInputSpec _cmd = None _version = None _terminal_output = 'stream' def __init__(self, command=None, **inputs): super(CommandLine, self).__init__(**inputs) self._environ = None if not hasattr(self, '_cmd'): self._cmd = None if self.cmd is None and command is None: raise Exception("Missing command") if command: self._cmd = command self.inputs.on_trait_change(self._terminal_output_update, 'terminal_output') if not isdefined(self.inputs.terminal_output): self.inputs.terminal_output = self._terminal_output else: self._terminal_output_update() def _terminal_output_update(self): self._terminal_output = self.inputs.terminal_output @classmethod def set_default_terminal_output(cls, output_type): """Set the default output type for FSL classes. This method is used to set the default output type for all fSL subclasses. However, setting this will not update the output type for any existing instances. For these, assign the .inputs.output_type. 
""" if output_type in ['stream', 'allatonce', 'file', 'none']: cls._terminal_output = output_type else: raise AttributeError('Invalid terminal output_type: %s' % output_type) @property def cmd(self): """sets base command, immutable""" return self._cmd @property def cmdline(self): """ `command` plus any arguments (args) validates arguments and generates command line""" self._check_mandatory_inputs() allargs = self._parse_inputs() allargs.insert(0, self.cmd) return ' '.join(allargs) def raise_exception(self, runtime): message = "Command:\n" + runtime.cmdline + "\n" message += "Standard output:\n" + runtime.stdout + "\n" message += "Standard error:\n" + runtime.stderr + "\n" message += "Return code: " + str(runtime.returncode) raise RuntimeError(message) @classmethod def help(cls, returnhelp=False): allhelp = super(CommandLine, cls).help(returnhelp=True) allhelp = "Wraps command **%s**\n\n" % cls._cmd + allhelp if returnhelp: return allhelp else: print(allhelp) def _get_environ(self): out_environ = {} try: display_var = config.get('execution', 'display_variable') out_environ = {'DISPLAY': display_var} except NoOptionError: pass iflogger.debug(out_environ) if isdefined(self.inputs.environ): out_environ.update(self.inputs.environ) return out_environ def version_from_command(self, flag='-v'): cmdname = self.cmd.split()[0] if self._exists_in_path(cmdname): env = deepcopy(os.environ.data) out_environ = self._get_environ() env.update(out_environ) proc = subprocess.Popen(' '.join((cmdname, flag)), shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) o, e = proc.communicate() return o def _run_interface(self, runtime, correct_return_codes=[0]): """Execute command via subprocess Parameters ---------- runtime : passed by the run function Returns ------- runtime : updated runtime information adds stdout, stderr, merged, cmdline, dependencies, command_path """ setattr(runtime, 'stdout', None) setattr(runtime, 'stderr', None) setattr(runtime, 'cmdline', self.cmdline) out_environ = self._get_environ() runtime.environ.update(out_environ) executable_name = self.cmd.split()[0] exist_val, cmd_path = self._exists_in_path(executable_name, runtime.environ) if not exist_val: raise IOError("%s could not be found on host %s" % (self.cmd.split()[0], runtime.hostname)) setattr(runtime, 'command_path', cmd_path) setattr(runtime, 'dependencies', get_dependencies(executable_name, runtime.environ)) runtime = run_command(runtime, output=self.inputs.terminal_output) if runtime.returncode is None or \ runtime.returncode not in correct_return_codes: self.raise_exception(runtime) return runtime def _exists_in_path(self, cmd, environ): ''' Based on a code snippet from http://orip.org/2009/08/python-checking-if-executable-exists-in.html ''' if 'PATH' in environ: input_environ = environ.get("PATH") else: input_environ = os.environ.get("PATH", "") extensions = os.environ.get("PATHEXT", "").split(os.pathsep) for directory in input_environ.split(os.pathsep): base = os.path.join(directory, cmd) options = [base] + [(base + ext) for ext in extensions] for filename in options: if os.path.exists(filename): return True, filename return False, None def _format_arg(self, name, trait_spec, value): """A helper function for _parse_inputs Formats a trait containing argstr metadata """ argstr = trait_spec.argstr iflogger.debug('%s_%s' % (name, str(value))) if trait_spec.is_trait_type(traits.Bool) and "%" not in argstr: if value: # Boolean options have no format string. Just append options # if True. 
return argstr else: return None # traits.Either turns into traits.TraitCompound and does not have any # inner_traits elif trait_spec.is_trait_type(traits.List) \ or (trait_spec.is_trait_type(traits.TraitCompound) and isinstance(value, list)): # This is a bit simple-minded at present, and should be # construed as the default. If more sophisticated behavior # is needed, it can be accomplished with metadata (e.g. # format string for list member str'ification, specifying # the separator, etc.) # Depending on whether we stick with traitlets, and whether or # not we beef up traitlets.List, we may want to put some # type-checking code here as well sep = trait_spec.sep if sep is None: sep = ' ' if argstr.endswith('...'): # repeatable option # --id %d... will expand to # --id 1 --id 2 --id 3 etc.,. argstr = argstr.replace('...', '') return sep.join([argstr % elt for elt in value]) else: return argstr % sep.join(str(elt) for elt in value) else: # Append options using format string. return argstr % value def _filename_from_source(self, name): trait_spec = self.inputs.trait(name) retval = getattr(self.inputs, name) if not isdefined(retval) or "%s" in retval: if not trait_spec.name_source: return retval if isdefined(retval) and "%s" in retval: name_template = retval else: name_template = trait_spec.name_template if not name_template: name_template = "%s_generated" if isinstance(trait_spec.name_source, list): for ns in trait_spec.name_source: if isdefined(getattr(self.inputs, ns)): name_source = ns break else: name_source = trait_spec.name_source source = getattr(self.inputs, name_source) while isinstance(source, list): source = source[0] _, base, _ = split_filename(source) retval = name_template % base _, _, ext = split_filename(retval) if trait_spec.keep_extension and ext: return retval return self._overload_extension(retval) return retval def _gen_filename(self, name): raise NotImplementedError def _overload_extension(self, value): return value def _list_outputs(self): metadata = dict(name_source=lambda t: t is not None) traits = self.inputs.traits(**metadata) if traits: outputs = self.output_spec().get() for name, trait_spec in traits.iteritems(): out_name = name if trait_spec.output_name != None: out_name = trait_spec.output_name outputs[out_name] = \ os.path.abspath(self._filename_from_source(name)) return outputs def _parse_inputs(self, skip=None): """Parse all inputs using the ``argstr`` format string in the Trait. Any inputs that are assigned (not the default_value) are formatted to be added to the command line. Returns ------- all_args : list A list of all inputs formatted for the command line. 
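        For example (illustrative, hypothetical trait names): an input
        declared with ``argstr='-i %s', position=0`` set to ``'in.nii'``,
        together with ``args = '-v'``, would yield ``['-i in.nii', '-v']``;
        non-negative positions are placed first, negative positions last, and
        the remaining arguments keep their sorted-by-name order.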
""" all_args = [] initial_args = {} final_args = {} metadata = dict(argstr=lambda t: t is not None) for name, spec in sorted(self.inputs.traits(**metadata).items()): if skip and name in skip: continue value = getattr(self.inputs, name) if spec.genfile or spec.name_source: value = self._filename_from_source(name) if not isdefined(value): value = self._gen_filename(name) if not isdefined(value): continue arg = self._format_arg(name, spec, value) if arg is None: continue pos = spec.position if pos is not None: if pos >= 0: initial_args[pos] = arg else: final_args[pos] = arg else: all_args.append(arg) first_args = [arg for pos, arg in sorted(initial_args.items())] last_args = [arg for pos, arg in sorted(final_args.items())] return first_args + all_args + last_args class StdOutCommandLineInputSpec(CommandLineInputSpec): out_file = File(argstr="> %s", position=-1, genfile=True) class StdOutCommandLine(CommandLine): input_spec = StdOutCommandLineInputSpec def _gen_filename(self, name): if name is 'out_file': return self._gen_outfilename() else: return None def _gen_outfilename(self): raise NotImplementedError class MpiCommandLineInputSpec(CommandLineInputSpec): use_mpi = traits.Bool(False, desc="Whether or not to run the command with mpiexec", usedefault=True) n_procs = traits.Int(desc="Num processors to specify to mpiexec. Do not " "specify if this is managed externally (e.g. through " "SGE)") class MpiCommandLine(CommandLine): '''Implements functionality to interact with command line programs that can be run with MPI (i.e. using 'mpiexec'). Examples -------- >>> from nipype.interfaces.base import MpiCommandLine >>> mpi_cli = MpiCommandLine(command='my_mpi_prog') >>> mpi_cli.inputs.args = '-v' >>> mpi_cli.cmdline 'my_mpi_prog -v' >>> mpi_cli.inputs.use_mpi = True >>> mpi_cli.inputs.n_procs = 8 >>> mpi_cli.cmdline 'mpiexec -n 8 my_mpi_prog -v' ''' input_spec = MpiCommandLineInputSpec @property def cmdline(self): """Adds 'mpiexec' to begining of command""" result = [] if self.inputs.use_mpi: result.append('mpiexec') if self.inputs.n_procs: result.append('-n %d' % self.inputs.n_procs) result.append(super(MpiCommandLine, self).cmdline) return ' '.join(result) class SEMLikeCommandLine(CommandLine): """In SEM derived interface all outputs have corresponding inputs. However, some SEM commands create outputs that are not defined in the XML. In those cases one has to create a subclass of the autogenerated one and overload the _list_outputs method. _outputs_from_inputs should still be used but only for the reduced (by excluding those that do not have corresponding inputs list of outputs. 
""" def _list_outputs(self): outputs = self.output_spec().get() return self._outputs_from_inputs(outputs) def _outputs_from_inputs(self, outputs): for name in outputs.keys(): corresponding_input = getattr(self.inputs, name) if isdefined(corresponding_input): if (isinstance(corresponding_input, bool) and corresponding_input): outputs[name] = \ os.path.abspath(self._outputs_filenames[name]) else: if isinstance(corresponding_input, list): outputs[name] = [os.path.abspath(inp) for inp in corresponding_input] else: outputs[name] = os.path.abspath(corresponding_input) return outputs def _format_arg(self, name, spec, value): if name in self._outputs_filenames.keys(): if isinstance(value, bool): if value: value = os.path.abspath(self._outputs_filenames[name]) else: return "" return super(SEMLikeCommandLine, self)._format_arg(name, spec, value) class MultiPath(traits.List): """ Abstract class - shared functionality of input and output MultiPath """ def validate(self, object, name, value): if not isdefined(value) or \ (isinstance(value, list) and len(value) == 0): return Undefined newvalue = value if not isinstance(value, list) \ or (self.inner_traits() and isinstance(self.inner_traits()[0].trait_type, traits.List) and not isinstance(self.inner_traits()[0].trait_type, InputMultiPath) and isinstance(value, list) and value and not isinstance(value[0], list)): newvalue = [value] value = super(MultiPath, self).validate(object, name, newvalue) if len(value) > 0: return value self.error(object, name, value) class OutputMultiPath(MultiPath): """ Implements a user friendly traits that accepts one or more paths to files or directories. This is the output version which return a single string whenever possible (when it was set to a single value or a list of length 1). Default value of this trait is _Undefined. It does not accept empty lists. XXX This should only be used as a final resort. We should stick to established Traits to the extent possible. XXX This needs to be vetted by somebody who understands traits >>> from nipype.interfaces.base import OutputMultiPath >>> class A(TraitedSpec): ... foo = OutputMultiPath(File(exists=False)) >>> a = A() >>> a.foo >>> a.foo = '/software/temp/foo.txt' >>> a.foo '/software/temp/foo.txt' >>> a.foo = ['/software/temp/foo.txt'] >>> a.foo '/software/temp/foo.txt' >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt'] >>> a.foo ['/software/temp/foo.txt', '/software/temp/goo.txt'] """ def get(self, object, name): value = self.get_value(object, name) if len(value) == 0: return Undefined elif len(value) == 1: return value[0] else: return value def set(self, object, name, value): self.set_value(object, name, value) class InputMultiPath(MultiPath): """ Implements a user friendly traits that accepts one or more paths to files or directories. This is the input version which always returns a list. Default value of this trait is _Undefined. It does not accept empty lists. XXX This should only be used as a final resort. We should stick to established Traits to the extent possible. XXX This needs to be vetted by somebody who understands traits >>> from nipype.interfaces.base import InputMultiPath >>> class A(TraitedSpec): ... 
foo = InputMultiPath(File(exists=False)) >>> a = A() >>> a.foo >>> a.foo = '/software/temp/foo.txt' >>> a.foo ['/software/temp/foo.txt'] >>> a.foo = ['/software/temp/foo.txt'] >>> a.foo ['/software/temp/foo.txt'] >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt'] >>> a.foo ['/software/temp/foo.txt', '/software/temp/goo.txt'] """ pass nipype-0.9.2/nipype/interfaces/c3.py000066400000000000000000000031011227300005300173170ustar00rootroot00000000000000"""The ants module provides basic functions for interfacing with ants functions. Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import (CommandLineInputSpec, traits, TraitedSpec, File, SEMLikeCommandLine) class C3dAffineToolInputSpec(CommandLineInputSpec): reference_file = File(exists=True, argstr="-ref %s", position=1) source_file = File(exists=True, argstr='-src %s', position=2) transform_file = File(exists=True, argstr='%s', position=3) itk_transform = traits.Either(traits.Bool, File(), hash_files=False, desc="Export ITK transform.", argstr="-oitk %s", position=5) fsl2ras = traits.Bool(argstr='-fsl2ras', position=4) class C3dAffineToolOutputSpec(TraitedSpec): itk_transform = File(exists=True) class C3dAffineTool(SEMLikeCommandLine): """Converts fsl-style Affine registration into ANTS compatible itk format Example ======= >>> from nipype.interfaces.c3 import C3dAffineTool >>> c3 = C3dAffineTool() >>> c3.inputs.source_file = 'cmatrix.mat' >>> c3.inputs.itk_transform = 'affine.txt' >>> c3.inputs.fsl2ras = True >>> c3.cmdline 'c3d_affine_tool -src cmatrix.mat -fsl2ras -oitk affine.txt' """ input_spec = C3dAffineToolInputSpec output_spec = C3dAffineToolOutputSpec _cmd = 'c3d_affine_tool' _outputs_filenames = {'itk_transform': 'affine.txt'} nipype-0.9.2/nipype/interfaces/camino/000077500000000000000000000000001227300005300177135ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/camino/__init__.py000066400000000000000000000013231227300005300220230ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Camino top level namespace """ from .connectivity import Conmat from .convert import (Image2Voxel, FSL2Scheme, VtkStreamlines, ProcStreamlines, TractShredder, DT2NIfTI, NIfTIDT2Camino, AnalyzeHeader) from .dti import (DTIFit, ModelFit, DTLUTGen, PicoPDFs, Track, TrackPICo, TrackBayesDirac, TrackDT, TrackBallStick, TrackBootstrap, ComputeFractionalAnisotropy, ComputeMeanDiffusivity, ComputeTensorTrace, ComputeEigensystem, DTMetric) from .calib import (SFPICOCalibData, SFLUTGen) from .odf import (QBallMX, LinRecon, SFPeaks) nipype-0.9.2/nipype/interfaces/camino/calib.py000066400000000000000000000275721227300005300213540ustar00rootroot00000000000000""" Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os from nipype.interfaces.base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, File, StdOutCommandLine, StdOutCommandLineInputSpec, isdefined) from nipype.utils.filemanip import split_filename class SFPICOCalibDataInputSpec(StdOutCommandLineInputSpec): snr = traits.Float(argstr='-snr %f', units='NA', desc=('Specifies the 
signal-to-noise ratio of the ' 'non-diffusion-weighted measurements to use in simulations.')) scheme_file = File(exists=True, argstr='-schemefile %s', mandatory=True, desc='Specifies the scheme file for the diffusion MRI data') info_file = File(desc='The name to be given to the information output filename.', argstr='-infooutputfile %s', mandatory=True, genfile=True, hash_files=False) # Genfile and hash_files? trace = traits.Float(argstr='-trace %f', units='NA', desc='Trace of the diffusion tensor(s) used in the test function.') onedtfarange = traits.List(traits.Float, argstr='-onedtfarange %s', minlen=2, maxlen=2, units='NA', desc=('Minimum and maximum FA for the single tensor ' 'synthetic data.')) onedtfastep = traits.Float(argstr='-onedtfastep %f', units='NA', desc=('FA step size controlling how many steps there are ' 'between the minimum and maximum FA settings.')) twodtfarange = traits.List(traits.Float, argstr='-twodtfarange %s', minlen=2, maxlen=2, units='NA', desc=('Minimum and maximum FA for the two tensor ' 'synthetic data. FA is varied for both tensors ' 'to give all the different permutations.')) twodtfastep = traits.Float(argstr='-twodtfastep %f', units='NA', desc=('FA step size controlling how many steps there are ' 'between the minimum and maximum FA settings ' 'for the two tensor cases.')) twodtanglerange = traits.List(traits.Float, argstr='-twodtanglerange %s', minlen=2, maxlen=2, units='NA', desc=('Minimum and maximum crossing angles ' 'between the two fibres.')) twodtanglestep = traits.Float(argstr='-twodtanglestep %f', units='NA', desc=('Angle step size controlling how many steps there are ' 'between the minimum and maximum crossing angles for ' 'the two tensor cases.')) twodtmixmax = traits.Float(argstr='-twodtmixmax %f', units='NA', desc=('Mixing parameter controlling the proportion of one fibre population ' 'to the other. The minimum mixing parameter is (1 - twodtmixmax).')) twodtmixstep = traits.Float(argstr='-twodtmixstep %f', units='NA', desc=('Mixing parameter step size for the two tensor cases. ' 'Specify how many mixing parameter increments to use.')) seed = traits.Float(argstr='-seed %f', units='NA', desc='Specifies the random seed to use for noise generation in simulation trials.') class SFPICOCalibDataOutputSpec(TraitedSpec): PICOCalib = File(exists=True, desc='Calibration dataset') calib_info = File(exists=True, desc='Calibration dataset') class SFPICOCalibData(StdOutCommandLine): """ Generates Spherical Function PICo Calibration Data. SFPICOCalibData creates synthetic data for use with SFLUTGen. The synthetic data is generated using a mixture of gaussians, in the same way datasynth generates data. Each voxel of data models a slightly different fibre configuration (varying FA and fibre- crossings) and undergoes a random rotation to help account for any directional bias in the chosen acquisition scheme. A second file, which stores information about the datafile, is generated along with the datafile. Example 1 --------- To create a calibration dataset using the default settings >>> import nipype.interfaces.camino as cam >>> calib = cam.SFPICOCalibData() >>> calib.inputs.scheme_file = 'A.scheme' >>> calib.inputs.snr = 20 >>> calib.inputs.info_file = 'PICO_calib.info' >>> calib.run() # doctest: +SKIP The default settings create a large dataset (249,231 voxels), of which 3401 voxels contain a single fibre population per voxel and the rest of the voxels contain two fibre-populations. 
The amount of data produced can be varied by specifying the ranges and steps of the parameters for both the one and two fibre datasets used. Example 2 --------- To create a custom calibration dataset >>> import nipype.interfaces.camino as cam >>> calib = cam.SFPICOCalibData() >>> calib.inputs.scheme_file = 'A.scheme' >>> calib.inputs.snr = 20 >>> calib.inputs.info_file = 'PICO_calib.info' >>> calib.inputs.twodtfarange = [0.3, 0.9] >>> calib.inputs.twodtfastep = 0.02 >>> calib.inputs.twodtanglerange = [0, 0.785] >>> calib.inputs.twodtanglestep = 0.03925 >>> calib.inputs.twodtmixmax = 0.8 >>> calib.inputs.twodtmixstep = 0.1 >>> calib.run() # doctest: +SKIP This would provide 76,313 voxels of synthetic data, where 3401 voxels simulate the one fibre cases and 72,912 voxels simulate the various two fibre cases. However, care should be taken to ensure that enough data is generated for calculating the LUT. # doctest: +SKIP """ _cmd = 'sfpicocalibdata' input_spec=SFPICOCalibDataInputSpec output_spec=SFPICOCalibDataOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['PICOCalib'] = os.path.abspath(self._gen_outfilename()) outputs['calib_info'] = os.path.abspath(self.inputs.info_file) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.scheme_file) return name + '_PICOCalib.Bfloat' class SFLUTGenInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, desc='Voxel-order data of the spherical functions peaks.') info_file = File(argstr='-infofile %s', mandatory=True, desc=('The Info file that corresponds to the calibration ' 'datafile used in the reconstruction.')) outputstem = traits.Str('LUT', argstr='-outputstem %s', desc=('Define the name of the generated luts. The form of the filenames will be ' '[outputstem]_oneFibreSurfaceCoeffs.Bdouble and ' '[outputstem]_twoFibreSurfaceCoeffs.Bdouble'), usedefault=True) pdf = traits.Enum('bingham', 'watson', argstr='-pdf %s', desc=('Sets the distribution to use for the calibration. The default is the Bingham ' 'distribution, which allows elliptical probability density contours. ' 'Currently supported options are: ' ' bingham - The Bingham distribution, which allows elliptical probability ' ' density contours. ' ' watson - The Watson distribution. This distribution is rotationally symmetric.'), usedefault=True) binincsize = traits.Int(argstr='-binincsize %d', units='NA', desc=('Sets the size of the bins. In the case of 2D histograms such as the ' 'Bingham, the bins are always square. Default is 1.')) minvectsperbin = traits.Int(argstr='-minvectsperbin %d', units='NA', desc=('Specifies the minimum number of fibre-orientation estimates a bin ' 'must contain before it is used in the lut line/surface generation. ' 'Default is 50. If you get the error "no fibre-orientation estimates ' 'in histogram!", the calibration data set is too small to get enough ' 'samples in any of the histogram bins. You can decrease the minimum ' 'number per bin to get things running in quick tests, but the sta- ' 'tistics will not be reliable and for serious applications, you need ' 'to increase the size of the calibration data set until the error goes.')) directmap = traits.Bool(argstr='-directmap', desc=('Use direct mapping between the eigenvalues and the distribution parameters ' 'instead of the log of the eigenvalues.')) order = traits.Int(argstr='-order %d', units='NA', desc=('The order of the polynomial fitting the surface. Order 1 is linear. 
' 'Order 2 (default) is quadratic.')) class SFLUTGenOutputSpec(TraitedSpec): lut_one_fibre = File(exists=True, desc='PICo lut for one-fibre model') lut_two_fibres = File(exists=True, desc='PICo lut for two-fibre model') class SFLUTGen(StdOutCommandLine): """ Generates PICo lookup tables (LUT) for multi-fibre methods such as PASMRI and Q-Ball. SFLUTGen creates the lookup tables for the generalized multi-fibre implementation of the PICo tractography algorithm. The outputs of this utility are either surface or line coefficients up to a given order. The calibration can be performed for different distributions, such as the Bingham and Watson distributions. This utility uses calibration data generated from SFPICOCalibData and peak information created by SFPeaks. The utility outputs two lut's, *_oneFibreSurfaceCoeffs.Bdouble and *_twoFibreSurfaceCoeffs.Bdouble. Each of these files contains big- endian doubles as standard. The format of the output is: dimensions (1 for Watson, 2 for Bingham) order (the order of the polynomial) coefficient_1 coefficient_2 ... coefficient_N In the case of the Watson, there is a single set of coefficients, which are ordered: constant, x, x^2, ..., x^order. In the case of the Bingham, there are two sets of coefficients (one for each surface), ordered so that: for j = 1 to order for k = 1 to order coeff_i = x^j * y^k where j+k < order Example --------- To create a calibration dataset using the default settings >>> import nipype.interfaces.camino as cam >>> lutgen = cam.SFLUTGen() >>> lutgen.inputs.in_file = 'QSH_peaks.Bdouble' >>> lutgen.inputs.info_file = 'PICO_calib.info' >>> lutgen.run() # doctest: +SKIP """ _cmd = 'sflutgen' input_spec=SFLUTGenInputSpec output_spec=SFLUTGenOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['lut_one_fibre'] = self.inputs.outputstem + '_oneFibreSurfaceCoeffs.Bdouble' outputs['lut_two_fibres'] = self.inputs.outputstem + '_twoFibreSurfaceCoeffs.Bdouble' return outputs def _gen_outfilename(self): return '/dev/null' nipype-0.9.2/nipype/interfaces/camino/connectivity.py000066400000000000000000000142461227300005300230120ustar00rootroot00000000000000""" Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import (traits, TraitedSpec, File, CommandLine, CommandLineInputSpec, isdefined) from nipype.utils.filemanip import split_filename import os class ConmatInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, position=1, desc='Streamlines as generated by the Track interface') target_file = File(exists=True, argstr='-targetfile %s', mandatory=True, position=2, desc='An image containing targets, as used in ProcStreamlines interface.') scalar_file = File(exists=True, argstr='-scalarfile %s', position=3, desc=('Optional scalar file for computing tract-based statistics. ' 'Must be in the same space as the target file.'), requires=['tract_stat']) targetname_file = File(exists=True, argstr='-targetnamefile %s', position=4, desc=('Optional names of targets. This file should contain one entry per line, ' 'with the target intensity followed by the name, separated by white space. ' 'For example: ' ' 1 some_brain_region ' ' 2 some_other_region ' 'These names will be used in the output. The names themselves should not ' 'contain spaces or commas. 
The labels may be in any order but the output ' 'matrices will be ordered by label intensity.')) tract_stat = traits.Enum("mean", "min", "max", "sum", "median", "var", argstr='-tractstat %s', units='NA', desc=("Tract statistic to use. See TractStats for other options."), requires=['scalar_file']) output_root = File(argstr='-outputroot %s', genfile=True, desc=('filename root prepended onto the names of the output files. ' 'The extension will be determined from the input.')) class ConmatOutputSpec(TraitedSpec): conmat_sc = File(exists=True, desc='Connectivity matrix in CSV file.') conmat_ts = File(desc='Tract statistics in CSV file.') class Conmat(CommandLine): """ Creates a connectivity matrix using a 3D label image (the target image) and a set of streamlines. The connectivity matrix records how many stream- lines connect each pair of targets, and optionally the mean tractwise statistic (eg tract-averaged FA, or length). The output is a comma separated variable file or files. The first row of the output matrix is label names. Label names may be defined by the user, otherwise they are assigned based on label intensity. Starting from the seed point, we move along the streamline until we find a point in a labeled region. This is done in both directions from the seed point. Streamlines are counted if they connect two target regions, one on either side of the seed point. Only the labeled region closest to the seed is counted, for example if the input contains two streamlines: 1: A-----B------SEED---C 2: A--------SEED----------- then the output would be A,B,C 0,0,0 0,0,1 0,1,0 There are zero connections to A because in streamline 1, the connection to B is closer to the seed than the connection to A, and in streamline 2 there is no region reached in the other direction. The connected target regions can have the same label, as long as the seed point is outside of the labeled region and both ends connect to the same label (which may be in different locations). Therefore this is allowed: A------SEED-------A Such fibers will add to the diagonal elements of the matrix. To remove these entries, run procstreamlines with -endpointfile before running conmat. If the seed point is inside a labled region, it counts as one end of the connection. So ----[SEED inside A]---------B counts as a connection between A and B, while C----[SEED inside A]---------B counts as a connection between A and C, because C is closer to the seed point. In all cases, distance to the seed point is defined along the streamline path. Example 1 --------- To create a standard connectivity matrix based on streamline counts. >>> import nipype.interfaces.camino as cam >>> conmat = cam.Conmat() >>> conmat.inputs.in_file = 'tracts.Bdouble' >>> conmat.inputs.target_file = 'atlas.nii.gz' >>> conmat.run() # doctest: +SKIP Example 1 --------- To create a standard connectivity matrix and mean tractwise FA statistics. 
>>> import nipype.interfaces.camino as cam >>> conmat = cam.Conmat() >>> conmat.inputs.in_file = 'tracts.Bdouble' >>> conmat.inputs.target_file = 'atlas.nii.gz' >>> conmat.inputs.scalar_file = 'fa.nii.gz' >>> conmat.run() # doctest: +SKIP """ _cmd = 'conmat' input_spec=ConmatInputSpec output_spec=ConmatOutputSpec def _list_outputs(self): outputs = self.output_spec().get() output_root = self._gen_outputroot() outputs['conmat_sc'] = os.path.abspath(output_root + "sc.csv") outputs['conmat_ts'] = os.path.abspath(output_root + "ts.csv") return outputs def _gen_outfilename(self): return self._gen_outputroot() def _gen_outputroot(self): output_root = self.inputs.output_root if not isdefined(output_root): output_root = self._gen_filename('output_root') return output_root def _gen_filename(self, name): if name == 'output_root': _, filename , _ = split_filename(self.inputs.in_file) filename = filename + "_" return filename nipype-0.9.2/nipype/interfaces/camino/convert.py000066400000000000000000000672671227300005300217670ustar00rootroot00000000000000""" Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os from nipype.interfaces.base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, File, StdOutCommandLine, StdOutCommandLineInputSpec, isdefined) from nipype.utils.filemanip import split_filename class Image2VoxelInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='-4dimage %s', mandatory=True, position=1, desc='4d image file') #TODO convert list of files on the fly # imagelist = File(exists=True, argstr='-imagelist %s', # mandatory=True, position=1, # desc='Name of a file containing a list of 3D images') # # imageprefix = traits.Str(argstr='-imageprefix %s', position=3, # desc='Path to prepend onto filenames in the imagelist.') out_type = traits.Enum("float", "char", "short", "int", "long", "double", argstr='-outputdatatype %s', position=2, desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', usedefault=True) class Image2VoxelOutputSpec(TraitedSpec): voxel_order = File(exists=True, desc='path/name of 4D volume in voxel order') class Image2Voxel(StdOutCommandLine): """ Converts Analyze / NIFTI / MHA files to voxel order. Converts scanner-order data in a supported image format to voxel-order data. Either takes a 4D file (all measurements in single image) or a list of 3D images. 
Examples -------- >>> import nipype.interfaces.camino as cmon >>> img2vox = cmon.Image2Voxel() >>> img2vox.inputs.in_file = '4d_dwi.nii' >>> img2vox.run() # doctest: +SKIP """ _cmd = 'image2voxel' input_spec = Image2VoxelInputSpec output_spec = Image2VoxelOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['voxel_order'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '.B'+ self.inputs.out_type class FSL2SchemeInputSpec(StdOutCommandLineInputSpec): bvec_file = File(exists=True, argstr='-bvecfile %s', mandatory=True, position=1, desc='b vector file') bval_file = File(exists=True, argstr='-bvalfile %s', mandatory=True, position=2, desc='b value file') numscans = traits.Int(argstr='-numscans %d', units='NA', desc="Output all measurements numerous (n) times, used when combining multiple scans from the same imaging session.") interleave = traits.Bool(argstr='-interleave', desc="Interleave repeated scans. Only used with -numscans.") bscale = traits.Float(argstr='-bscale %d', units='NA', desc="Scaling factor to convert the b-values into different units. Default is 10^6.") diffusiontime = traits.Float(argstr = '-diffusiontime %f', units = 'NA', desc="Diffusion time") flipx = traits.Bool(argstr='-flipx', desc="Negate the x component of all the vectors.") flipy = traits.Bool(argstr='-flipy', desc="Negate the y component of all the vectors.") flipz = traits.Bool(argstr='-flipz', desc="Negate the z component of all the vectors.") usegradmod = traits.Bool(argstr='-usegradmod', desc="Use the gradient magnitude to scale b. This option has no effect if your gradient directions have unit magnitude.") class FSL2SchemeOutputSpec(TraitedSpec): scheme = File(exists=True, desc='Scheme file') class FSL2Scheme(StdOutCommandLine): """ Converts b-vectors and b-values from FSL format to a Camino scheme file. 
Examples -------- >>> import nipype.interfaces.camino as cmon >>> makescheme = cmon.FSL2Scheme() >>> makescheme.inputs.bvec_file = 'bvecs' >>> makescheme.inputs.bvec_file = 'bvals' >>> makescheme.run() # doctest: +SKIP """ _cmd = 'fsl2scheme' input_spec=FSL2SchemeInputSpec output_spec=FSL2SchemeOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['scheme'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.bvec_file) return name + '.scheme' class VtkStreamlinesInputSpec(StdOutCommandLineInputSpec): inputmodel = traits.Enum('raw', 'voxels', argstr='-inputmodel %s', desc='input model type (raw or voxels)', usedefault=True) in_file = File(exists=True, argstr=' < %s', mandatory=True, position=-2, desc='data file') voxeldims = traits.List(traits.Int, desc = 'voxel dimensions in mm', argstr='-voxeldims %s', minlen=3, maxlen=3, position=4, units='mm') seed_file = File(exists=False, argstr='-seedfile %s', position=1, desc='image containing seed points') target_file = File(exists=False, argstr='-targetfile %s', position=2, desc='image containing integer-valued target regions') scalar_file = File(exists=False, argstr='-scalarfile %s', position=3, desc='image that is in the same physical space as the tracts') colourorient = traits.Bool(argstr='-colourorient', desc="Each point on the streamline is coloured by the local orientation.") interpolatescalars = traits.Bool(argstr='-interpolatescalars', desc="the scalar value at each point on the streamline is calculated by trilinear interpolation") interpolate = traits.Bool(argstr='-interpolate', desc="the scalar value at each point on the streamline is calculated by trilinear interpolation") class VtkStreamlinesOutputSpec(TraitedSpec): vtk = File(exists=True, desc='Streamlines in VTK format') class VtkStreamlines(StdOutCommandLine): """ Use vtkstreamlines to convert raw or voxel format streamlines to VTK polydata Examples -------- >>> import nipype.interfaces.camino as cmon >>> vtk = cmon.VtkStreamlines() >>> vtk.inputs.in_file = 'tract_data.Bfloat' >>> vtk.inputs.voxeldims = [1,1,1] >>> vtk.run() # doctest: +SKIP """ _cmd = 'vtkstreamlines' input_spec=VtkStreamlinesInputSpec output_spec=VtkStreamlinesOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['vtk'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '.vtk' class ProcStreamlinesInputSpec(StdOutCommandLineInputSpec): inputmodel = traits.Enum('raw', 'voxels', argstr='-inputmodel %s', desc='input model type (raw or voxels)', usedefault=True) in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, position=1, desc='data file') maxtractpoints= traits.Int(argstr='-maxtractpoints %d', units='NA', desc="maximum number of tract points") mintractpoints= traits.Int(argstr='-mintractpoints %d', units='NA', desc="minimum number of tract points") maxtractlength= traits.Int(argstr='-maxtractlength %d', units='mm', desc="maximum length of tracts") mintractlength= traits.Int(argstr='-mintractlength %d', units='mm', desc="minimum length of tracts") datadims = traits.List(traits.Int, desc = 'data dimensions in voxels', argstr='-datadims %s', minlen=3, maxlen=3, units='voxels') voxeldims = traits.List(traits.Int, desc = 'voxel dimensions in mm', argstr='-voxeldims %s', minlen=3, maxlen=3, units='mm') seedpointmm = traits.List(traits.Int, desc = 'The coordinates of a single seed 
point for tractography in mm', argstr='-seedpointmm %s', minlen=3, maxlen=3, units='mm') seedpointvox = traits.List(traits.Int, desc = 'The coordinates of a single seed point for tractography in voxels', argstr='-seedpointvox %s', minlen=3, maxlen=3, units='voxels') seedfile = File(exists=False, argstr='-seedfile %s', desc='Image Containing Seed Points') regionindex = traits.Int(argstr='-regionindex %d', units='mm', desc="index of specific region to process") iterations = traits.Float(argstr='-iterations %d', units='NA', desc="Number of streamlines generated for each seed. Not required when outputting streamlines, but needed to create PICo images. The default is 1 if the output is streamlines, and 5000 if the output is connection probability images.") targetfile = File(exists=False, argstr='-targetfile %s', desc='Image containing target volumes.') allowmultitargets = traits.Bool(argstr='-allowmultitargets', desc="Allows streamlines to connect to multiple target volumes.") directional = traits.List(traits.Int, desc = 'Splits the streamlines at the seed point and computes separate connection probabilities for each segment. Streamline segments are grouped according to their dot product with the vector (X, Y, Z). The ideal vector will be tangential to the streamline trajectory at the seed, such that the streamline projects from the seed along (X, Y, Z) and -(X, Y, Z). However, it is only necessary for the streamline trajectory to not be orthogonal to (X, Y, Z).', argstr='-directional %s', minlen=3, maxlen=3, units='NA') waypointfile = File(exists=False, argstr='-waypointfile %s', desc='Image containing waypoints. Waypoints are defined as regions of the image with the same intensity, where 0 is background and any value > 0 is a waypoint.') truncateloops = traits.Bool(argstr='-truncateloops', desc="This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, it is truncated upon a second entry to the waypoint.") discardloops = traits.Bool(argstr='-discardloops', desc="This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, the entire streamline is discarded upon a second entry to the waypoint.") exclusionfile = File(exists=False, argstr='-exclusionfile %s', desc='Image containing exclusion ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.') truncateinexclusion = traits.Bool(argstr='-truncateinexclusion', desc="Retain segments of a streamline before entry to an exclusion ROI.") endpointfile = File(exists=False, argstr='-endpointfile %s', desc='Image containing endpoint ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.') resamplestepsize = traits.Float(argstr='-resamplestepsize %d', units='NA', desc="Each point on a streamline is tested for entry into target, exclusion or waypoint volumes. If the length between points on a tract is not much smaller than the voxel length, then streamlines may pass through part of a voxel without being counted. To avoid this, the program resamples streamlines such that the step size is one tenth of the smallest voxel dimension in the image. This increases the size of raw or oogl streamline output and incurs some performance penalty. The resample resolution can be controlled with this option or disabled altogether by passing a negative step size or by passing the -noresample option.") noresample = traits.Bool(argstr='-noresample', desc="Disables resampling of input streamlines. 
Resampling is automatically disabled if the input model is voxels.") outputtracts = traits.Bool(argstr='-outputtracts', desc="Output streamlines in raw binary format.") outputroot = File(exists=False, argstr='-outputroot %s', desc='root directory for output') gzip = traits.Bool(argstr='-gzip', desc="save the output image in gzip format") outputcp = traits.Bool(argstr='-outputcp', desc="output the connection probability map (Analyze image, float)") outputsc = traits.Bool(argstr='-outputsc', desc="output the connection probability map (raw streamlines, int)") outputacm = traits.Bool(argstr='-outputacm', desc="output all tracts in a single connection probability map (Analyze image)") outputcbs = traits.Bool(argstr='-outputcbs', desc="outputs connectivity-based segmentation maps; requires target outputfile") class ProcStreamlinesOutputSpec(TraitedSpec): proc = File(exists=True, desc='Processed Streamlines') class ProcStreamlines(StdOutCommandLine): """ Process streamline data This program does post-processing of streamline output from track. It can either output streamlines or connection probability maps. * http://web4.cs.ucl.ac.uk/research/medic/camino/pmwiki/pmwiki.php?n=Man.procstreamlines Examples -------- >>> import nipype.interfaces.camino as cmon >>> proc = cmon.ProcStreamlines() >>> proc.inputs.in_file = 'tract_data.Bfloat' >>> proc.run() # doctest: +SKIP """ _cmd = 'procstreamlines' input_spec=ProcStreamlinesInputSpec output_spec=ProcStreamlinesOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['proc'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_proc' class TractShredderInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='< %s', mandatory=True, position=-2, desc='tract file') offset = traits.Int(argstr='%d', units='NA', desc='initial offset of offset tracts', position=1) bunchsize = traits.Int(argstr='%d', units='NA', desc='reads and outputs a group of bunchsize tracts', position=2) space = traits.Int(argstr='%d', units='NA', desc='skips space tracts', position=3) class TractShredderOutputSpec(TraitedSpec): shredded = File(exists=True, desc='Shredded tract file') class TractShredder(StdOutCommandLine): """ Extracts bunches of streamlines. tractshredder works in a similar way to shredder, but processes streamlines instead of scalar data. The input is raw streamlines, in the format produced by track or procstreamlines. The program first makes an initial offset of offset tracts. It then reads and outputs a group of bunchsize tracts, skips space tracts, and repeats until there is no more input. 
Examples -------- >>> import nipype.interfaces.camino as cmon >>> shred = cmon.TractShredder() >>> shred.inputs.in_file = 'tract_data.Bfloat' >>> shred.inputs.offset = 0 >>> shred.inputs.bunchsize = 1 >>> shred.inputs.space = 2 >>> shred.run() # doctest: +SKIP """ _cmd = 'tractshredder' input_spec=TractShredderInputSpec output_spec=TractShredderOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['shredded'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + "_shredded" class DT2NIfTIInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, position=1, desc='tract file') output_root = File(argstr='-outputroot %s', position=2, genfile=True, desc='filename root prepended onto the names of three output files.') header_file = File(exists=True, argstr='-header %s', mandatory=True, position=3, desc=' A Nifti .nii or .hdr file containing the header information') class DT2NIfTIOutputSpec(TraitedSpec): dt = File(exists=True, desc='diffusion tensors in NIfTI format') exitcode = File(exists=True, desc='exit codes from Camino reconstruction in NIfTI format') lns0 = File(exists=True, desc='estimated lns0 from Camino reconstruction in NIfTI format') class DT2NIfTI(CommandLine): """ Converts camino tensor data to NIfTI format Reads Camino diffusion tensors, and converts them to NIFTI format as three .nii files. """ _cmd = 'dt2nii' input_spec=DT2NIfTIInputSpec output_spec=DT2NIfTIOutputSpec def _list_outputs(self): outputs = self.output_spec().get() output_root = self._gen_outputroot() outputs["dt"] = os.path.abspath(output_root + "dt.nii") outputs["exitcode"] = os.path.abspath(output_root + "exitcode.nii") outputs["lns0"] = os.path.abspath(output_root + "lns0.nii") return outputs def _gen_outfilename(self): return self._gen_outputroot() def _gen_outputroot(self): output_root = self.inputs.output_root if not isdefined(output_root): output_root = self._gen_filename('output_root') return output_root def _gen_filename(self, name): if name == 'output_root': _, filename , _ = split_filename(self.inputs.in_file) filename = filename + "_" return filename class NIfTIDT2CaminoInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, position=1, desc='A NIFTI-1 dataset containing diffusion tensors. The tensors are assumed to be ' 'in lower-triangular order as specified by the NIFTI standard for the storage of ' 'symmetric matrices. This file should be either a .nii or a .hdr file.') s0_file = File(argstr='-s0 %s', exists=True, desc='File containing the unweighted signal for each voxel, may be a raw binary ' 'file (specify type with -inputdatatype) or a supported image file.') lns0_file = File(argstr='-lns0 %s', exists=True, desc='File containing the log of the unweighted signal for each voxel, may be a ' 'raw binary file (specify type with -inputdatatype) or a supported image file.') bgmask = File(argstr='-bgmask %s', exists=True, desc='Binary valued brain / background segmentation, may be a raw binary file ' '(specify type with -maskdatatype) or a supported image file.') scaleslope = traits.Float(argstr='-scaleslope %s', desc='A value v in the diffusion tensor is scaled to v * s + i. This is ' 'applied after any scaling specified by the input image. Default is 1.0.') scaleinter = traits.Float(argstr='-scaleinter %s', desc='A value v in the diffusion tensor is scaled to v * s + i. 
This is ' 'applied after any scaling specified by the input image. Default is 0.0.') uppertriangular = traits.Bool(argstr='-uppertriangular %s', desc = 'Specifies input in upper-triangular (VTK style) order.') class NIfTIDT2CaminoOutputSpec(TraitedSpec): out_file = File(desc='diffusion tensors data in Camino format') class NIfTIDT2Camino(CommandLine): """ Converts NIFTI-1 diffusion tensors to Camino format. The program reads the NIFTI header but does not apply any spatial transformations to the data. The NIFTI intensity scaling parameters are applied. The output is the tensors in Camino voxel ordering: [exit, ln(S0), dxx, dxy, dxz, dyy, dyz, dzz]. The exit code is set to 0 unless a background mask is supplied, in which case the code is 0 in brain voxels and -1 in background voxels. The value of ln(S0) in the output is taken from a file if one is supplied, otherwise it is set to 0. NOTE FOR FSL USERS - FSL's dtifit can output NIFTI tensors, but they are not stored in the usual way (which is using NIFTI_INTENT_SYMMATRIX). FSL's tensors follow the ITK / VTK "upper-triangular" convention, so you will need to use the -uppertriangular option to convert these correctly. """ _cmd = 'niftidt2camino' input_spec=NIfTIDT2CaminoInputSpec output_spec=NIfTIDT2CaminoOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = self._gen_filename('out_file') return outputs def _gen_filename(self, name): if name == 'out_file': _, filename , _ = split_filename(self.inputs.in_file) return filename class AnalyzeHeaderInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='< %s', mandatory=True, position=1, desc='Tensor-fitted data filename') scheme_file = File(exists=True, argstr='%s', position=2, desc=('Camino scheme file (b values / vectors, ' 'see camino.fsl2scheme)')) readheader = File(exists=True, argstr='-readheader %s', position=3, desc=('Reads header information from file and prints to ' 'stdout. If this option is not specified, then the ' 'program writes a header based on the other ' 'arguments.')) printimagedims = File(exists=True, argstr='-printimagedims %s', position=3, desc=('Prints image data and voxel dimensions as ' 'Camino arguments and exits.')) # How do we implement both file and enum (for the program) in one argument? # Is this option useful anyway? #-printprogargs #Prints data dimension (and type, if relevant) arguments for a specific # Camino program, where prog is one of shredder, scanner2voxel, # vcthreshselect, pdview, track. printprogargs = File(exists=True, argstr='-printprogargs %s', position=3, desc=('Prints data dimension (and type, if relevant) ' 'arguments for a specific Camino program, where ' 'prog is one of shredder, scanner2voxel, ' 'vcthreshselect, pdview, track.')) printintelbyteorder = File(exists=True, argstr='-printintelbyteorder %s', position=3, desc=('Prints 1 if the header is little-endian, ' '0 otherwise.')) printbigendian = File(exists=True, argstr='-printbigendian %s', position=3, desc=('Prints 1 if the header is big-endian, 0 ' 'otherwise.')) initfromheader = File(exists=True, argstr='-initfromheader %s', position=3, desc=('Reads header information from file and ' 'intializes a new header with the values read ' 'from the file. 
You may replace any ' 'combination of fields in the new header by ' 'specifying subsequent options.')) data_dims = traits.List(traits.Int, desc = 'data dimensions in voxels', argstr='-datadims %s', minlen=3, maxlen=3, units='voxels') voxel_dims = traits.List(traits.Float, desc = 'voxel dimensions in mm', argstr='-voxeldims %s', minlen=3, maxlen=3, units='mm') centre = traits.List(traits.Int, argstr='-centre %s', minlen=3, maxlen=3, units='mm', desc=('Voxel specifying origin of Talairach ' 'coordinate system for SPM, default [0 0 0].')) picoseed = traits.List(traits.Int, argstr='-picoseed %s', minlen=3, maxlen=3, desc=('Voxel specifying the seed (for PICo maps), ' 'default [0 0 0].'), units='mm') nimages = traits.Int(argstr='-nimages %d', units='NA', desc="Number of images in the img file. Default 1.") datatype = traits.Enum('byte', 'char', '[u]short', '[u]int', 'float', 'complex', 'double', argstr='-datatype %s', desc=('The char datatype is 8 bit (not the 16 bit ' 'char of Java), as specified by the Analyze ' '7.5 standard. The byte, ushort and uint ' 'types are not part of the Analyze ' 'specification but are supported by SPM.'), mandatory=True) offset = traits.Int(argstr='-offset %d', units='NA', desc=('According to the Analyze 7.5 standard, this is ' 'the byte offset in the .img file at which ' 'voxels start. This value can be negative to ' 'specify that the absolute value is applied for ' 'every image in the file.')) greylevels = traits.List(traits.Int, argstr='-gl %s', minlen=2, maxlen=2, desc=('Minimum and maximum greylevels. Stored as ' 'shorts in the header.'), units='NA') scaleslope = traits.Float(argstr='-scaleslope %d', units='NA', desc=('Intensities in the image are scaled by ' 'this factor by SPM and MRICro. Default is ' '1.0.')) scaleinter = traits.Float(argstr='-scaleinter %d', units='NA', desc=('Constant to add to the image intensities. ' 'Used by SPM and MRIcro.')) description = traits.String(argstr='-description %s', desc=('Short description - No spaces, max ' 'length 79 bytes. Will be null ' 'terminated automatically.')) intelbyteorder = traits.Bool(argstr='-intelbyteorder', desc=("Write header in intel byte order " "(little-endian).")) networkbyteorder = traits.Bool(argstr='-networkbyteorder', desc=("Write header in network byte order " "(big-endian). This is the default " "for new headers.")) class AnalyzeHeaderOutputSpec(TraitedSpec): header = File(exists=True, desc='Analyze header') class AnalyzeHeader(StdOutCommandLine): """ Create or read an Analyze 7.5 header file. Analyze image header, provides support for the most common header fields. Some fields, such as patient_id, are not currently supported. The program allows three nonstandard options: the field image_dimension.funused1 is the image scale. The intensity of each pixel in the associated .img file is (image value from file) * scale. Also, the origin of the Talairach coordinates (midline of the anterior commisure) are encoded in the field data_history.originator. These changes are included for compatibility with SPM. All headers written with this program are big endian by default. 
Example ------- >>> import nipype.interfaces.camino as cmon >>> hdr = cmon.AnalyzeHeader() >>> hdr.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> hdr.inputs.scheme_file = 'A.scheme' >>> hdr.inputs.data_dims = [256,256,256] >>> hdr.inputs.voxel_dims = [1,1,1] >>> hdr.run() # doctest: +SKIP """ _cmd = 'analyzeheader' input_spec=AnalyzeHeaderInputSpec output_spec=AnalyzeHeaderOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['header'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + ".hdr" nipype-0.9.2/nipype/interfaces/camino/dti.py000066400000000000000000001216371227300005300210570ustar00rootroot00000000000000""" Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, File, StdOutCommandLine, StdOutCommandLineInputSpec, isdefined) from nipype.utils.filemanip import split_filename import os class DTIFitInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=1, desc='voxel-order data filename') bgmask = File(argstr='-bgmask %s', exists=True, desc=('Provides the name of a file containing a background mask computed using, ' 'for example, FSL bet2 program. The mask file contains zero in background ' 'voxels and non-zero in foreground.')) scheme_file = File(exists=True, argstr='%s', mandatory=True, position=2, desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') non_linear = traits.Bool(argstr='-nonlinear', position=3, desc="Use non-linear fitting instead of the default linear regression to the log measurements. ") class DTIFitOutputSpec(TraitedSpec): tensor_fitted = File(exists=True, desc='path/name of 4D volume in voxel order') class DTIFit(StdOutCommandLine): """ Reads diffusion MRI data, acquired using the acquisition scheme detailed in the scheme file, from the data file. Use non-linear fitting instead of the default linear regression to the log measurements. The data file stores the diffusion MRI data in voxel order with the measurements stored in big-endian format and ordered as in the scheme file. The default input data type is four-byte float. The default output data type is eight-byte double. See modelfit and camino for the format of the data file and scheme file. The program fits the diffusion tensor to each voxel and outputs the results, in voxel order and as big-endian eight-byte doubles, to the standard output. The program outputs eight values in each voxel: [exit code, ln(S(0)), D_xx, D_xy, D_xz, D_yy, D_yz, D_zz]. An exit code of zero indicates no problems. For a list of other exit codes, see modelfit(1). The entry S(0) is an estimate of the signal at q=0. 
Example ------- >>> import nipype.interfaces.camino as cmon >>> fit = cmon.DTIFit() >>> fit.inputs.scheme_file = 'A.scheme' >>> fit.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> fit.run() # doctest: +SKIP """ _cmd = 'dtfit' input_spec=DTIFitInputSpec output_spec=DTIFitOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['tensor_fitted'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_DT.Bdouble' class DTMetricInputSpec(CommandLineInputSpec): eigen_data = File(exists=True, argstr='-inputfile %s', mandatory=True, desc='voxel-order data filename') metric = traits.Enum('fa','md','rd','l1', 'l2', 'l3', 'tr', 'ra', '2dfa','cl','cp','cs', argstr='-stat %s', mandatory=True, desc=('Specifies the metric to compute. Possible choices are: ' '"fa", "md", "rd", "l1", "l2", "l3", "tr", "ra", "2dfa", "cl", "cp" or "cs".')) inputdatatype = traits.Enum('double', 'float', 'long', 'int', 'short', 'char', argstr='-inputdatatype %s', usedefault=True, desc=('Specifies the data type of the input data. ' 'The data type can be any of the following strings: ' '"char", "short", "int", "long", "float" or "double".' 'Default is double data type')) outputdatatype = traits.Enum('double', 'float', 'long', 'int', 'short', 'char', argstr='-outputdatatype %s', usedefault=True, desc=('Specifies the data type of the output data. ' 'The data type can be any of the following strings: ' '"char", "short", "int", "long", "float" or "double".' 'Default is double data type')) data_header = File(argstr='-header %s', exists=True, desc=('A Nifti .nii or .nii.gz file containing the header information. ' 'Usually this will be the header of the raw data file from which ' 'the diffusion tensors were reconstructed.')) outputfile = File(argstr='-outputfile %s', genfile=True, desc=('Output name. Output will be a .nii.gz file if data_header is provided and' 'in voxel order with outputdatatype datatype (default: double) otherwise.')) class DTMetricOutputSpec(TraitedSpec): metric_stats = File(exists=True, desc='Diffusion Tensor statistics of the chosen metric') class DTMetric(CommandLine): """ Computes tensor metric statistics based on the eigenvalues l1 >= l2 >= l3 typically obtained from ComputeEigensystem. The full list of statistics is: = (l1 - l2) / l1 , a measure of linearity = (l2 - l3) / l1 , a measure of planarity = l3 / l1 , a measure of isotropy with: cl + cp + cs = 1 = first eigenvalue = second eigenvalue = third eigenvalue = l1 + l2 + l3 = tr / 3 = (l2 + l3) / 2 = fractional anisotropy. (Basser et al, J Magn Reson B 1996) = relative anisotropy (Basser et al, J Magn Reson B 1996) <2dfa> = 2D FA of the two minor eigenvalues l2 and l3 i.e. sqrt( 2 * [(l2 - )^2 + (l3 - )^2] / (l2^2 + l3^2) ) with: = (l2 + l3) / 2 Example ------- Compute the CP planar metric as float data type. 
>>> import nipype.interfaces.camino as cam >>> dtmetric = cam.DTMetric() >>> dtmetric.inputs.eigen_data = 'dteig.Bdouble' >>> dtmetric.inputs.metric = 'cp' >>> dtmetric.inputs.outputdatatype = 'float' >>> dtmetric.run() # doctest: +SKIP """ _cmd = 'dtshape' input_spec=DTMetricInputSpec output_spec=DTMetricOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['metric_stats'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): return self._gen_outputfile() def _gen_outputfile(self): outputfile = self.inputs.outputfile if not isdefined(outputfile): outputfile = self._gen_filename('outputfile') return outputfile def _gen_filename(self, name): if name == 'outputfile': _, name , _ = split_filename(self.inputs.eigen_data) metric = self.inputs.metric datatype= self.inputs.outputdatatype if isdefined(self.inputs.data_header): filename = name + '_' + metric + '.nii.gz' else: filename = name + '_' + metric + '.B' + datatype return filename class ModelFitInputSpec(StdOutCommandLineInputSpec): def _gen_model_options(): #@NoSelf """ Generate all possible permutations of < multi - tensor > < single - tensor > options """ single_tensor = ['dt', 'restore', 'algdt', 'nldt_pos', 'nldt', 'ldt_wtd'] multi_tensor = ['cylcyl', 'cylcyl_eq', 'pospos', 'pospos_eq', 'poscyl', 'poscyl_eq', 'cylcylcyl', 'cylcylcyl_eq', 'pospospos', 'pospospos_eq', 'posposcyl', 'posposcyl_eq', 'poscylcyl', 'poscylcyl_eq'] other = ['adc', 'ball_stick'] model_list = single_tensor model_list.extend(other) model_list.extend([multi + ' ' + single for multi in multi_tensor for single in single_tensor]) return model_list model = traits.Enum(_gen_model_options(), argstr='-model %s', mandatory=True, desc='Specifies the model to be fit to the data.') in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, desc='voxel-order data filename') inputdatatype = traits.Enum('float', 'char', 'short', 'int', 'long', 'double', argstr='-inputdatatype %s', desc='Specifies the data type of the input file: "char", "short", "int", "long", "float" or "double". The input file must have BIG-ENDIAN ordering. By default, the input type is "float".') scheme_file = File(exists=True, argstr='-schemefile %s', mandatory=True, desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') outputfile = File(argstr='-outputfile %s', desc='Filename of the output file.') outlier = File(argstr='-outliermap %s', exists=True, desc='Specifies the name of the file to contain the outlier map generated by the RESTORE algorithm.') noisemap = File(argstr='-noisemap %s', exists=True, desc='Specifies the name of the file to contain the estimated noise variance on the diffusion-weighted signal, generated by a weighted tensor fit. The data type of this file is big-endian double.') residualmap = File(argstr='-residualmap %s', exists=True, desc='Specifies the name of the file to contain the weighted residual errors after computing a weighted linear tensor fit. One value is produced per measurement, in voxel order.The data type of this file is big-endian double. Images of the residuals for each measurement can be extracted with shredder.') sigma = traits.Float(argstr='-sigma %G', desc='Specifies the standard deviation of the noise in the data. Required by the RESTORE algorithm.') bgthresh = traits.Float(argstr='-bgthresh %G', desc='Sets a threshold on the average q=0 measurement to separate foreground and background. 
The program does not process background voxels, but outputs the same number of values in background voxels and foreground voxels. Each value is zero in background voxels apart from the exit code which is -1.') bgmask = File(argstr='-bgmask %s', exists=True, desc='Provides the name of a file containing a background mask computed using, for example, FSL\'s bet2 program. The mask file contains zero in background voxels and non-zero in foreground.') cfthresh = traits.Float(argstr='-csfthresh %G', desc='Sets a threshold on the average q=0 measurement to determine which voxels are CSF. This program does not treat CSF voxels any different to other voxels.') fixedmodq = traits.List(traits.Float, argstr='-fixedmod %s', minlen=4, maxlen=4, desc='Specifies a spherical acquisition scheme with M measurements with q=0 and N measurements with |q|=Q and diffusion time tau. The N measurements with |q|=Q have unique directions. The program reads in the directions from the files in directory PointSets.') fixedbvalue = traits.List(traits.Float, argstr='-fixedbvalue %s', minlen=3, maxlen=3, desc='As above, but specifies . The resulting scheme is the same whether you specify b directly or indirectly using -fixedmodq.') tau = traits.Float(argstr='-tau %G', desc='Sets the diffusion time separately. This overrides the diffusion time specified in a scheme file or by a scheme index for both the acquisition scheme and in the data synthesis.') class ModelFitOutputSpec(TraitedSpec): fitted_data = File(exists=True, desc='output file of 4D volume in voxel order') class ModelFit(StdOutCommandLine): """ Fits models of the spin-displacement density to diffusion MRI measurements. This is an interface to various model fitting routines for diffusion MRI data that fit models of the spin-displacement density function. In particular, it will fit the diffusion tensor to a set of measurements as well as various other models including two or three-tensor models. The program can read input data from a file or can generate synthetic data using various test functions for testing and simulations. Example ------- >>> import nipype.interfaces.camino as cmon >>> fit = cmon.ModelFit() >>> fit.model = 'dt' >>> fit.inputs.scheme_file = 'A.scheme' >>> fit.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> fit.run() # doctest: +SKIP """ _cmd = 'modelfit' input_spec=ModelFitInputSpec output_spec=ModelFitOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['fitted_data'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_fit.Bdouble' class DTLUTGenInputSpec(StdOutCommandLineInputSpec): lrange = traits.List(traits.Float, desc = 'Index to one-tensor LUTs. This is the ratio L1/L3 and L2 / L3.' \ 'The LUT is square, with half the values calculated (because L2 / L3 cannot be less than L1 / L3 by definition).' \ 'The minimum must be >= 1. For comparison, a ratio L1 / L3 = 10 with L2 / L3 = 1 corresponds to an FA of 0.891, '\ 'and L1 / L3 = 15 with L2 / L3 = 1 corresponds to an FA of 0.929. The default range is 1 to 10.', \ argstr='-lrange %s', minlen=2, maxlen=2, position=1, units='NA') frange = traits.List(traits.Float, desc = 'Index to two-tensor LUTs. This is the fractional anisotropy \ of the two tensors. The default is 0.3 to 0.94', \ argstr='-frange %s', minlen=2, maxlen=2, position=1, units='NA') step = traits.Float(argstr='-step %f', units='NA', desc='Distance between points in the LUT.' 
\ 'For example, if lrange is 1 to 10 and the step is 0.1, LUT entries will be computed ' \ 'at L1 / L3 = 1, 1.1, 1.2 ... 10.0 and at L2 / L3 = 1.0, 1.1 ... L1 / L3.' \ 'For single tensor LUTs, the default step is 0.2, for two-tensor LUTs it is 0.02.') samples = traits.Int(argstr='-samples %d', units='NA', desc='The number of synthetic measurements to generate at each point in the LUT. The default is 2000.') snr = traits.Float(argstr='-snr %f', units='NA', desc='The signal to noise ratio of the unweighted (q = 0) measurements.'\ 'This should match the SNR (in white matter) of the images that the LUTs are used with.') bingham = traits.Bool(argstr='-bingham', desc="Compute a LUT for the Bingham PDF. This is the default.") acg = traits.Bool(argstr='-acg', desc="Compute a LUT for the ACG PDF.") watson = traits.Bool(argstr='-watson', desc="Compute a LUT for the Watson PDF.") inversion = traits.Int(argstr='-inversion %d', units='NA', desc='Index of the inversion to use. The default is 1 (linear single tensor inversion).') trace = traits.Float(argstr='-trace %G', units='NA', desc='Trace of the diffusion tensor(s) used in the test function in the LUT generation. The default is 2100E-12 m^2 s^-1.') scheme_file = File(argstr='-schemefile %s', mandatory=True, position=2, desc='The scheme file of the images to be processed using this LUT.') class DTLUTGenOutputSpec(TraitedSpec): dtLUT = File(exists=True, desc='Lookup Table') class DTLUTGen(StdOutCommandLine): """ Calibrates the PDFs for PICo probabilistic tractography. This program needs to be run once for every acquisition scheme. It outputs a lookup table that is used by the dtpicoparams program to find PICo PDF parameters for an image. The default single tensor LUT contains parameters of the Bingham distribution and is generated by supplying a scheme file and an estimated signal to noise in white matter regions of the (q=0) image. The default inversion is linear (inversion index 1). Advanced users can control several options, including the extent and resolution of the LUT, the inversion index, and the type of PDF. See dtlutgen(1) for details. Example ------- >>> import nipype.interfaces.camino as cmon >>> dtl = cmon.DTLUTGen() >>> dtl.inputs.snr = 16 >>> dtl.inputs.scheme_file = 'A.scheme' >>> dtl.run() # doctest: +SKIP """ _cmd = 'dtlutgen' input_spec=DTLUTGenInputSpec output_spec=DTLUTGenOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['dtLUT'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.scheme_file) return name + '.dat' class PicoPDFsInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='< %s', mandatory=True, position=1, desc='voxel-order data filename') inputmodel = traits.Enum('dt', 'multitensor', 'pds', argstr='-inputmodel %s', position=2, desc='input model type', usedefault=True) luts = traits.List(File(exists=True), argstr='-luts %s', minlen=1, maxlen=3, mandatory=True, desc='Files containing the lookup tables.'\ 'For tensor data, one lut must be specified for each type of inversion used in the image (one-tensor, two-tensor, three-tensor).'\ 'For pds, the number of LUTs must match -numpds (it is acceptable to use the same LUT several times - see example, above).'\ 'These LUTs may be generated with dtlutgen.') pdf = traits.Enum('bingham', 'watson', 'acg', argstr='-pdf %s', position=4, desc=' Specifies the PDF to use. There are three choices:'\ 'watson - The Watson distribution. 
This distribution is rotationally symmetric.'\ 'bingham - The Bingham distributionn, which allows elliptical probability density contours.'\ 'acg - The Angular Central Gaussian distribution, which also allows elliptical probability density contours', usedefault=True) directmap = traits.Bool(argstr='-directmap', desc="Only applicable when using pds as the inputmodel. Use direct mapping between the eigenvalues and the distribution parameters instead of the log of the eigenvalues.") maxcomponents = traits.Int(argstr='-maxcomponents %d', units='NA', desc='The maximum number of tensor components in a voxel (default 2) for multitensor data.'\ 'Currently, only the default is supported, but future releases may allow the input of three-tensor data using this option.') numpds = traits.Int(argstr='-numpds %d', units='NA', desc='The maximum number of PDs in a voxel (default 3) for PD data.' \ 'This option determines the size of the input and output voxels.' \ 'This means that the data file may be large enough to accomodate three or more PDs,'\ 'but does not mean that any of the voxels are classified as containing three or more PDs.') class PicoPDFsOutputSpec(TraitedSpec): pdfs = File(exists=True, desc='path/name of 4D volume in voxel order') class PicoPDFs(StdOutCommandLine): """ Constructs a spherical PDF in each voxel for probabilistic tractography. Example ------- >>> import nipype.interfaces.camino as cmon >>> pdf = cmon.PicoPDFs() >>> pdf.inputs.inputmodel = 'dt' >>> pdf.inputs.luts = ['lut_file'] >>> pdf.inputs.in_file = 'voxel-order_data.Bfloat' >>> pdf.run() # doctest: +SKIP """ _cmd = 'picopdfs' input_spec=PicoPDFsInputSpec output_spec=PicoPDFsOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['pdfs'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_pdfs.Bdouble' class TrackInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, position=1, desc='input data file') seed_file = File(exists=True, argstr='-seedfile %s', position=2, desc='seed file') inputmodel = traits.Enum('dt', 'multitensor', 'sfpeak', 'pico', 'repbs_dt', 'repbs_multitensor', 'ballstick', 'wildbs_dt', 'bayesdirac', 'bayesdirac_dt', argstr='-inputmodel %s', desc='input model type', usedefault=True) inputdatatype = traits.Enum('float', 'double', argstr='-inputdatatype %s', desc='input file type') gzip = traits.Bool(argstr='-gzip', desc="save the output image in gzip format") maxcomponents = traits.Int(argstr='-maxcomponents %d', units='NA', desc="The maximum number of tensor components in a voxel. This determines the size of the input file and does not say anything about the voxel classification. The default is 2 if the input model is multitensor and 1 if the input model is dt.") numpds = traits.Int(argstr='-numpds %d', units='NA', desc="The maximum number of PDs in a voxel for input models sfpeak and pico. The default is 3 for input model sfpeak and 1 for input model pico. This option determines the size of the voxels in the input file and does not affect tracking. 
For tensor data, use the -maxcomponents option.") data_dims = traits.List(traits.Int, desc='data dimensions in voxels', argstr='-datadims %s', minlen=3, maxlen=3, units='voxels') voxel_dims = traits.List(traits.Float, desc='voxel dimensions in mm', argstr='-voxeldims %s', minlen=3, maxlen=3, units='mm') ipthresh = traits.Float(argstr='-ipthresh %f', desc='Curvature threshold for tracking, expressed as the minimum dot product between two streamline orientations calculated over the length of a voxel. If the dot product between the previous and current directions is less than this threshold, then the streamline terminates. The default setting will terminate fibres that curve by more than 80 degrees. Set this to -1.0 to disable curvature checking completely.') curvethresh = traits.Float(argstr='-curvethresh %f', desc='Curvature threshold for tracking, expressed as the maximum angle (in degrees) between between two streamline orientations calculated over the length of a voxel. If the angle is greater than this, then the streamline terminates.') anisthresh = traits.Float(argstr='-anisthresh %f', desc='Terminate fibres that enter a voxel with lower anisotropy than the threshold.') anisfile = File(argstr='-anisfile %s', exists=True, desc='File containing the anisotropy map. This is required to apply an anisotropy threshold with non tensor data. If the map issupplied it is always used, even in tensor data.') outputtracts = traits.Enum('float', 'double', 'oogl', argstr='-outputtracts %s', desc='output tract file type') out_file = File(argstr='-outputfile %s', position= -1, genfile=True, desc='output data file') output_root = File(exists=False, argstr='-outputroot %s', position= -1, desc='root directory for output') class TrackOutputSpec(TraitedSpec): tracked = File(exists=True, desc='output file containing reconstructed tracts') class Track(CommandLine): """ Performs tractography using one of the following models: dt', 'multitensor', 'pds', 'pico', 'bootstrap', 'ballstick', 'bayesdirac' Example ------- >>> import nipype.interfaces.camino as cmon >>> track = cmon.Track() >>> track.inputs.inputmodel = 'dt' >>> track.inputs.in_file = 'data.Bfloat' >>> track.inputs.seed_file = 'seed_mask.nii' >>> track.run() # doctest: +SKIP """ _cmd = 'track' input_spec = TrackInputSpec output_spec = TrackOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['tracked'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name is 'out_file': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_tracked' class TrackDT(Track): """ Performs streamline tractography using tensor data Example ------- >>> import nipype.interfaces.camino as cmon >>> track = cmon.TrackDT() >>> track.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> track.inputs.seed_file = 'seed_mask.nii' >>> track.run() # doctest: +SKIP """ def __init__(self, command=None, **inputs): inputs["inputmodel"] = "dt" return super(TrackDT, self).__init__(command, **inputs) class TrackPICoInputSpec(TrackInputSpec): pdf = traits.Enum('bingham', 'watson', 'acg', argstr='-pdf %s', desc='Specifies the model for PICo parameters. The default is "bingham.') iterations = traits.Int(argstr='-iterations %d', units='NA', desc="Number of streamlines to generate at each seed point. 
The default is 5000.") class TrackPICo(Track): """ Performs streamline tractography using the Probabilistic Index of Connectivity (PICo) algorithm Example ------- >>> import nipype.interfaces.camino as cmon >>> track = cmon.TrackPICo() >>> track.inputs.in_file = 'pdfs.Bfloat' >>> track.inputs.seed_file = 'seed_mask.nii' >>> track.run() # doctest: +SKIP """ input_spec = TrackPICoInputSpec def __init__(self, command=None, **inputs): inputs["inputmodel"] = "pico" return super(TrackPICo, self).__init__(command, **inputs) class TrackBayesDiracInputSpec(TrackInputSpec): scheme_file = File(argstr='-schemefile %s', mandatory=True, exists=True, desc=('The scheme file corresponding to the data being ' 'processed.')) iterations = traits.Int(argstr='-iterations %d', units='NA', desc=("Number of streamlines to generate at each " "seed point. The default is 5000.")) pdf = traits.Enum('bingham', 'watson', 'acg', argstr='-pdf %s', desc='Specifies the model for PICo priors (not the curvature priors). The default is "bingham".') pointset = traits.Int(argstr='-pointset %s', desc='Index to the point set to use for Bayesian likelihood calculation. The index specifies a set of evenly distributed points on the unit sphere, where each point x defines two possible step directions (x or -x) for the streamline path. A larger number indexes a larger point set, which gives higher angular resolution at the expense of computation time. The default is index 1, which gives 1922 points, index 0 gives 1082 points, index 2 gives 3002 points.') datamodel = traits.Enum('cylsymmdt', 'ballstick', argstr='-datamodel %s', desc='Model of the data for Bayesian tracking. The default model is "cylsymmdt", a diffusion tensor with cylindrical symmetry about e_1, ie L1 >= L_2 = L_3. The other model is "ballstick", the partial volume model (see ballstickfit).') curvepriork = traits.Float(argstr='-curvepriork %G', desc='Concentration parameter for the prior distribution on fibre orientations given the fibre orientation at the previous step. Larger values of k make curvature less likely.') curvepriorg = traits.Float(argstr='-curvepriorg %G', desc='Concentration parameter for the prior distribution on fibre orientations given the fibre orientation at the previous step. Larger values of g make curvature less likely.') extpriorfile = File(exists=True, argstr='-extpriorfile %s', desc='Path to a PICo image produced by picopdfs. The PDF in each voxel is used as a prior for the fibre orientation in Bayesian tracking. The prior image must be in the same space as the diffusion data.') extpriordatatype = traits.Enum('float', 'double', argstr='-extpriordatatype %s', desc='Datatype of the prior image. 
The default is "double".') class TrackBayesDirac(Track): """ Performs streamline tractography using a Bayesian tracking with Dirac priors Example ------- >>> import nipype.interfaces.camino as cmon >>> track = cmon.TrackBayesDirac() >>> track.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> track.inputs.seed_file = 'seed_mask.nii' >>> track.inputs.scheme_file = 'bvecs.scheme' >>> track.run() # doctest: +SKIP """ input_spec = TrackBayesDiracInputSpec def __init__(self, command=None, **inputs): inputs["inputmodel"] = "bayesdirac" return super(TrackBayesDirac, self).__init__(command, **inputs) class TrackBallStick(Track): """ Performs streamline tractography using ball-stick fitted data Example ------- >>> import nipype.interfaces.camino as cmon >>> track = cmon.TrackBallStick() >>> track.inputs.in_file = 'ballstickfit_data.Bfloat' >>> track.inputs.seed_file = 'seed_mask.nii' >>> track.run() # doctest: +SKIP """ def __init__(self, command=None, **inputs): inputs["inputmodel"] = "ballstick" return super(TrackBallStick, self).__init__(command, **inputs) class TrackBootstrapInputSpec(TrackInputSpec): scheme_file = File(argstr='-schemefile %s', mandatory=True, exists=True, desc='The scheme file corresponding to the data being processed.') iterations = traits.Int(argstr='-iterations %d', units='NA', desc="Number of streamlines to generate at each seed point.") inversion = traits.Int(argstr='-inversion %s', desc = 'Tensor reconstruction algorithm for repetition bootstrapping. Default is 1 (linear reconstruction, single tensor).') bsdatafiles = traits.List(File(exists=True), mandatory=True, argstr='-bsdatafile %s', desc='Specifies files containing raw data for repetition bootstrapping. Use -inputfile for wild bootstrap data.') bgmask = File(argstr='-bgmask %s', exists=True, desc = 'Provides the name of a file containing a background mask computed using, for example, FSL\'s bet2 program. The mask file contains zero in background voxels and non-zero in foreground.') class TrackBootstrap(Track): """ Performs bootstrap streamline tractography using mulitple scans of the same subject Example ------- >>> import nipype.interfaces.camino as cmon >>> track = cmon.TrackBootstrap() >>> track.inputs.inputmodel='repbs_dt' >>> track.inputs.scheme_file = 'bvecs.scheme' >>> track.inputs.bsdatafiles = ['fitted_data1.Bfloat', 'fitted_data2.Bfloat'] >>> track.inputs.seed_file = 'seed_mask.nii' >>> track.run() # doctest: +SKIP """ input_spec = TrackBootstrapInputSpec def __init__(self, command=None, **inputs): return super(TrackBootstrap, self).__init__(command, **inputs) class ComputeMeanDiffusivityInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='< %s', mandatory=True, position=1, desc='Tensor-fitted data filename') scheme_file = File(exists=True, argstr='%s', position=2, desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') out_file = File(argstr="> %s", position=-1, genfile=True) inputmodel = traits.Enum('dt', 'twotensor', 'threetensor', argstr='-inputmodel %s', desc='Specifies the model that the input tensor data contains parameters for.' \ 'Possible model types are: "dt" (diffusion-tensor data), "twotensor" (two-tensor data), '\ '"threetensor" (three-tensor data). By default, the program assumes that the input data '\ 'contains a single diffusion tensor in each voxel.') inputdatatype = traits.Enum('char', 'short', 'int', 'long', 'float', 'double', argstr='-inputdatatype %s', desc='Specifies the data type of the input file. 
The data type can be any of the' \ 'following strings: "char", "short", "int", "long", "float" or "double".') outputdatatype = traits.Enum('char', 'short', 'int', 'long', 'float', 'double', argstr='-outputdatatype %s', desc='Specifies the data type of the output data. The data type can be any of the' \ 'following strings: "char", "short", "int", "long", "float" or "double".') class ComputeMeanDiffusivityOutputSpec(TraitedSpec): md = File(exists=True, desc='Mean Diffusivity Map') class ComputeMeanDiffusivity(StdOutCommandLine): """ Computes the mean diffusivity (trace/3) from diffusion tensors. Example ------- >>> import nipype.interfaces.camino as cmon >>> md = cmon.ComputeMeanDiffusivity() >>> md.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> md.inputs.scheme_file = 'A.scheme' >>> md.run() # doctest: +SKIP """ _cmd = 'md' input_spec=ComputeMeanDiffusivityInputSpec output_spec=ComputeMeanDiffusivityOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["md"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + "_MD.img" #Need to change to self.inputs.outputdatatype class ComputeFractionalAnisotropyInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='< %s', mandatory=True, position=1, desc='Tensor-fitted data filename') scheme_file = File(exists=True, argstr='%s', position=2, desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') inputmodel = traits.Enum('dt', 'twotensor', 'threetensor', 'multitensor', argstr='-inputmodel %s', desc='Specifies the model that the input tensor data contains parameters for.' \ 'Possible model types are: "dt" (diffusion-tensor data), "twotensor" (two-tensor data), '\ '"threetensor" (three-tensor data). By default, the program assumes that the input data '\ 'contains a single diffusion tensor in each voxel.') inputdatatype = traits.Enum('char', 'short', 'int', 'long', 'float', 'double', argstr='-inputdatatype %s', desc='Specifies the data type of the input file. The data type can be any of the' \ 'following strings: "char", "short", "int", "long", "float" or "double".') outputdatatype = traits.Enum('char', 'short', 'int', 'long', 'float', 'double', argstr='-outputdatatype %s', desc='Specifies the data type of the output data. The data type can be any of the' \ 'following strings: "char", "short", "int", "long", "float" or "double".') class ComputeFractionalAnisotropyOutputSpec(TraitedSpec): fa = File(exists=True, desc='Fractional Anisotropy Map') class ComputeFractionalAnisotropy(StdOutCommandLine): """ Computes the fractional anisotropy of tensors. Reads diffusion tensor (single, two-tensor or three-tensor) data from the standard input, computes the fractional anisotropy (FA) of each tensor and outputs the results to the standard output. For multiple-tensor data the program outputs the FA of each tensor, so for three-tensor data, for example, the output contains three fractional anisotropy values per voxel. 
Example ------- >>> import nipype.interfaces.camino as cmon >>> fa = cmon.ComputeFractionalAnisotropy() >>> fa.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> fa.inputs.scheme_file = 'A.scheme' >>> fa.run() # doctest: +SKIP """ _cmd = 'fa' input_spec=ComputeFractionalAnisotropyInputSpec output_spec=ComputeFractionalAnisotropyOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['fa'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_FA.Bdouble' #Need to change to self.inputs.outputdatatype class ComputeTensorTraceInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='< %s', mandatory=True, position=1, desc='Tensor-fitted data filename') scheme_file = File(exists=True, argstr='%s', position=2, desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') inputmodel = traits.Enum('dt', 'twotensor', 'threetensor', 'multitensor', argstr='-inputmodel %s', desc='Specifies the model that the input tensor data contains parameters for.' \ 'Possible model types are: "dt" (diffusion-tensor data), "twotensor" (two-tensor data), '\ '"threetensor" (three-tensor data). By default, the program assumes that the input data '\ 'contains a single diffusion tensor in each voxel.') inputdatatype = traits.Enum('char', 'short', 'int', 'long', 'float', 'double', argstr='-inputdatatype %s', desc='Specifies the data type of the input file. The data type can be any of the' \ 'following strings: "char", "short", "int", "long", "float" or "double".') outputdatatype = traits.Enum('char', 'short', 'int', 'long', 'float', 'double', argstr='-outputdatatype %s', desc='Specifies the data type of the output data. The data type can be any of the' \ 'following strings: "char", "short", "int", "long", "float" or "double".') class ComputeTensorTraceOutputSpec(TraitedSpec): trace = File(exists=True, desc='Trace of the diffusion tensor') class ComputeTensorTrace(StdOutCommandLine): """ Computes the trace of tensors. Reads diffusion tensor (single, two-tensor or three-tensor) data from the standard input, computes the trace of each tensor, i.e., three times the mean diffusivity, and outputs the results to the standard output. For multiple-tensor data the program outputs the trace of each tensor, so for three-tensor data, for example, the output contains three values per voxel. Divide the output by three to get the mean diffusivity. Example ------- >>> import nipype.interfaces.camino as cmon >>> trace = cmon.ComputeTensorTrace() >>> trace.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> trace.inputs.scheme_file = 'A.scheme' >>> trace.run() # doctest: +SKIP """ _cmd = 'trd' input_spec=ComputeTensorTraceInputSpec output_spec=ComputeTensorTraceOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['trace'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_TrD.img' #Need to change to self.inputs.outputdatatype class ComputeEigensystemInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='< %s', mandatory=True, position=1, desc='Tensor-fitted data filename') inputmodel = traits.Enum('dt', 'multitensor', argstr='-inputmodel %s', desc='Specifies the model that the input data contains parameters for. 
Possible model types are: "dt" (diffusion-tensor data) and "multitensor"') maxcomponents = traits.Int(argstr='-maxcomponents %d', desc='The maximum number of tensor components in a voxel of the input data.') inputdatatype = traits.Enum('double', 'float', 'long', 'int', 'short', 'char', argstr='-inputdatatype %s', usedefault=True, desc=('Specifies the data type of the input data. ' 'The data type can be any of the following strings: ' '"char", "short", "int", "long", "float" or "double".' 'Default is double data type')) outputdatatype = traits.Enum('double', 'float', 'long', 'int', 'short', 'char', argstr='-outputdatatype %s', usedefault=True, desc=('Specifies the data type of the output data. ' 'The data type can be any of the following strings: ' '"char", "short", "int", "long", "float" or "double".' 'Default is double data type')) class ComputeEigensystemOutputSpec(TraitedSpec): eigen = File(exists=True, desc='Trace of the diffusion tensor') class ComputeEigensystem(StdOutCommandLine): """ Computes the eigensystem from tensor fitted data. Reads diffusion tensor (single, two-tensor, three-tensor or multitensor) data from the standard input, computes the eigenvalues and eigenvectors of each tensor and outputs the results to the standard output. For multiple-tensor data the program outputs the eigensystem of each tensor. For each tensor the program outputs: {l_1, e_11, e_12, e_13, l_2, e_21, e_22, e_33, l_3, e_31, e_32, e_33}, where l_1 >= l_2 >= l_3 and e_i = (e_i1, e_i2, e_i3) is the eigenvector with eigenvalue l_i. For three-tensor data, for example, the output contains thirty-six values per voxel. Example ------- >>> import nipype.interfaces.camino as cmon >>> dteig = cmon.ComputeEigensystem() >>> dteig.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> dteig.run() # doctest: +SKIP """ _cmd = 'dteig' input_spec=ComputeEigensystemInputSpec output_spec=ComputeEigensystemOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["eigen"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) datatype= self.inputs.outputdatatype return name + '_eig.B' + datatype nipype-0.9.2/nipype/interfaces/camino/odf.py000066400000000000000000000364531227300005300210500ustar00rootroot00000000000000""" Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os from nipype.interfaces.base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, File, StdOutCommandLine, StdOutCommandLineInputSpec, isdefined) from nipype.utils.filemanip import split_filename class QBallMXInputSpec(StdOutCommandLineInputSpec): basistype = traits.Enum('rbf', 'sh', argstr='-basistype %s', desc=('Basis function type. "rbf" to use radial basis functions ' '"sh" to use spherical harmonics'), usedefault=True) scheme_file = File(exists=True, argstr='-schemefile %s', mandatory=True, desc='Specifies the scheme file for the diffusion MRI data') order = traits.Int(argstr='-order %d', units='NA', desc=('Specific to sh. Maximum order of the spherical harmonic series. ' 'Default is 4.')) rbfpointset = traits.Int(argstr='-rbfpointset %d', units='NA', desc=('Specific to rbf. Sets the number of radial basis functions to use. ' 'The value specified must be present in the Pointsets directory. 
' 'The default value is 246.')) rbfsigma = traits.Float(argstr='-rbfsigma %f', units='NA', desc=('Specific to rbf. Sets the width of the interpolating basis functions. ' 'The default value is 0.2618 (15 degrees).')) smoothingsigma = traits.Float(argstr='-smoothingsigma %f', units='NA', desc=('Specific to rbf. Sets the width of the smoothing basis functions. ' 'The default value is 0.1309 (7.5 degrees).')) class QBallMXOutputSpec(TraitedSpec): qmat = File(exists=True, desc='Q-Ball reconstruction matrix') class QBallMX(StdOutCommandLine): """ Generates a reconstruction matrix for Q-Ball. Used in LinRecon with the same scheme file to reconstruct data. Example 1 --------- To create a linear transform matrix using Spherical Harmonics (sh). >>> import nipype.interfaces.camino as cam >>> qballmx = cam.QBallMX() >>> qballmx.inputs.scheme_file = 'A.scheme' >>> qballmx.inputs.basistype = 'sh' >>> qballmx.inputs.order = 6 >>> qballmx.run() # doctest: +SKIP Example 2 --------- To create a linear transform matrix using Radial Basis Functions (rbf). This command uses the default setting of rbf sigma = 0.2618 (15 degrees), data smoothing sigma = 0.1309 (7.5 degrees), rbf pointset 246 >>> import nipype.interfaces.camino as cam >>> qballmx = cam.QBallMX() >>> qballmx.inputs.scheme_file = 'A.scheme' >>> qballmx.run() # doctest: +SKIP The linear transform matrix from any of these two examples can then be run over each voxel using LinRecon >>> qballcoeffs = cam.LinRecon() >>> qballcoeffs.inputs.in_file = 'SubjectA.Bfloat' >>> qballcoeffs.inputs.scheme_file = 'A.scheme' >>> qballcoeffs.inputs.qball_mat = 'A_qmat.Bdouble' >>> qballcoeffs.inputs.normalize = True >>> qballcoeffs.inputs.bgmask = 'brain_mask.nii' >>> qballcoeffs.run() # doctest: +SKIP """ _cmd = 'qballmx' input_spec=QBallMXInputSpec output_spec=QBallMXOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['qmat'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.scheme_file) return name + '_qmat.Bdouble' class LinReconInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=1, desc='voxel-order data filename') scheme_file = File(exists=True, argstr='%s', mandatory=True, position=2, desc='Specifies the scheme file for the diffusion MRI data') qball_mat = File(exists=True, argstr='%s', mandatory=True, position=3, desc='Linear transformation matrix.') normalize = traits.Bool(argstr='-normalize', desc=('Normalize the measurements and discard ' 'the zero measurements before the linear transform.')) log = traits.Bool(argstr='-log', desc=('Transform the log measurements rather than the ' 'measurements themselves')) bgmask = File(exists=True, argstr='-bgmask %s', desc='background mask') class LinReconOutputSpec(TraitedSpec): recon_data = File(exists=True, desc='Transformed data') class LinRecon(StdOutCommandLine): """ Runs a linear transformation in each voxel. Reads a linear transformation from the matrix file assuming the imaging scheme specified in the scheme file. Performs the linear transformation on the data in every voxel and outputs the result to the standard output. The ouput in every voxel is actually: [exit code, ln(S(0)), p1, ..., pR] where p1, ..., pR are the parameters of the reconstruction. Possible exit codes are: 0. No problems. 6. Bad data replaced by substitution of zero. 
The matrix must be R by N+M where N+M is the number of measurements and R is the number of parameters of the reconstruction. The matrix file contains binary double-precision floats. The matrix elements are stored row by row. Example --------- First run QBallMX and create a linear transform matrix using Spherical Harmonics (sh). >>> import nipype.interfaces.camino as cam >>> qballmx = cam.QBallMX() >>> qballmx.inputs.scheme_file = 'A.scheme' >>> qballmx.inputs.basistype = 'sh' >>> qballmx.inputs.order = 4 >>> qballmx.run() # doctest: +SKIP Then run it over each voxel using LinRecon >>> qballcoeffs = cam.LinRecon() >>> qballcoeffs.inputs.in_file = 'SubjectA.Bfloat' >>> qballcoeffs.inputs.scheme_file = 'A.scheme' >>> qballcoeffs.inputs.qball_mat = 'A_qmat.Bdouble' >>> qballcoeffs.inputs.normalize = True >>> qballcoeffs.run() # doctest: +SKIP """ _cmd = 'linrecon' input_spec=LinReconInputSpec output_spec=LinReconOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['recon_data'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.scheme_file) return name + '_recondata.Bdouble' class SFPeaksInputSpec(StdOutCommandLineInputSpec): in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, desc='Voxel-order data of spherical functions') inputmodel = traits.Enum('sh', 'maxent', 'rbf', argstr='-inputmodel %s', mandatory=True, desc=('Type of functions input via in_file. Currently supported options are: ' ' sh - Spherical harmonic series. Specify the maximum order of the SH series ' ' with the "order" attribute if different from the default of 4. ' ' maxent - Maximum entropy representations output by MESD. The reconstruction ' ' directions input to MESD must be specified. By default this is the ' ' same set of gradient directions (excluding zero gradients) in the ' ' scheme file, so specify the "schemefile" attribute unless the ' ' "mepointset" attribute was set in MESD. ' ' rbf - Sums of radial basis functions. Specify the pointset with the attribute ' ' "rbfpointset" if different from the default. See QBallMX.')) order = traits.Int(argstr='-order %d', units='NA', desc='Specific to sh. Maximum order of the spherical harmonic series.') scheme_file = File(exists=True, argstr='%s', desc='Specific to maxent. Specifies the scheme file.') rbfpointset = traits.Int(argstr='-rbfpointset %d', units='NA', desc=('Specific to rbf. Sets the number of radial basis functions to use. ' 'The value specified must be present in the Pointsets directory. ' 'The default value is 246.')) mepointset = traits.Int(argstr='-mepointset %d', units='NA', desc=('Use a set of directions other than those in the scheme file for the deconvolution ' 'kernel. The number refers to the number of directions on the unit sphere. ' 'For example, "mepointset = 54" uses the directions in "camino/PointSets/Elec054.txt" ' 'Use this option only if you told MESD to use a custom set of directions with the same ' 'option. Otherwise, specify the scheme file with the "schemefile" attribute.')) numpds = traits.Int(argstr='-numpds %d', units='NA', desc='The largest number of peak directions to output in each voxel.') noconsistencycheck = traits.Bool(argstr='-noconsistencycheck', desc='Turns off the consistency check. The output shows all consistencies as true.') searchradius = traits.Float(argstr='-searchradius %f', units='NA', desc='The search radius in the peak finding algorithm. The default is 0.4 (cf. 
"density")') density = traits.Int(argstr='-density %d', units='NA', desc=('The number of randomly rotated icosahedra to use in constructing the set of points for ' 'random sampling in the peak finding algorithm. Default is 1000, which works well for very ' 'spiky maxent functions. For other types of function, it is reasonable to set the density ' 'much lower and increase the search radius slightly, which speeds up the computation.')) pointset = traits.Int(argstr='-pointset %d', units='NA', desc=('To sample using an evenly distributed set of points instead. The integer can be ' '0, 1, ..., 7. Index 0 gives 1082 points, 1 gives 1922, 2 gives 3002, 3 gives 4322, ' '4 gives 5882, 5 gives 8672, 6 gives 12002, 7 gives 15872.')) pdthresh = traits.Float(argstr='-pdthresh %f', units='NA', desc=('Base threshold on the actual peak direction strength divided by the mean of the ' 'function. The default is 1.0 (the peak must be equal or greater than the mean).')) stdsfrommean = traits.Float(argstr='-stdsfrommean %f', units='NA', desc=('This is the number of standard deviations of the function to be added to the ' '"pdthresh" attribute in the peak directions pruning.')) class SFPeaksOutputSpec(TraitedSpec): peaks = File(exists=True, desc='Peaks of the spherical functions.') class SFPeaks(StdOutCommandLine): """ Finds the peaks of spherical functions. This utility reads coefficients of the spherical functions and outputs a list of peak directions of the function. It computes the value of the function at each of a set of sample points. Then it finds local maxima by finding all points at which the function is larger than for any other point within a fixed search radius (the default is 0.4). The utility then uses Powell's algorithm to optimize the position of each local maximum. Finally the utility removes duplicates and tiny peaks with function value smaller than some threshold, which is the mean of the function plus some number of standard deviations. By default the program checks for con- sistency with a second set of starting points, but skips the optimization step. To speed up execution, you can turn off the con- sistency check by setting the noconsistencycheck flag to True. By default, the utility constructs a set of sample points by randomly rotating a unit icosahedron repeatedly (the default is 1000 times, which produces a set of 6000 points) and concatenating the lists of vertices. The 'pointset = ' attribute can tell the utility to use an evenly distributed set of points (index 0 gives 1082 points, 1 gives 1922, 2 gives 4322, 3 gives 8672, 4 gives 15872, 5 gives 32762, 6 gives 72032), which is quicker, because you can get away with fewer points. We estimate that you can use a factor of 2.5 less evenly distributed points than randomly distributed points and still expect similar performance levels. The output for each voxel is: - exitcode (inherited from the input data). - ln(A(0)) - number of peaks found. - flag for consistency with a repeated run (number of directions is the same and the directions are the same to within a threshold.) - mean(f). - std(f). - direction 1 (x, y, z, f, H00, H01, H10, H11). - direction 2 (x, y, z, f, H00, H01, H10, H11). - direction 3 (x, y, z, f, H00, H01, H10, H11). H is the Hessian of f at the peak. It is the matrix: [d^2f/ds^2 d^2f/dsdt] [d^2f/dtds d^2f/dt^2] = [H00 H01] [H10 H11] where s and t are orthogonal coordinates local to the peak. By default the maximum number of peak directions output in each voxel is three. 
If less than three directions are found, zeros are output for later directions. The peaks are ordered by the value of the function at the peak. If more than the maximum number of directions are found only the strongest ones are output. The maximum number can be changed setting the 'numpds' attribute. The utility can read various kinds of spherical function, but must be told what kind of function is input using the 'inputmodel' attribute. The description of the 'inputmodel' attribute lists additional information required by SFPeaks for each input model. Example --------- First run QBallMX and create a linear transform matrix using Spherical Harmonics (sh). >>> import nipype.interfaces.camino as cam >>> sf_peaks = cam.SFPeaks() >>> sf_peaks.inputs.in_file = 'A_recon_params.Bdouble' >>> sf_peaks.inputs.inputmodel = 'sh' >>> sf_peaks.inputs.order = 4 >>> sf_peaks.inputs.density = 100 >>> sf_peaks.inputs.searchradius = 1.0 >>> sf_peaks.run() # doctest: +SKIP """ _cmd = 'sfpeaks' input_spec=SFPeaksInputSpec output_spec=SFPeaksOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['peaks'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_peaks.Bdouble' nipype-0.9.2/nipype/interfaces/camino/setup.py000066400000000000000000000006511227300005300214270ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('camino', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/camino/tests/000077500000000000000000000000001227300005300210555ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py000066400000000000000000000045641227300005300262430ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.convert import AnalyzeHeader def test_AnalyzeHeader_inputs(): input_map = dict(args=dict(argstr='%s', ), centre=dict(argstr='-centre %s', units='mm', ), data_dims=dict(argstr='-datadims %s', units='voxels', ), datatype=dict(argstr='-datatype %s', mandatory=True, ), description=dict(argstr='-description %s', ), environ=dict(nohash=True, usedefault=True, ), greylevels=dict(argstr='-gl %s', units='NA', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', mandatory=True, position=1, ), initfromheader=dict(argstr='-initfromheader %s', position=3, ), intelbyteorder=dict(argstr='-intelbyteorder', ), networkbyteorder=dict(argstr='-networkbyteorder', ), nimages=dict(argstr='-nimages %d', units='NA', ), offset=dict(argstr='-offset %d', units='NA', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), picoseed=dict(argstr='-picoseed %s', units='mm', ), printbigendian=dict(argstr='-printbigendian %s', position=3, ), printimagedims=dict(argstr='-printimagedims %s', position=3, ), printintelbyteorder=dict(argstr='-printintelbyteorder %s', position=3, ), printprogargs=dict(argstr='-printprogargs %s', position=3, ), readheader=dict(argstr='-readheader %s', position=3, ), scaleinter=dict(argstr='-scaleinter %d', units='NA', ), scaleslope=dict(argstr='-scaleslope %d', units='NA', ), 
scheme_file=dict(argstr='%s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_dims=dict(argstr='-voxeldims %s', units='mm', ), ) inputs = AnalyzeHeader.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_AnalyzeHeader_outputs(): output_map = dict(header=dict(), ) outputs = AnalyzeHeader.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py000066400000000000000000000025431227300005300273530ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import ComputeEigensystem def test_ComputeEigensystem_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', mandatory=True, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', usedefault=True, ), inputmodel=dict(argstr='-inputmodel %s', ), maxcomponents=dict(argstr='-maxcomponents %d', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), outputdatatype=dict(argstr='-outputdatatype %s', usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ComputeEigensystem.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ComputeEigensystem_outputs(): output_map = dict(eigen=dict(), ) outputs = ComputeEigensystem.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py000066400000000000000000000025421227300005300312300ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import ComputeFractionalAnisotropy def test_ComputeFractionalAnisotropy_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', mandatory=True, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), inputmodel=dict(argstr='-inputmodel %s', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), outputdatatype=dict(argstr='-outputdatatype %s', ), scheme_file=dict(argstr='%s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ComputeFractionalAnisotropy.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ComputeFractionalAnisotropy_outputs(): output_map = dict(fa=dict(), ) outputs = ComputeFractionalAnisotropy.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py000066400000000000000000000025111227300005300301600ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from 
nipype.interfaces.camino.dti import ComputeMeanDiffusivity def test_ComputeMeanDiffusivity_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', mandatory=True, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), inputmodel=dict(argstr='-inputmodel %s', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), outputdatatype=dict(argstr='-outputdatatype %s', ), scheme_file=dict(argstr='%s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ComputeMeanDiffusivity.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ComputeMeanDiffusivity_outputs(): output_map = dict(md=dict(), ) outputs = ComputeMeanDiffusivity.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py000066400000000000000000000024701227300005300273070ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import ComputeTensorTrace def test_ComputeTensorTrace_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', mandatory=True, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), inputmodel=dict(argstr='-inputmodel %s', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), outputdatatype=dict(argstr='-outputdatatype %s', ), scheme_file=dict(argstr='%s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ComputeTensorTrace.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ComputeTensorTrace_outputs(): output_map = dict(trace=dict(), ) outputs = ComputeTensorTrace.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_Conmat.py000066400000000000000000000026521227300005300247440ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.connectivity import Conmat def test_Conmat_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, position=1, ), output_root=dict(argstr='-outputroot %s', genfile=True, ), scalar_file=dict(argstr='-scalarfile %s', position=3, requires=['tract_stat'], ), target_file=dict(argstr='-targetfile %s', mandatory=True, position=2, ), targetname_file=dict(argstr='-targetnamefile %s', position=4, ), terminal_output=dict(mandatory=True, nohash=True, ), tract_stat=dict(argstr='-tractstat %s', requires=['scalar_file'], units='NA', ), ) inputs = Conmat.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Conmat_outputs(): output_map = 
dict(conmat_sc=dict(), conmat_ts=dict(), ) outputs = Conmat.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py000066400000000000000000000022641227300005300247450ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.convert import DT2NIfTI def test_DT2NIfTI_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), header_file=dict(argstr='-header %s', mandatory=True, position=3, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, position=1, ), output_root=dict(argstr='-outputroot %s', genfile=True, position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DT2NIfTI.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DT2NIfTI_outputs(): output_map = dict(dt=dict(), exitcode=dict(), lns0=dict(), ) outputs = DT2NIfTI.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_DTIFit.py000066400000000000000000000023301227300005300245770ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import DTIFit def test_DTIFit_inputs(): input_map = dict(args=dict(argstr='%s', ), bgmask=dict(argstr='-bgmask %s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=1, ), non_linear=dict(argstr='-nonlinear', position=3, ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), scheme_file=dict(argstr='%s', mandatory=True, position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DTIFit.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DTIFit_outputs(): output_map = dict(tensor_fitted=dict(), ) outputs = DTIFit.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py000066400000000000000000000031501227300005300250430ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import DTLUTGen def test_DTLUTGen_inputs(): input_map = dict(acg=dict(argstr='-acg', ), args=dict(argstr='%s', ), bingham=dict(argstr='-bingham', ), environ=dict(nohash=True, usedefault=True, ), frange=dict(argstr='-frange %s', position=1, units='NA', ), ignore_exception=dict(nohash=True, usedefault=True, ), inversion=dict(argstr='-inversion %d', units='NA', ), lrange=dict(argstr='-lrange %s', position=1, units='NA', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), samples=dict(argstr='-samples %d', units='NA', ), scheme_file=dict(argstr='-schemefile %s', mandatory=True, position=2, ), snr=dict(argstr='-snr %f', units='NA', ), step=dict(argstr='-step %f', 
units='NA', ), terminal_output=dict(mandatory=True, nohash=True, ), trace=dict(argstr='-trace %G', units='NA', ), watson=dict(argstr='-watson', ), ) inputs = DTLUTGen.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DTLUTGen_outputs(): output_map = dict(dtLUT=dict(), ) outputs = DTLUTGen.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_DTMetric.py000066400000000000000000000024601227300005300251730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import DTMetric def test_DTMetric_inputs(): input_map = dict(args=dict(argstr='%s', ), data_header=dict(argstr='-header %s', ), eigen_data=dict(argstr='-inputfile %s', mandatory=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputdatatype=dict(argstr='-inputdatatype %s', usedefault=True, ), metric=dict(argstr='-stat %s', mandatory=True, ), outputdatatype=dict(argstr='-outputdatatype %s', usedefault=True, ), outputfile=dict(argstr='-outputfile %s', genfile=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DTMetric.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DTMetric_outputs(): output_map = dict(metric_stats=dict(), ) outputs = DTMetric.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py000066400000000000000000000030561227300005300253550ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.convert import FSL2Scheme def test_FSL2Scheme_inputs(): input_map = dict(args=dict(argstr='%s', ), bscale=dict(argstr='-bscale %d', units='NA', ), bval_file=dict(argstr='-bvalfile %s', mandatory=True, position=2, ), bvec_file=dict(argstr='-bvecfile %s', mandatory=True, position=1, ), diffusiontime=dict(argstr='-diffusiontime %f', units='NA', ), environ=dict(nohash=True, usedefault=True, ), flipx=dict(argstr='-flipx', ), flipy=dict(argstr='-flipy', ), flipz=dict(argstr='-flipz', ), ignore_exception=dict(nohash=True, usedefault=True, ), interleave=dict(argstr='-interleave', ), numscans=dict(argstr='-numscans %d', units='NA', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), usegradmod=dict(argstr='-usegradmod', ), ) inputs = FSL2Scheme.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FSL2Scheme_outputs(): output_map = dict(scheme=dict(), ) outputs = FSL2Scheme.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py000066400000000000000000000022361227300005300256430ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from 
nipype.testing import assert_equal from nipype.interfaces.camino.convert import Image2Voxel def test_Image2Voxel_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-4dimage %s', mandatory=True, position=1, ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), out_type=dict(argstr='-outputdatatype %s', position=2, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Image2Voxel.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Image2Voxel_outputs(): output_map = dict(voxel_order=dict(), ) outputs = Image2Voxel.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_LinRecon.py000066400000000000000000000024741227300005300252360ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.odf import LinRecon def test_LinRecon_inputs(): input_map = dict(args=dict(argstr='%s', ), bgmask=dict(argstr='-bgmask %s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=1, ), log=dict(argstr='-log', ), normalize=dict(argstr='-normalize', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), qball_mat=dict(argstr='%s', mandatory=True, position=3, ), scheme_file=dict(argstr='%s', mandatory=True, position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = LinRecon.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_LinRecon_outputs(): output_map = dict(recon_data=dict(), ) outputs = LinRecon.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_ModelFit.py000066400000000000000000000033621227300005300252250ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import ModelFit def test_ModelFit_inputs(): input_map = dict(args=dict(argstr='%s', ), bgmask=dict(argstr='-bgmask %s', ), bgthresh=dict(argstr='-bgthresh %G', ), cfthresh=dict(argstr='-csfthresh %G', ), environ=dict(nohash=True, usedefault=True, ), fixedbvalue=dict(argstr='-fixedbvalue %s', ), fixedmodq=dict(argstr='-fixedmod %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, ), inputdatatype=dict(argstr='-inputdatatype %s', ), model=dict(argstr='-model %s', mandatory=True, ), noisemap=dict(argstr='-noisemap %s', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), outlier=dict(argstr='-outliermap %s', ), outputfile=dict(argstr='-outputfile %s', ), residualmap=dict(argstr='-residualmap %s', ), scheme_file=dict(argstr='-schemefile %s', mandatory=True, ), sigma=dict(argstr='-sigma %G', ), tau=dict(argstr='-tau %G', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ModelFit.input_spec() for key, metadata in input_map.items(): for metakey, 
value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ModelFit_outputs(): output_map = dict(fitted_data=dict(), ) outputs = ModelFit.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py000066400000000000000000000025711227300005300260750ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.convert import NIfTIDT2Camino def test_NIfTIDT2Camino_inputs(): input_map = dict(args=dict(argstr='%s', ), bgmask=dict(argstr='-bgmask %s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, position=1, ), lns0_file=dict(argstr='-lns0 %s', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), s0_file=dict(argstr='-s0 %s', ), scaleinter=dict(argstr='-scaleinter %s', ), scaleslope=dict(argstr='-scaleslope %s', ), terminal_output=dict(mandatory=True, nohash=True, ), uppertriangular=dict(argstr='-uppertriangular %s', ), ) inputs = NIfTIDT2Camino.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_NIfTIDT2Camino_outputs(): output_map = dict(out_file=dict(), ) outputs = NIfTIDT2Camino.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py000066400000000000000000000026671227300005300251400ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import PicoPDFs def test_PicoPDFs_inputs(): input_map = dict(args=dict(argstr='%s', ), directmap=dict(argstr='-directmap', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', mandatory=True, position=1, ), inputmodel=dict(argstr='-inputmodel %s', position=2, usedefault=True, ), luts=dict(argstr='-luts %s', mandatory=True, ), maxcomponents=dict(argstr='-maxcomponents %d', units='NA', ), numpds=dict(argstr='-numpds %d', units='NA', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), pdf=dict(argstr='-pdf %s', position=4, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = PicoPDFs.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_PicoPDFs_outputs(): output_map = dict(pdfs=dict(), ) outputs = PicoPDFs.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py000066400000000000000000000055661227300005300266440ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.convert import ProcStreamlines def test_ProcStreamlines_inputs(): input_map = dict(allowmultitargets=dict(argstr='-allowmultitargets', ), args=dict(argstr='%s', ), datadims=dict(argstr='-datadims 
%s', units='voxels', ), directional=dict(argstr='-directional %s', units='NA', ), discardloops=dict(argstr='-discardloops', ), endpointfile=dict(argstr='-endpointfile %s', ), environ=dict(nohash=True, usedefault=True, ), exclusionfile=dict(argstr='-exclusionfile %s', ), gzip=dict(argstr='-gzip', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, position=1, ), inputmodel=dict(argstr='-inputmodel %s', usedefault=True, ), iterations=dict(argstr='-iterations %d', units='NA', ), maxtractlength=dict(argstr='-maxtractlength %d', units='mm', ), maxtractpoints=dict(argstr='-maxtractpoints %d', units='NA', ), mintractlength=dict(argstr='-mintractlength %d', units='mm', ), mintractpoints=dict(argstr='-mintractpoints %d', units='NA', ), noresample=dict(argstr='-noresample', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), outputacm=dict(argstr='-outputacm', ), outputcbs=dict(argstr='-outputcbs', ), outputcp=dict(argstr='-outputcp', ), outputroot=dict(argstr='-outputroot %s', ), outputsc=dict(argstr='-outputsc', ), outputtracts=dict(argstr='-outputtracts', ), regionindex=dict(argstr='-regionindex %d', units='mm', ), resamplestepsize=dict(argstr='-resamplestepsize %d', units='NA', ), seedfile=dict(argstr='-seedfile %s', ), seedpointmm=dict(argstr='-seedpointmm %s', units='mm', ), seedpointvox=dict(argstr='-seedpointvox %s', units='voxels', ), targetfile=dict(argstr='-targetfile %s', ), terminal_output=dict(mandatory=True, nohash=True, ), truncateinexclusion=dict(argstr='-truncateinexclusion', ), truncateloops=dict(argstr='-truncateloops', ), voxeldims=dict(argstr='-voxeldims %s', units='mm', ), waypointfile=dict(argstr='-waypointfile %s', ), ) inputs = ProcStreamlines.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ProcStreamlines_outputs(): output_map = dict(proc=dict(), ) outputs = ProcStreamlines.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_QBallMX.py000066400000000000000000000025561227300005300247660ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.odf import QBallMX def test_QBallMX_inputs(): input_map = dict(args=dict(argstr='%s', ), basistype=dict(argstr='-basistype %s', usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), order=dict(argstr='-order %d', units='NA', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), rbfpointset=dict(argstr='-rbfpointset %d', units='NA', ), rbfsigma=dict(argstr='-rbfsigma %f', units='NA', ), scheme_file=dict(argstr='-schemefile %s', mandatory=True, ), smoothingsigma=dict(argstr='-smoothingsigma %f', units='NA', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = QBallMX.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_QBallMX_outputs(): output_map = dict(qmat=dict(), ) outputs = QBallMX.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
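# --- illustrative sketch (not part of the generated tests) ------------------
# The QBallMX, LinRecon and SFPeaks docstrings in odf.py above show each
# interface being run on its own with .run(). The snippet below sketches how
# the same three steps could instead be wired into a nipype Workflow. It is an
# assumption-laden example, not release code: the scheme/data filenames are the
# placeholder names used in the docstrings, and running it requires Camino on
# the PATH plus the example data.
import nipype.pipeline.engine as pe
import nipype.interfaces.camino as camino


def build_qball_peaks_workflow():
    # Q-Ball reconstruction matrix from the scheme file
    qballmx = pe.Node(interface=camino.QBallMX(), name='qballmx')
    qballmx.inputs.scheme_file = 'A.scheme'      # placeholder name from the docstrings
    qballmx.inputs.basistype = 'sh'
    qballmx.inputs.order = 6

    # apply the linear transform in every voxel
    linrecon = pe.Node(interface=camino.LinRecon(), name='linrecon')
    linrecon.inputs.in_file = 'SubjectA.Bfloat'  # placeholder name from the docstrings
    linrecon.inputs.scheme_file = 'A.scheme'
    linrecon.inputs.normalize = True

    # extract peak directions from the reconstructed spherical functions
    sfpeaks = pe.Node(interface=camino.SFPeaks(), name='sfpeaks')
    sfpeaks.inputs.inputmodel = 'sh'
    sfpeaks.inputs.order = 6                     # must match the order used by QBallMX

    wf = pe.Workflow(name='qball_peaks')
    wf.connect([(qballmx, linrecon, [('qmat', 'qball_mat')]),
                (linrecon, sfpeaks, [('recon_data', 'in_file')])])
    return wf


if __name__ == '__main__':
    # guarded so that importing this module never triggers a run
    build_qball_peaks_workflow().run()  # needs Camino installed and the example data present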
nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py000066400000000000000000000030031227300005300250410ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.calib import SFLUTGen def test_SFLUTGen_inputs(): input_map = dict(args=dict(argstr='%s', ), binincsize=dict(argstr='-binincsize %d', units='NA', ), directmap=dict(argstr='-directmap', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, ), info_file=dict(argstr='-infofile %s', mandatory=True, ), minvectsperbin=dict(argstr='-minvectsperbin %d', units='NA', ), order=dict(argstr='-order %d', units='NA', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), outputstem=dict(argstr='-outputstem %s', usedefault=True, ), pdf=dict(argstr='-pdf %s', usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SFLUTGen.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SFLUTGen_outputs(): output_map = dict(lut_one_fibre=dict(), lut_two_fibres=dict(), ) outputs = SFLUTGen.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py000066400000000000000000000037001227300005300262260ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.calib import SFPICOCalibData def test_SFPICOCalibData_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), info_file=dict(argstr='-infooutputfile %s', genfile=True, hash_files=False, mandatory=True, ), onedtfarange=dict(argstr='-onedtfarange %s', units='NA', ), onedtfastep=dict(argstr='-onedtfastep %f', units='NA', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), scheme_file=dict(argstr='-schemefile %s', mandatory=True, ), seed=dict(argstr='-seed %f', units='NA', ), snr=dict(argstr='-snr %f', units='NA', ), terminal_output=dict(mandatory=True, nohash=True, ), trace=dict(argstr='-trace %f', units='NA', ), twodtanglerange=dict(argstr='-twodtanglerange %s', units='NA', ), twodtanglestep=dict(argstr='-twodtanglestep %f', units='NA', ), twodtfarange=dict(argstr='-twodtfarange %s', units='NA', ), twodtfastep=dict(argstr='-twodtfastep %f', units='NA', ), twodtmixmax=dict(argstr='-twodtmixmax %f', units='NA', ), twodtmixstep=dict(argstr='-twodtmixstep %f', units='NA', ), ) inputs = SFPICOCalibData.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SFPICOCalibData_outputs(): output_map = dict(PICOCalib=dict(), calib_info=dict(), ) outputs = SFPICOCalibData.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_SFPeaks.py000066400000000000000000000034271227300005300250200ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from 
nipype.interfaces.camino.odf import SFPeaks def test_SFPeaks_inputs(): input_map = dict(args=dict(argstr='%s', ), density=dict(argstr='-density %d', units='NA', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, ), inputmodel=dict(argstr='-inputmodel %s', mandatory=True, ), mepointset=dict(argstr='-mepointset %d', units='NA', ), noconsistencycheck=dict(argstr='-noconsistencycheck', ), numpds=dict(argstr='-numpds %d', units='NA', ), order=dict(argstr='-order %d', units='NA', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), pdthresh=dict(argstr='-pdthresh %f', units='NA', ), pointset=dict(argstr='-pointset %d', units='NA', ), rbfpointset=dict(argstr='-rbfpointset %d', units='NA', ), scheme_file=dict(argstr='%s', ), searchradius=dict(argstr='-searchradius %f', units='NA', ), stdsfrommean=dict(argstr='-stdsfrommean %f', units='NA', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SFPeaks.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SFPeaks_outputs(): output_map = dict(peaks=dict(), ) outputs = SFPeaks.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_Track.py000066400000000000000000000035501227300005300245650ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import Track def test_Track_inputs(): input_map = dict(anisfile=dict(argstr='-anisfile %s', ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), curvethresh=dict(argstr='-curvethresh %f', ), data_dims=dict(argstr='-datadims %s', units='voxels', ), environ=dict(nohash=True, usedefault=True, ), gzip=dict(argstr='-gzip', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), inputmodel=dict(argstr='-inputmodel %s', usedefault=True, ), ipthresh=dict(argstr='-ipthresh %f', ), maxcomponents=dict(argstr='-maxcomponents %d', units='NA', ), numpds=dict(argstr='-numpds %d', units='NA', ), out_file=dict(argstr='-outputfile %s', genfile=True, position=-1, ), output_root=dict(argstr='-outputroot %s', position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), seed_file=dict(argstr='-seedfile %s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_dims=dict(argstr='-voxeldims %s', units='mm', ), ) inputs = Track.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Track_outputs(): output_map = dict(tracked=dict(), ) outputs = Track.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py000066400000000000000000000036251227300005300263610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import TrackBallStick def test_TrackBallStick_inputs(): input_map = 
dict(anisfile=dict(argstr='-anisfile %s', ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), curvethresh=dict(argstr='-curvethresh %f', ), data_dims=dict(argstr='-datadims %s', units='voxels', ), environ=dict(nohash=True, usedefault=True, ), gzip=dict(argstr='-gzip', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), inputmodel=dict(argstr='-inputmodel %s', usedefault=True, ), ipthresh=dict(argstr='-ipthresh %f', ), maxcomponents=dict(argstr='-maxcomponents %d', units='NA', ), numpds=dict(argstr='-numpds %d', units='NA', ), out_file=dict(argstr='-outputfile %s', genfile=True, position=-1, ), output_root=dict(argstr='-outputroot %s', position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), seed_file=dict(argstr='-seedfile %s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_dims=dict(argstr='-voxeldims %s', units='mm', ), ) inputs = TrackBallStick.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TrackBallStick_outputs(): output_map = dict(tracked=dict(), ) outputs = TrackBallStick.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py000066400000000000000000000046231227300005300265160ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import TrackBayesDirac def test_TrackBayesDirac_inputs(): input_map = dict(anisfile=dict(argstr='-anisfile %s', ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), curvepriorg=dict(argstr='-curvepriorg %G', ), curvepriork=dict(argstr='-curvepriork %G', ), curvethresh=dict(argstr='-curvethresh %f', ), data_dims=dict(argstr='-datadims %s', units='voxels', ), datamodel=dict(argstr='-datamodel %s', ), environ=dict(nohash=True, usedefault=True, ), extpriordatatype=dict(argstr='-extpriordatatype %s', ), extpriorfile=dict(argstr='-extpriorfile %s', ), gzip=dict(argstr='-gzip', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), inputmodel=dict(argstr='-inputmodel %s', usedefault=True, ), ipthresh=dict(argstr='-ipthresh %f', ), iterations=dict(argstr='-iterations %d', units='NA', ), maxcomponents=dict(argstr='-maxcomponents %d', units='NA', ), numpds=dict(argstr='-numpds %d', units='NA', ), out_file=dict(argstr='-outputfile %s', genfile=True, position=-1, ), output_root=dict(argstr='-outputroot %s', position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), pdf=dict(argstr='-pdf %s', ), pointset=dict(argstr='-pointset %s', ), scheme_file=dict(argstr='-schemefile %s', mandatory=True, ), seed_file=dict(argstr='-seedfile %s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_dims=dict(argstr='-voxeldims %s', units='mm', ), ) inputs = TrackBayesDirac.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TrackBayesDirac_outputs(): output_map = dict(tracked=dict(), ) outputs = TrackBayesDirac.output_spec() for key, 
metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py000066400000000000000000000043111227300005300264570ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import TrackBootstrap def test_TrackBootstrap_inputs(): input_map = dict(anisfile=dict(argstr='-anisfile %s', ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), bgmask=dict(argstr='-bgmask %s', ), bsdatafiles=dict(argstr='-bsdatafile %s', mandatory=True, ), curvethresh=dict(argstr='-curvethresh %f', ), data_dims=dict(argstr='-datadims %s', units='voxels', ), environ=dict(nohash=True, usedefault=True, ), gzip=dict(argstr='-gzip', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), inputmodel=dict(argstr='-inputmodel %s', usedefault=True, ), inversion=dict(argstr='-inversion %s', ), ipthresh=dict(argstr='-ipthresh %f', ), iterations=dict(argstr='-iterations %d', units='NA', ), maxcomponents=dict(argstr='-maxcomponents %d', units='NA', ), numpds=dict(argstr='-numpds %d', units='NA', ), out_file=dict(argstr='-outputfile %s', genfile=True, position=-1, ), output_root=dict(argstr='-outputroot %s', position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), scheme_file=dict(argstr='-schemefile %s', mandatory=True, ), seed_file=dict(argstr='-seedfile %s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_dims=dict(argstr='-voxeldims %s', units='mm', ), ) inputs = TrackBootstrap.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TrackBootstrap_outputs(): output_map = dict(tracked=dict(), ) outputs = TrackBootstrap.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_TrackDT.py000066400000000000000000000035621227300005300250200ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import TrackDT def test_TrackDT_inputs(): input_map = dict(anisfile=dict(argstr='-anisfile %s', ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), curvethresh=dict(argstr='-curvethresh %f', ), data_dims=dict(argstr='-datadims %s', units='voxels', ), environ=dict(nohash=True, usedefault=True, ), gzip=dict(argstr='-gzip', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), inputmodel=dict(argstr='-inputmodel %s', usedefault=True, ), ipthresh=dict(argstr='-ipthresh %f', ), maxcomponents=dict(argstr='-maxcomponents %d', units='NA', ), numpds=dict(argstr='-numpds %d', units='NA', ), out_file=dict(argstr='-outputfile %s', genfile=True, position=-1, ), output_root=dict(argstr='-outputroot %s', position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), seed_file=dict(argstr='-seedfile %s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_dims=dict(argstr='-voxeldims %s', units='mm', ), ) inputs = 
TrackDT.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TrackDT_outputs(): output_map = dict(tracked=dict(), ) outputs = TrackDT.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_TrackPICo.py000066400000000000000000000037461227300005300253070ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.dti import TrackPICo def test_TrackPICo_inputs(): input_map = dict(anisfile=dict(argstr='-anisfile %s', ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), curvethresh=dict(argstr='-curvethresh %f', ), data_dims=dict(argstr='-datadims %s', units='voxels', ), environ=dict(nohash=True, usedefault=True, ), gzip=dict(argstr='-gzip', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', mandatory=True, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), inputmodel=dict(argstr='-inputmodel %s', usedefault=True, ), ipthresh=dict(argstr='-ipthresh %f', ), iterations=dict(argstr='-iterations %d', units='NA', ), maxcomponents=dict(argstr='-maxcomponents %d', units='NA', ), numpds=dict(argstr='-numpds %d', units='NA', ), out_file=dict(argstr='-outputfile %s', genfile=True, position=-1, ), output_root=dict(argstr='-outputroot %s', position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), pdf=dict(argstr='-pdf %s', ), seed_file=dict(argstr='-seedfile %s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_dims=dict(argstr='-voxeldims %s', units='mm', ), ) inputs = TrackPICo.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TrackPICo_outputs(): output_map = dict(tracked=dict(), ) outputs = TrackPICo.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_TractShredder.py000066400000000000000000000024221227300005300262540ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.convert import TractShredder def test_TractShredder_inputs(): input_map = dict(args=dict(argstr='%s', ), bunchsize=dict(argstr='%d', position=2, units='NA', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', mandatory=True, position=-2, ), offset=dict(argstr='%d', position=1, units='NA', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), space=dict(argstr='%d', position=3, units='NA', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = TractShredder.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TractShredder_outputs(): output_map = dict(shredded=dict(), ) outputs = TractShredder.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
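# --- illustrative sketch (not part of the generated tests) ------------------
# The tracking interfaces tested above (TrackDT, TrackPICo, TrackBootstrap, ...)
# write Camino streamlines, and the camino2trackvis package that follows
# converts them to TrackVis .trk files. The snippet below is a hedged sketch of
# wiring the two together: the seed file name is a hypothetical placeholder,
# the tensor data name is reused from the dti.py docstrings, the remaining
# values come from the Camino2Trackvis docstring, and running it assumes both
# Camino and camino-trackvis are installed.
import nipype.pipeline.engine as pe
import nipype.interfaces.camino as camino
import nipype.interfaces.camino2trackvis as cam2trk


def build_track_to_trk_workflow():
    # deterministic DT tractography on tensor-fitted data
    track = pe.Node(interface=camino.TrackDT(), name='track')
    track.inputs.in_file = 'tensor_fitted_data.Bdouble'  # placeholder from the dti.py docstrings
    track.inputs.seed_file = 'seed_mask.nii'             # hypothetical seed ROI

    # convert the Camino streamlines to TrackVis format for visualisation
    c2t = pe.Node(interface=cam2trk.Camino2Trackvis(), name='camino2trackvis')
    c2t.inputs.min_length = 30
    c2t.inputs.data_dims = [128, 104, 64]
    c2t.inputs.voxel_dims = [2.0, 2.0, 2.0]
    c2t.inputs.voxel_order = 'LAS'

    wf = pe.Workflow(name='track_to_trk')
    wf.connect([(track, c2t, [('tracked', 'in_file')])])
    return wf


if __name__ == '__main__':
    # guarded so that importing this module never triggers a run
    build_track_to_trk_workflow().run()  # needs Camino and camino-trackvis installed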
nipype-0.9.2/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py000066400000000000000000000031241227300005300264710ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino.convert import VtkStreamlines def test_VtkStreamlines_inputs(): input_map = dict(args=dict(argstr='%s', ), colourorient=dict(argstr='-colourorient', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr=' < %s', mandatory=True, position=-2, ), inputmodel=dict(argstr='-inputmodel %s', usedefault=True, ), interpolate=dict(argstr='-interpolate', ), interpolatescalars=dict(argstr='-interpolatescalars', ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), scalar_file=dict(argstr='-scalarfile %s', position=3, ), seed_file=dict(argstr='-seedfile %s', position=1, ), target_file=dict(argstr='-targetfile %s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), voxeldims=dict(argstr='-voxeldims %s', position=4, units='mm', ), ) inputs = VtkStreamlines.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_VtkStreamlines_outputs(): output_map = dict(vtk=dict(), ) outputs = VtkStreamlines.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino2trackvis/000077500000000000000000000000001227300005300215445ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/camino2trackvis/__init__.py000066400000000000000000000003241227300005300236540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Camino2Trackvis top level namespace """ from .convert import Camino2Trackvis, Trackvis2Camino nipype-0.9.2/nipype/interfaces/camino2trackvis/convert.py000066400000000000000000000115031227300005300235760ustar00rootroot00000000000000""" Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File from nipype.utils.filemanip import split_filename import os """Provides interfaces to various commands provided by Camino-Trackvis """ class Camino2TrackvisInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='-i %s', mandatory=True, position=1, desc='The input .Bfloat (camino) file.') out_file = File(argstr='-o %s', genfile=True, position=2, desc='The filename to which to write the .trk (trackvis) file.') min_length = traits.Float(argstr='-l %d', position=3, units='mm', desc='The minimum length of tracts to output') data_dims = traits.List(traits.Int, argstr='-d %s', sep=',', mandatory=True, position=4, minlen=3, maxlen=3, desc='Three comma-separated integers giving the number of voxels along each dimension of the source scans.') voxel_dims = traits.List(traits.Float, argstr='-x %s', sep=',', mandatory=True, position=5, minlen=3, maxlen=3, desc='Three comma-separated numbers giving the size of each voxel in mm.') #Change to enum with all combinations? i.e. LAS, LPI, RAS, etc.. 
voxel_order = File(argstr='--voxel-order %s', mandatory=True, position=6, desc='Set the order in which various directions were stored.\ Specify with three letters consisting of one each \ from the pairs LR, AP, and SI. These stand for Left-Right, \ Anterior-Posterior, and Superior-Inferior. \ Whichever is specified in each position will \ be the direction of increasing order.') nifti_file = File(argstr='--nifti %s', exists=True, position=7, desc='Read coordinate system from a NIfTI file.') class Camino2TrackvisOutputSpec(TraitedSpec): trackvis = File(exists=True, desc='The filename to which to write the .trk (trackvis) file.') class Camino2Trackvis(CommandLine): """ Wraps camino_to_trackvis from Camino-Trackvis Convert files from camino .Bfloat format to trackvis .trk format. Example ------- >>> import nipype.interfaces.camino2trackvis as cam2trk >>> c2t = cam2trk.Camino2Trackvis() >>> c2t.inputs.in_file = 'data.Bfloat' >>> c2t.inputs.out_file = 'streamlines.trk' >>> c2t.inputs.min_length = 30 >>> c2t.inputs.data_dims = [128, 104, 64] >>> c2t.inputs.voxel_dims = [2.0, 2.0, 2.0] >>> c2t.inputs.voxel_order = 'LAS' >>> c2t.run() # doctest: +SKIP """ _cmd = 'camino_to_trackvis' input_spec=Camino2TrackvisInputSpec output_spec=Camino2TrackvisOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['trackvis'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name is 'out_file': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '.trk' class Trackvis2CaminoInputSpec(CommandLineInputSpec): """ Wraps trackvis_to_camino from Camino-Trackvis Convert files from trackvis .trk format to camino .Bfloat format. Example ------- >>> import nipype.interfaces.camino2trackvis as cam2trk >>> t2c = cam2trk.Trackvis2Camino() >>> t2c.inputs.in_file = 'streamlines.trk' >>> t2c.inputs.out_file = 'streamlines.Bfloat' >>> t2c.run() # doctest: +SKIP """ in_file = File(exists=True, argstr='-i %s', mandatory=True, position=1, desc='The input .trk (trackvis) file.') out_file = File(argstr='-o %s', genfile=True, position=2, desc='The filename to which to write the .Bfloat (camino).') append_file = File(exists=True, argstr='-a %s', position=2, desc='A file to which to append the .Bfloat data. 
') class Trackvis2CaminoOutputSpec(TraitedSpec): camino = File(exists=True, desc='The filename to which to write the .Bfloat (camino).') class Trackvis2Camino(CommandLine): _cmd = 'trackvis_to_camino' input_spec=Trackvis2CaminoInputSpec output_spec=Trackvis2CaminoOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['camino'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name is 'out_file': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '.Bfloat' nipype-0.9.2/nipype/interfaces/camino2trackvis/setup.py000066400000000000000000000007241227300005300232610ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('camino2trackvis', parent_package, top_path) #config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/camino2trackvis/tests/000077500000000000000000000000001227300005300227065ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py000066400000000000000000000027641227300005300303570ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino2trackvis.convert import Camino2Trackvis def test_Camino2Trackvis_inputs(): input_map = dict(args=dict(argstr='%s', ), data_dims=dict(argstr='-d %s', mandatory=True, position=4, sep=',', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', mandatory=True, position=1, ), min_length=dict(argstr='-l %d', position=3, units='mm', ), nifti_file=dict(argstr='--nifti %s', position=7, ), out_file=dict(argstr='-o %s', genfile=True, position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_dims=dict(argstr='-x %s', mandatory=True, position=5, sep=',', ), voxel_order=dict(argstr='--voxel-order %s', mandatory=True, position=6, ), ) inputs = Camino2Trackvis.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Camino2Trackvis_outputs(): output_map = dict(trackvis=dict(), ) outputs = Camino2Trackvis.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py000066400000000000000000000022211227300005300303430ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.camino2trackvis.convert import Trackvis2Camino def test_Trackvis2Camino_inputs(): input_map = dict(append_file=dict(argstr='-a %s', position=2, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', mandatory=True, position=1, ), out_file=dict(argstr='-o %s', genfile=True, position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Trackvis2Camino.input_spec() for key, 
metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Trackvis2Camino_outputs(): output_map = dict(camino=dict(), ) outputs = Trackvis2Camino.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/cmtk/000077500000000000000000000000001227300005300174035ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/cmtk/__init__.py000066400000000000000000000003431227300005300215140ustar00rootroot00000000000000from .cmtk import ROIGen, CreateMatrix, CreateNodes from .nx import NetworkXMetrics, AverageNetworks from .parcellation import Parcellate from .convert import CFFConverter, MergeCNetworks from .nbs import NetworkBasedStatistic nipype-0.9.2/nipype/interfaces/cmtk/cmtk.py000066400000000000000000001121121227300005300207110ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits, File, TraitedSpec, InputMultiPath, Directory, OutputMultiPath, isdefined) from nipype.utils.filemanip import split_filename import pickle import scipy.io as sio import os, os.path as op import numpy as np import nibabel as nb import networkx as nx import sys from ... import logging iflogger = logging.getLogger('interface') def length(xyz, along=False): """ Euclidean length of track line Parameters ---------- xyz : array-like shape (N,3) array representing x,y,z of N points in a track along : bool, optional If True, return array giving cumulative length along track, otherwise (default) return scalar giving total length. Returns ------- L : scalar or array shape (N-1,) scalar in case of `along` == False, giving total length, array if `along` == True, giving cumulative lengths. 
Examples -------- >>> xyz = np.array([[1,1,1],[2,3,4],[0,0,0]]) >>> expected_lens = np.sqrt([1+2**2+3**2, 2**2+3**2+4**2]) >>> length(xyz) == expected_lens.sum() True >>> len_along = length(xyz, along=True) >>> np.allclose(len_along, expected_lens.cumsum()) True >>> length([]) 0 >>> length([[1, 2, 3]]) 0 >>> length([], along=True) array([0]) """ xyz = np.asarray(xyz) if xyz.shape[0] < 2: if along: return np.array([0]) return 0 dists = np.sqrt((np.diff(xyz, axis=0) ** 2).sum(axis=1)) if along: return np.cumsum(dists) return np.sum(dists) def get_rois_crossed(pointsmm, roiData, voxelSize): n_points = len(pointsmm) rois_crossed = [] for j in xrange(0, n_points): # store point x = int(pointsmm[j, 0] / float(voxelSize[0])) y = int(pointsmm[j, 1] / float(voxelSize[1])) z = int(pointsmm[j, 2] / float(voxelSize[2])) if not roiData[x, y, z] == 0: rois_crossed.append(roiData[x, y, z]) rois_crossed = dict.fromkeys(rois_crossed).keys() #Removed duplicates from the list return rois_crossed def get_connectivity_matrix(n_rois, list_of_roi_crossed_lists): connectivity_matrix = np.zeros((n_rois, n_rois), dtype=np.uint) for rois_crossed in list_of_roi_crossed_lists: for idx_i, roi_i in enumerate(rois_crossed): for idx_j, roi_j in enumerate(rois_crossed): if idx_i > idx_j: if not roi_i == roi_j: connectivity_matrix[roi_i - 1, roi_j - 1] += 1 connectivity_matrix = connectivity_matrix + connectivity_matrix.T return connectivity_matrix def create_allpoints_cmat(streamlines, roiData, voxelSize, n_rois): """ Create the intersection arrays for each fiber """ n_fib = len(streamlines) pc = -1 # Computation for each fiber final_fiber_ids = [] list_of_roi_crossed_lists = [] for i, fiber in enumerate(streamlines): pcN = int(round(float(100 * i) / n_fib)) if pcN > pc and pcN % 1 == 0: pc = pcN print '%4.0f%%' % (pc) rois_crossed = get_rois_crossed(fiber[0], roiData, voxelSize) if len(rois_crossed) > 0: list_of_roi_crossed_lists.append(list(rois_crossed)) final_fiber_ids.append(i) connectivity_matrix = get_connectivity_matrix(n_rois, list_of_roi_crossed_lists) dis = n_fib - len(final_fiber_ids) iflogger.info("Found %i (%f percent out of %i fibers) fibers that start or terminate in a voxel which is not labeled. 
(orphans)" % (dis, dis * 100.0 / n_fib, n_fib)) iflogger.info("Valid fibers: %i (%f percent)" % (n_fib - dis, 100 - dis * 100.0 / n_fib)) iflogger.info('Returning the intersecting point connectivity matrix') return connectivity_matrix, final_fiber_ids def create_endpoints_array(fib, voxelSize): """ Create the endpoints arrays for each fiber Parameters ---------- fib: the fibers data voxelSize: 3-tuple containing the voxel size of the ROI image Returns ------- (endpoints: matrix of size [#fibers, 2, 3] containing for each fiber the index of its first and last point in the voxelSize volume endpointsmm) : endpoints in milimeter coordinates """ # Init n = len(fib) endpoints = np.zeros((n, 2, 3)) endpointsmm = np.zeros((n, 2, 3)) pc = -1 # Computation for each fiber for i, fi in enumerate(fib): f = fi[0] # store startpoint endpoints[i, 0, :] = f[0, :] # store endpoint endpoints[i, 1, :] = f[-1, :] # store startpoint endpointsmm[i, 0, :] = f[0, :] # store endpoint endpointsmm[i, 1, :] = f[-1, :] # Translate from mm to index endpoints[i, 0, 0] = int(endpoints[i, 0, 0] / float(voxelSize[0])) endpoints[i, 0, 1] = int(endpoints[i, 0, 1] / float(voxelSize[1])) endpoints[i, 0, 2] = int(endpoints[i, 0, 2] / float(voxelSize[2])) endpoints[i, 1, 0] = int(endpoints[i, 1, 0] / float(voxelSize[0])) endpoints[i, 1, 1] = int(endpoints[i, 1, 1] / float(voxelSize[1])) endpoints[i, 1, 2] = int(endpoints[i, 1, 2] / float(voxelSize[2])) # Return the matrices iflogger.info('Returning the endpoint matrix') return (endpoints, endpointsmm) def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_name, endpoint_name, intersections=False): """ Create the connection matrix for each resolution using fibers and ROIs. """ stats = {} iflogger.info('Running cmat function') # Identify the endpoints of each fiber en_fname = op.abspath(endpoint_name + '_endpoints.npy') en_fnamemm = op.abspath(endpoint_name + '_endpointsmm.npy') iflogger.info('Reading Trackvis file {trk}'.format(trk=track_file)) fib, hdr = nb.trackvis.read(track_file, False) stats['orig_n_fib'] = len(fib) roi = nb.load(roi_file) roiData = roi.get_data() roiVoxelSize = roi.get_header().get_zooms() (endpoints, endpointsmm) = create_endpoints_array(fib, roiVoxelSize) # Output endpoint arrays iflogger.info('Saving endpoint array: {array}'.format(array=en_fname)) np.save(en_fname, endpoints) iflogger.info('Saving endpoint array in mm: {array}'.format(array=en_fnamemm)) np.save(en_fnamemm, endpointsmm) n = len(fib) iflogger.info('Number of fibers {num}'.format(num=n)) # Create empty fiber label array fiberlabels = np.zeros((n, 2)) final_fiberlabels = [] final_fibers_idx = [] # Add node information from specified parcellation scheme path, name, ext = split_filename(resolution_network_file) if ext == '.pck': gp = nx.read_gpickle(resolution_network_file) elif ext == '.graphml': gp = nx.read_graphml(resolution_network_file) nROIs = len(gp.nodes()) # add node information from parcellation if gp.node[gp.nodes()[0]].has_key('dn_position'): G = gp.copy() else: G = nx.Graph() for u, d in gp.nodes_iter(data=True): G.add_node(int(u), d) # compute a position for the node based on the mean position of the # ROI in voxel coordinates (segmentation volume ) xyz = tuple(np.mean(np.where(np.flipud(roiData) == int(d["dn_correspondence_id"])) , axis=1)) G.node[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]]) if intersections: iflogger.info("Filtering tractography from intersections") intersection_matrix, final_fiber_ids = create_allpoints_cmat(fib, roiData, 
roiVoxelSize, nROIs) finalfibers_fname = op.abspath(endpoint_name + '_intersections_streamline_final.trk') stats['intersections_n_fib'] = save_fibers(hdr, fib, finalfibers_fname, final_fiber_ids) intersection_matrix = np.matrix(intersection_matrix) I = G.copy() H = nx.from_numpy_matrix(np.matrix(intersection_matrix)) H = nx.relabel_nodes(H, lambda x: x + 1) #relabel nodes so they start at 1 I.add_weighted_edges_from(((u, v, d['weight']) for u, v, d in H.edges(data=True))) dis = 0 for i in xrange(endpoints.shape[0]): # ROI start => ROI end try: startROI = int(roiData[endpoints[i, 0, 0], endpoints[i, 0, 1], endpoints[i, 0, 2]]) endROI = int(roiData[endpoints[i, 1, 0], endpoints[i, 1, 1], endpoints[i, 1, 2]]) except IndexError: iflogger.error(("AN INDEXERROR EXCEPTION OCCURED FOR FIBER %s. PLEASE CHECK ENDPOINT GENERATION" % i)) break # Filter if startROI == 0 or endROI == 0: dis += 1 fiberlabels[i, 0] = -1 continue if startROI > nROIs or endROI > nROIs: iflogger.error("Start or endpoint of fiber terminate in a voxel which is labeled higher") iflogger.error("than is expected by the parcellation node information.") iflogger.error("Start ROI: %i, End ROI: %i" % (startROI, endROI)) iflogger.error("This needs bugfixing!") continue # Update fiber label # switch the rois in order to enforce startROI < endROI if endROI < startROI: tmp = startROI startROI = endROI endROI = tmp fiberlabels[i, 0] = startROI fiberlabels[i, 1] = endROI final_fiberlabels.append([ startROI, endROI ]) final_fibers_idx.append(i) # Add edge to graph if G.has_edge(startROI, endROI) and G.edge[startROI][endROI].has_key('fiblist'): G.edge[startROI][endROI]['fiblist'].append(i) else: G.add_edge(startROI, endROI, fiblist=[i]) # create a final fiber length array finalfiberlength = [] if intersections: final_fibers_indices = final_fiber_ids else: final_fibers_indices = final_fibers_idx for idx in final_fibers_indices: # compute length of fiber finalfiberlength.append(length(fib[idx][0])) # convert to array final_fiberlength_array = np.array(finalfiberlength) # make final fiber labels as array final_fiberlabels_array = np.array(final_fiberlabels, dtype=int) iflogger.info("Found %i (%f percent out of %i fibers) fibers that start or terminate in a voxel which is not labeled. 
(orphans)" % (dis, dis * 100.0 / n, n)) iflogger.info("Valid fibers: %i (%f percent)" % (n - dis, 100 - dis * 100.0 / n)) numfib = nx.Graph() numfib.add_nodes_from(G) fibmean = numfib.copy() fibmedian = numfib.copy() fibdev = numfib.copy() for u, v, d in G.edges_iter(data=True): G.remove_edge(u, v) di = {} if d.has_key('fiblist'): di['number_of_fibers'] = len(d['fiblist']) idx = np.where((final_fiberlabels_array[:, 0] == int(u)) & (final_fiberlabels_array[:, 1] == int(v)))[0] di['fiber_length_mean'] = float(np.mean(final_fiberlength_array[idx])) di['fiber_length_median'] = float(np.median(final_fiberlength_array[idx])) di['fiber_length_std'] = float(np.std(final_fiberlength_array[idx])) else: di['number_of_fibers'] = 0 di['fiber_length_mean'] = 0 di['fiber_length_median'] = 0 di['fiber_length_std'] = 0 if not u == v: #Fix for self loop problem G.add_edge(u, v, di) if d.has_key('fiblist'): numfib.add_edge(u, v, weight=di['number_of_fibers']) fibmean.add_edge(u, v, weight=di['fiber_length_mean']) fibmedian.add_edge(u, v, weight=di['fiber_length_median']) fibdev.add_edge(u, v, weight=di['fiber_length_std']) iflogger.info('Writing network as {ntwk}'.format(ntwk=matrix_name)) nx.write_gpickle(G, op.abspath(matrix_name)) numfib_mlab = nx.to_numpy_matrix(numfib, dtype=int) numfib_dict = {'number_of_fibers': numfib_mlab} fibmean_mlab = nx.to_numpy_matrix(fibmean, dtype=np.float64) fibmean_dict = {'mean_fiber_length':fibmean_mlab} fibmedian_mlab = nx.to_numpy_matrix(fibmedian, dtype=np.float64) fibmedian_dict = {'median_fiber_length':fibmedian_mlab} fibdev_mlab = nx.to_numpy_matrix(fibdev, dtype=np.float64) fibdev_dict = {'fiber_length_std':fibdev_mlab} if intersections: path, name, ext = split_filename(matrix_name) intersection_matrix_name = op.abspath(name + '_intersections') + ext iflogger.info('Writing intersection network as {ntwk}'.format(ntwk=intersection_matrix_name)) nx.write_gpickle(I, intersection_matrix_name) path, name, ext = split_filename(matrix_mat_name) if not ext == '.mat': ext = '.mat' matrix_mat_name = matrix_mat_name + ext iflogger.info('Writing matlab matrix as {mat}'.format(mat=matrix_mat_name)) sio.savemat(matrix_mat_name, numfib_dict) if intersections: intersect_dict = {'intersections': intersection_matrix} intersection_matrix_mat_name = op.abspath(name + '_intersections') + ext iflogger.info('Writing intersection matrix as {mat}'.format(mat=intersection_matrix_mat_name)) sio.savemat(intersection_matrix_mat_name, intersect_dict) mean_fiber_length_matrix_name = op.abspath(name + '_mean_fiber_length') + ext iflogger.info('Writing matlab mean fiber length matrix as {mat}'.format(mat=mean_fiber_length_matrix_name)) sio.savemat(mean_fiber_length_matrix_name, fibmean_dict) median_fiber_length_matrix_name = op.abspath(name + '_median_fiber_length') + ext iflogger.info('Writing matlab median fiber length matrix as {mat}'.format(mat=median_fiber_length_matrix_name)) sio.savemat(median_fiber_length_matrix_name, fibmedian_dict) fiber_length_std_matrix_name = op.abspath(name + '_fiber_length_std') + ext iflogger.info('Writing matlab fiber length deviation matrix as {mat}'.format(mat=fiber_length_std_matrix_name)) sio.savemat(fiber_length_std_matrix_name, fibdev_dict) fiberlengths_fname = op.abspath(endpoint_name + '_final_fiberslength.npy') iflogger.info("Storing final fiber length array as %s" % fiberlengths_fname) np.save(fiberlengths_fname, final_fiberlength_array) fiberlabels_fname = op.abspath(endpoint_name + '_filtered_fiberslabel.npy') iflogger.info("Storing all fiber labels 
(with orphans) as %s" % fiberlabels_fname) np.save(fiberlabels_fname, np.array(fiberlabels, dtype=np.int32),) fiberlabels_noorphans_fname = op.abspath(endpoint_name + '_final_fiberslabels.npy') iflogger.info("Storing final fiber labels (no orphans) as %s" % fiberlabels_noorphans_fname) np.save(fiberlabels_noorphans_fname, final_fiberlabels_array) iflogger.info("Filtering tractography - keeping only no orphan fibers") finalfibers_fname = op.abspath(endpoint_name + '_streamline_final.trk') stats['endpoint_n_fib'] = save_fibers(hdr, fib, finalfibers_fname, final_fibers_idx) stats['endpoints_percent'] = float(stats['endpoint_n_fib'])/float(stats['orig_n_fib'])*100 stats['intersections_percent'] = float(stats['intersections_n_fib'])/float(stats['orig_n_fib'])*100 out_stats_file = op.abspath(endpoint_name + '_statistics.mat') iflogger.info("Saving matrix creation statistics as %s" % out_stats_file) sio.savemat(out_stats_file, stats) def save_fibers(oldhdr, oldfib, fname, indices): """ Stores a new trackvis file fname using only given indices """ hdrnew = oldhdr.copy() outstreams = [] for i in indices: outstreams.append(oldfib[i]) n_fib_out = len(outstreams) hdrnew['n_count'] = n_fib_out iflogger.info("Writing final non-orphan fibers as %s" % fname) nb.trackvis.write(fname, outstreams, hdrnew) return n_fib_out class CreateMatrixInputSpec(TraitedSpec): roi_file = File(exists=True, mandatory=True, desc='Freesurfer aparc+aseg file') tract_file = File(exists=True, mandatory=True, desc='Trackvis tract file') resolution_network_file = File(exists=True, mandatory=True, desc='Parcellation files from Connectome Mapping Toolkit') count_region_intersections = traits.Bool(False, usedefault=True, desc='Counts all of the fiber-region traversals in the connectivity matrix (requires significantly more computational time)') out_matrix_file = File(genfile=True, desc='NetworkX graph describing the connectivity') out_matrix_mat_file = File('cmatrix.mat', usedefault=True, desc='Matlab matrix describing the connectivity') out_mean_fiber_length_matrix_mat_file = File(genfile=True, desc='Matlab matrix describing the mean fiber lengths between each node.') out_median_fiber_length_matrix_mat_file = File(genfile=True, desc='Matlab matrix describing the mean fiber lengths between each node.') out_fiber_length_std_matrix_mat_file = File(genfile=True, desc='Matlab matrix describing the deviation in fiber lengths connecting each node.') out_intersection_matrix_mat_file = File(genfile=True, desc='Matlab connectivity matrix if all region/fiber intersections are counted.') out_endpoint_array_name = File(genfile=True, desc='Name for the generated endpoint arrays') class CreateMatrixOutputSpec(TraitedSpec): matrix_file = File(desc='NetworkX graph describing the connectivity', exists=True) intersection_matrix_file = File(desc='NetworkX graph describing the connectivity', exists=True) matrix_files = OutputMultiPath(File(desc='All of the gpickled network files output by this interface', exists=True)) matlab_matrix_files = OutputMultiPath(File(desc='All of the MATLAB .mat files output by this interface', exists=True)) matrix_mat_file = File(desc='Matlab matrix describing the connectivity', exists=True) intersection_matrix_mat_file = File(desc='Matlab matrix describing the mean fiber lengths between each node.', exists=True) mean_fiber_length_matrix_mat_file = File(desc='Matlab matrix describing the mean fiber lengths between each node.', exists=True) median_fiber_length_matrix_mat_file = File(desc='Matlab matrix describing the median 
fiber lengths between each node.', exists=True) fiber_length_std_matrix_mat_file = File(desc='Matlab matrix describing the deviation in fiber lengths connecting each node.', exists=True) endpoint_file = File(desc='Saved Numpy array with the endpoints of each fiber', exists=True) endpoint_file_mm = File(desc='Saved Numpy array with the endpoints of each fiber (in millimeters)', exists=True) fiber_length_file = File(desc='Saved Numpy array with the lengths of each fiber', exists=True) fiber_label_file = File(desc='Saved Numpy array with the labels for each fiber', exists=True) fiber_labels_noorphans = File(desc='Saved Numpy array with the labels for each non-orphan fiber', exists=True) filtered_tractography = File(desc='TrackVis file containing only those fibers originate in one and terminate in another region', exists=True) filtered_tractography_by_intersections = File(desc='TrackVis file containing all fibers which connect two regions', exists=True) filtered_tractographies = OutputMultiPath(File(desc='TrackVis file containing only those fibers originate in one and terminate in another region', exists=True)) stats_file = File(desc='Saved Matlab .mat file with the number of fibers saved at each stage', exists=True) class CreateMatrix(BaseInterface): """ Performs connectivity mapping and outputs the result as a NetworkX graph and a Matlab matrix Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> conmap = cmtk.CreateMatrix() >>> conmap.roi_file = 'fsLUT_aparc+aseg.nii' >>> conmap.tract_file = 'fibers.trk' >>> conmap.run() # doctest: +SKIP """ input_spec = CreateMatrixInputSpec output_spec = CreateMatrixOutputSpec def _run_interface(self, runtime): if isdefined(self.inputs.out_matrix_file): path, name, _ = split_filename(self.inputs.out_matrix_file) matrix_file = op.abspath(name + '.pck') else: matrix_file = self._gen_outfilename('.pck') matrix_mat_file = op.abspath(self.inputs.out_matrix_mat_file) path, name, ext = split_filename(matrix_mat_file) if not ext == '.mat': ext = '.mat' matrix_mat_file = matrix_mat_file + ext if isdefined(self.inputs.out_mean_fiber_length_matrix_mat_file): mean_fiber_length_matrix_mat_file = op.abspath(self.inputs.out_mean_fiber_length_matrix_mat_file) else: mean_fiber_length_matrix_name = op.abspath(self._gen_outfilename('_mean_fiber_length.mat')) if isdefined(self.inputs.out_median_fiber_length_matrix_mat_file): median_fiber_length_matrix_mat_file = op.abspath(self.inputs.out_median_fiber_length_matrix_mat_file) else: median_fiber_length_matrix_name = op.abspath(self._gen_outfilename('_median_fiber_length.mat')) if isdefined(self.inputs.out_fiber_length_std_matrix_mat_file): fiber_length_std_matrix_mat_file = op.abspath(self.inputs.out_fiber_length_std_matrix_mat_file) else: fiber_length_std_matrix_name = op.abspath(self._gen_outfilename('_fiber_length_std.mat')) if not isdefined(self.inputs.out_endpoint_array_name): _, endpoint_name , _ = split_filename(self.inputs.tract_file) endpoint_name = op.abspath(endpoint_name) else: endpoint_name = op.abspath(self.inputs.out_endpoint_array_name) cmat(self.inputs.tract_file, self.inputs.roi_file, self.inputs.resolution_network_file, matrix_file, matrix_mat_file, endpoint_name, self.inputs.count_region_intersections) return runtime def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_matrix_file): path, name, _ = split_filename(self.inputs.out_matrix_file) out_matrix_file = op.abspath(name + '.pck') out_intersection_matrix_file = op.abspath(name + '_intersections.pck') else: 
out_matrix_file = op.abspath(self._gen_outfilename('.pck')) out_intersection_matrix_file = op.abspath(self._gen_outfilename('_intersections.pck')) outputs['matrix_file'] = out_matrix_file outputs['intersection_matrix_file'] = out_intersection_matrix_file matrix_mat_file = op.abspath(self.inputs.out_matrix_mat_file) path, name, ext = split_filename(matrix_mat_file) if not ext == '.mat': ext = '.mat' matrix_mat_file = matrix_mat_file + ext outputs['matrix_mat_file'] = matrix_mat_file if isdefined(self.inputs.out_mean_fiber_length_matrix_mat_file): outputs['mean_fiber_length_matrix_mat_file'] = op.abspath(self.inputs.out_mean_fiber_length_matrix_mat_file) else: outputs['mean_fiber_length_matrix_mat_file'] = op.abspath(self._gen_outfilename('_mean_fiber_length.mat')) if isdefined(self.inputs.out_median_fiber_length_matrix_mat_file): outputs['median_fiber_length_matrix_mat_file'] = op.abspath(self.inputs.out_median_fiber_length_matrix_mat_file) else: outputs['median_fiber_length_matrix_mat_file'] = op.abspath(self._gen_outfilename('_median_fiber_length.mat')) if isdefined(self.inputs.out_fiber_length_std_matrix_mat_file): outputs['fiber_length_std_matrix_mat_file'] = op.abspath(self.inputs.out_fiber_length_std_matrix_mat_file) else: outputs['fiber_length_std_matrix_mat_file'] = op.abspath(self._gen_outfilename('_fiber_length_std.mat')) if isdefined(self.inputs.out_intersection_matrix_mat_file): outputs['intersection_matrix_mat_file'] = op.abspath(self.inputs.out_intersection_matrix_mat_file) else: outputs['intersection_matrix_mat_file'] = op.abspath(self._gen_outfilename('_intersections.mat')) if isdefined(self.inputs.out_endpoint_array_name): endpoint_name = self.inputs.out_endpoint_array_name outputs['endpoint_file'] = op.abspath(self.inputs.out_endpoint_array_name + '_endpoints.npy') outputs['endpoint_file_mm'] = op.abspath(self.inputs.out_endpoint_array_name + '_endpointsmm.npy') outputs['fiber_length_file'] = op.abspath(self.inputs.out_endpoint_array_name + '_final_fiberslength.npy') outputs['fiber_label_file'] = op.abspath(self.inputs.out_endpoint_array_name + '_filtered_fiberslabel.npy') outputs['fiber_labels_noorphans'] = op.abspath(self.inputs.out_endpoint_array_name + '_final_fiberslabels.npy') else: _, endpoint_name , _ = split_filename(self.inputs.tract_file) outputs['endpoint_file'] = op.abspath(endpoint_name + '_endpoints.npy') outputs['endpoint_file_mm'] = op.abspath(endpoint_name + '_endpointsmm.npy') outputs['fiber_length_file'] = op.abspath(endpoint_name + '_final_fiberslength.npy') outputs['fiber_label_file'] = op.abspath(endpoint_name + '_filtered_fiberslabel.npy') outputs['fiber_labels_noorphans'] = op.abspath(endpoint_name + '_final_fiberslabels.npy') if self.inputs.count_region_intersections: outputs['matrix_files'] = [out_matrix_file, out_intersection_matrix_file] outputs['matlab_matrix_files'] = [outputs['matrix_mat_file'], outputs['mean_fiber_length_matrix_mat_file'], outputs['median_fiber_length_matrix_mat_file'], outputs['fiber_length_std_matrix_mat_file'], outputs['intersection_matrix_mat_file']] else: outputs['matrix_files'] = [out_matrix_file] outputs['matlab_matrix_files'] = [outputs['matrix_mat_file'], outputs['mean_fiber_length_matrix_mat_file'], outputs['median_fiber_length_matrix_mat_file'], outputs['fiber_length_std_matrix_mat_file']] outputs['filtered_tractography'] = op.abspath(endpoint_name + '_streamline_final.trk') outputs['filtered_tractography_by_intersections'] = op.abspath(endpoint_name + '_intersections_streamline_final.trk') 
outputs['filtered_tractographies'] = [outputs['filtered_tractography'], outputs['filtered_tractography_by_intersections']] outputs['stats_file'] = op.abspath(endpoint_name + '_statistics.mat') return outputs def _gen_outfilename(self, ext): if ext.endswith("mat") and isdefined(self.inputs.out_matrix_mat_file): _, name , _ = split_filename(self.inputs.out_matrix_mat_file) elif isdefined(self.inputs.out_matrix_file): _, name , _ = split_filename(self.inputs.out_matrix_file) else: _, name , _ = split_filename(self.inputs.tract_file) return name + ext class ROIGenInputSpec(BaseInterfaceInputSpec): aparc_aseg_file = File(exists=True, mandatory=True, desc='Freesurfer aparc+aseg file') LUT_file = File(exists=True, xor=['use_freesurfer_LUT'], desc='Custom lookup table (cf. FreeSurferColorLUT.txt)') use_freesurfer_LUT = traits.Bool(xor=['LUT_file'], desc='Boolean value; Set to True to use default Freesurfer LUT, False for custom LUT') freesurfer_dir = Directory(requires=['use_freesurfer_LUT'], desc='Freesurfer main directory') out_roi_file = File(genfile=True, desc='Region of Interest file for connectivity mapping') out_dict_file = File(genfile=True, desc='Label dictionary saved in Pickle format') class ROIGenOutputSpec(TraitedSpec): roi_file = File(desc='Region of Interest file for connectivity mapping') dict_file = File(desc='Label dictionary saved in Pickle format') class ROIGen(BaseInterface): """ Generates a ROI file for connectivity mapping and a dictionary file containing relevant node information Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> rg = cmtk.ROIGen() >>> rg.inputs.aparc_aseg_file = 'aparc+aseg.nii' >>> rg.inputs.use_freesurfer_LUT = True >>> rg.inputs.freesurfer_dir = '/usr/local/freesurfer' >>> rg.run() # doctest: +SKIP The label dictionary is written to disk using Pickle. 
Resulting data can be loaded using: >>> file = open("FreeSurferColorLUT_adapted_aparc+aseg_out.pck", "r") >>> file = open("fsLUT_aparc+aseg.pck", "r") >>> labelDict = pickle.load(file) # doctest: +SKIP >>> print labelDict # doctest: +SKIP """ input_spec = ROIGenInputSpec output_spec = ROIGenOutputSpec def _run_interface(self, runtime): aparc_aseg_file = self.inputs.aparc_aseg_file aparcpath, aparcname, aparcext = split_filename(aparc_aseg_file) iflogger.info('Using Aparc+Aseg file: {name}'.format(name=aparcname + aparcext)) niiAPARCimg = nb.load(aparc_aseg_file) niiAPARCdata = niiAPARCimg.get_data() niiDataLabels = np.unique(niiAPARCdata) numDataLabels = np.size(niiDataLabels) iflogger.info('Number of labels in image: {n}'.format(n=numDataLabels)) write_dict = True if self.inputs.use_freesurfer_LUT: self.LUT_file = self.inputs.freesurfer_dir + '/FreeSurferColorLUT.txt' iflogger.info('Using Freesurfer LUT: {name}'.format(name=self.LUT_file)) prefix = 'fsLUT' elif not self.inputs.use_freesurfer_LUT and isdefined(self.inputs.LUT_file): self.LUT_file = op.abspath(self.inputs.LUT_file) lutpath, lutname, lutext = split_filename(self.LUT_file) iflogger.info('Using Custom LUT file: {name}'.format(name=lutname + lutext)) prefix = lutname else: prefix = 'hardcoded' write_dict = False if isdefined(self.inputs.out_roi_file): roi_file = op.abspath(self.inputs.out_roi_file) else: roi_file = op.abspath(prefix + '_' + aparcname + '.nii') if isdefined(self.inputs.out_dict_file): dict_file = op.abspath(self.inputs.out_dict_file) else: dict_file = op.abspath(prefix + '_' + aparcname + '.pck') if write_dict: iflogger.info('Lookup table: {name}'.format(name=op.abspath(self.LUT_file))) LUTlabelsRGBA = np.loadtxt(self.LUT_file, skiprows=4, usecols=[0, 1, 2, 3, 4, 5], comments='#', dtype={'names': ('index', 'label', 'R', 'G', 'B', 'A'), 'formats': ('int', '|S30', 'int', 'int', 'int', 'int')}) numLUTLabels = np.size(LUTlabelsRGBA) if numLUTLabels < numDataLabels: iflogger.error('LUT file provided does not contain all of the regions in the image') iflogger.error('Removing unmapped regions') iflogger.info('Number of labels in LUT: {n}'.format(n=numLUTLabels)) LUTlabelDict = {} """ Create dictionary for input LUT table""" for labels in xrange(0, numLUTLabels): LUTlabelDict[LUTlabelsRGBA[labels][0]] = [LUTlabelsRGBA[labels][1], LUTlabelsRGBA[labels][2], LUTlabelsRGBA[labels][3], LUTlabelsRGBA[labels][4], LUTlabelsRGBA[labels][5]] iflogger.info('Printing LUT label dictionary') iflogger.info(LUTlabelDict) mapDict = {} MAPPING = [[1, 2012], [2, 2019], [3, 2032], [4, 2014], [5, 2020], [6, 2018], [7, 2027], [8, 2028], [9, 2003], [10, 2024], [11, 2017], [12, 2026], [13, 2002], [14, 2023], [15, 2010], [16, 2022], [17, 2031], [18, 2029], [19, 2008], [20, 2025], [21, 2005], [22, 2021], [23, 2011], [24, 2013], [25, 2007], [26, 2016], [27, 2006], [28, 2033], [29, 2009], [30, 2015], [31, 2001], [32, 2030], [33, 2034], [34, 2035], [35, 49], [36, 50], [37, 51], [38, 52], [39, 58], [40, 53], [41, 54], [42, 1012], [43, 1019], [44, 1032], [45, 1014], [46, 1020], [47, 1018], [48, 1027], [49, 1028], [50, 1003], [51, 1024], [52, 1017], [53, 1026], [54, 1002], [55, 1023], [56, 1010], [57, 1022], [58, 1031], [59, 1029], [60, 1008], [61, 1025], [62, 1005], [63, 1021], [64, 1011], [65, 1013], [66, 1007], [67, 1016], [68, 1006], [69, 1033], [70, 1009], [71, 1015], [72, 1001], [73, 1030], [74, 1034], [75, 1035], [76, 10], [77, 11], [78, 12], [79, 13], [80, 26], [81, 17], [82, 18], [83, 16]] """ Create empty grey matter mask, Populate with only 
those regions defined in the mapping.""" niiGM = np.zeros(niiAPARCdata.shape, dtype=np.uint) for ma in MAPPING: niiGM[ niiAPARCdata == ma[1]] = ma[0] mapDict[ma[0]] = ma[1] iflogger.info('Grey matter mask created') greyMaskLabels = np.unique(niiGM) numGMLabels = np.size(greyMaskLabels) iflogger.info('Number of grey matter labels: {num}'.format(num=numGMLabels)) labelDict = {} GMlabelDict = {} for label in greyMaskLabels: try: mapDict[label] if write_dict: GMlabelDict['originalID'] = mapDict[label] except: iflogger.info('Label {lbl} not in provided mapping'.format(lbl=label)) if write_dict: del GMlabelDict GMlabelDict = {} GMlabelDict['labels'] = LUTlabelDict[label][0] GMlabelDict['colors'] = [LUTlabelDict[label][1], LUTlabelDict[label][2], LUTlabelDict[label][3]] GMlabelDict['a'] = LUTlabelDict[label][4] labelDict[label] = GMlabelDict roi_image = nb.Nifti1Image(niiGM, niiAPARCimg.get_affine(), niiAPARCimg.get_header()) iflogger.info('Saving ROI File to {path}'.format(path=roi_file)) nb.save(roi_image, roi_file) if write_dict: iflogger.info('Saving Dictionary File to {path} in Pickle format'.format(path=dict_file)) file = open(dict_file, 'w') pickle.dump(labelDict, file) file.close() return runtime def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.out_roi_file): outputs['roi_file'] = op.abspath(self.inputs.out_roi_file) else: outputs['roi_file'] = op.abspath(self._gen_outfilename('nii')) if isdefined(self.inputs.out_dict_file): outputs['dict_file'] = op.abspath(self.inputs.out_dict_file) else: outputs['dict_file'] = op.abspath(self._gen_outfilename('pck')) return outputs def _gen_outfilename(self, ext): _, name , _ = split_filename(self.inputs.aparc_aseg_file) if self.inputs.use_freesurfer_LUT: prefix = 'fsLUT' elif not self.inputs.use_freesurfer_LUT and isdefined(self.inputs.LUT_file): lutpath, lutname, lutext = split_filename(self.inputs.LUT_file) prefix = lutname else: prefix = 'hardcoded' return prefix + '_' + name + '.' + ext def create_nodes(roi_file, resolution_network_file, out_filename): G = nx.Graph() gp = nx.read_graphml(resolution_network_file) roi_image = nb.load(roi_file) roiData = roi_image.get_data() nROIs = len(gp.nodes()) for u, d in gp.nodes_iter(data=True): G.add_node(int(u), d) xyz = tuple(np.mean(np.where(np.flipud(roiData) == int(d["dn_correspondence_id"])) , axis=1)) G.node[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]]) nx.write_gpickle(G, out_filename) return out_filename class CreateNodesInputSpec(BaseInterfaceInputSpec): roi_file = File(exists=True, mandatory=True, desc='Region of interest file') resolution_network_file = File(exists=True, mandatory=True, desc='Parcellation file from Connectome Mapping Toolkit') out_filename = File('nodenetwork.pck', usedefault=True, desc='Output gpickled network with the nodes defined.') class CreateNodesOutputSpec(TraitedSpec): node_network = File(desc='Output gpickled network with the nodes defined.') class CreateNodes(BaseInterface): """ Generates a NetworkX graph containing nodes at the centroid of each region in the input ROI file. Node data is added from the resolution network file. 
Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> mknode = cmtk.CreateNodes() >>> mknode.inputs.roi_file = 'ROI_scale500.nii.gz' >>> mknode.run() # doctest: +SKIP """ input_spec = CreateNodesInputSpec output_spec = CreateNodesOutputSpec def _run_interface(self, runtime): iflogger.info('Creating nodes...') create_nodes(self.inputs.roi_file, self.inputs.resolution_network_file, self.inputs.out_filename) iflogger.info('Saving node network to {path}'.format(path=op.abspath(self.inputs.out_filename))) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['node_network'] = op.abspath(self.inputs.out_filename) return outputs nipype-0.9.2/nipype/interfaces/cmtk/convert.py000066400000000000000000000241731227300005300214440ustar00rootroot00000000000000""" Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os, os.path as op import datetime import string import warnings import networkx as nx from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits, File, TraitedSpec, InputMultiPath, isdefined) from nipype.utils.filemanip import split_filename from nipype.utils.misc import package_check have_cfflib = True try: package_check('cfflib') except Exception, e: have_cfflib = False else: import cfflib as cf class CFFConverterInputSpec(BaseInterfaceInputSpec): graphml_networks = InputMultiPath(File(exists=True), desc='list of graphML networks') gpickled_networks = InputMultiPath(File(exists=True), desc='list of gpickled Networkx graphs') gifti_surfaces = InputMultiPath(File(exists=True), desc='list of GIFTI surfaces') gifti_labels = InputMultiPath(File(exists=True), desc='list of GIFTI labels') nifti_volumes = InputMultiPath(File(exists=True), desc='list of NIFTI volumes') tract_files = InputMultiPath(File(exists=True), desc='list of Trackvis fiber files') timeseries_files = InputMultiPath(File(exists=True), desc='list of HDF5 timeseries files') script_files = InputMultiPath(File(exists=True), desc='list of script files to include') data_files = InputMultiPath(File(exists=True), desc='list of external data files (i.e. Numpy, HD5, XML) ') title = traits.Str(desc='Connectome Title') creator = traits.Str(desc='Creator') email = traits.Str(desc='Email address') publisher = traits.Str(desc='Publisher') license = traits.Str(desc='License') rights = traits.Str(desc='Rights') references = traits.Str(desc='References') relation = traits.Str(desc='Relation') species = traits.Str('Homo sapiens',desc='Species',usedefault=True) description = traits.Str('Created with the Nipype CFF converter', desc='Description', usedefault=True) out_file = File('connectome.cff', usedefault = True, desc='Output connectome file') class CFFConverterOutputSpec(TraitedSpec): connectome_file = File(exists=True, desc='Output connectome file') class CFFConverter(BaseInterface): """ Creates a Connectome File Format (CFF) file from input networks, surfaces, volumes, tracts, etcetera.... 
Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> cvt = cmtk.CFFConverter() >>> cvt.inputs.title = 'subject 1' >>> cvt.inputs.gifti_surfaces = ['lh.pial_converted.gii', 'rh.pial_converted.gii'] >>> cvt.inputs.tract_files = ['streamlines.trk'] >>> cvt.inputs.gpickled_networks = ['network0.gpickle'] >>> cvt.run() # doctest: +SKIP """ input_spec = CFFConverterInputSpec output_spec = CFFConverterOutputSpec def _run_interface(self, runtime): a = cf.connectome() if isdefined(self.inputs.title): a.connectome_meta.set_title(self.inputs.title) else: a.connectome_meta.set_title(self.inputs.out_file) if isdefined(self.inputs.creator): a.connectome_meta.set_creator(self.inputs.creator) else: #Probably only works on some OSes... a.connectome_meta.set_creator(os.getenv('USER')) if isdefined(self.inputs.email): a.connectome_meta.set_email(self.inputs.email) if isdefined(self.inputs.publisher): a.connectome_meta.set_publisher(self.inputs.publisher) if isdefined(self.inputs.license): a.connectome_meta.set_license(self.inputs.license) if isdefined(self.inputs.rights): a.connectome_meta.set_rights(self.inputs.rights) if isdefined(self.inputs.references): a.connectome_meta.set_references(self.inputs.references) if isdefined(self.inputs.relation): a.connectome_meta.set_relation(self.inputs.relation) if isdefined(self.inputs.species): a.connectome_meta.set_species(self.inputs.species) if isdefined(self.inputs.description): a.connectome_meta.set_description(self.inputs.description) a.connectome_meta.set_created(datetime.date.today()) count = 0 if isdefined(self.inputs.graphml_networks): for ntwk in self.inputs.graphml_networks: # There must be a better way to deal with the unique name problem #(i.e. tracks and networks can't use the same name, and previously we were pulling them both from the input files) ntwk_name = 'Network {cnt}'.format(cnt=count) a.add_connectome_network_from_graphml(ntwk_name, ntwk) count += 1 if isdefined(self.inputs.gpickled_networks): unpickled = [] for ntwk in self.inputs.gpickled_networks: _, ntwk_name, _ = split_filename(ntwk) unpickled = nx.read_gpickle(ntwk) cnet = cf.CNetwork(name = ntwk_name) cnet.set_with_nxgraph(unpickled) a.add_connectome_network(cnet) count += 1 count = 0 if isdefined(self.inputs.tract_files): for trk in self.inputs.tract_files: _, trk_name, _ = split_filename(trk) ctrack = cf.CTrack(trk_name, trk) a.add_connectome_track(ctrack) count += 1 count = 0 if isdefined(self.inputs.gifti_surfaces): for surf in self.inputs.gifti_surfaces: _, surf_name, _ = split_filename(surf) csurf = cf.CSurface.create_from_gifti("Surface %d - %s" % (count,surf_name), surf) csurf.fileformat='Gifti' csurf.dtype='Surfaceset' a.add_connectome_surface(csurf) count += 1 count = 0 if isdefined(self.inputs.gifti_labels): for label in self.inputs.gifti_labels: _, label_name, _ = split_filename(label) csurf = cf.CSurface.create_from_gifti("Surface Label %d - %s" % (count,label_name), label) csurf.fileformat='Gifti' csurf.dtype='Labels' a.add_connectome_surface(csurf) count += 1 if isdefined(self.inputs.nifti_volumes): for vol in self.inputs.nifti_volumes: _, vol_name, _ = split_filename(vol) cvol = cf.CVolume.create_from_nifti(vol_name,vol) a.add_connectome_volume(cvol) if isdefined(self.inputs.script_files): for script in self.inputs.script_files: _, script_name, _ = split_filename(script) cscript = cf.CScript.create_from_file(script_name, script) a.add_connectome_script(cscript) if isdefined(self.inputs.data_files): for data in self.inputs.data_files: _, data_name, _ = 
split_filename(data) cda = cf.CData(name=data_name, src=data, fileformat='NumPy') if not string.find(data_name,'lengths') == -1: cda.dtype = 'FinalFiberLengthArray' if not string.find(data_name,'endpoints') == -1: cda.dtype = 'FiberEndpoints' if not string.find(data_name,'labels') == -1: cda.dtype = 'FinalFiberLabels' a.add_connectome_data(cda) a.print_summary() _, name, ext = split_filename(self.inputs.out_file) if not ext == '.cff': ext = '.cff' cf.save_to_cff(a,op.abspath(name + ext)) return runtime def _list_outputs(self): outputs = self._outputs().get() _, name, ext = split_filename(self.inputs.out_file) if not ext == '.cff': ext = '.cff' outputs['connectome_file'] = op.abspath(name + ext) return outputs class MergeCNetworksInputSpec(BaseInterfaceInputSpec): in_files = InputMultiPath(File(exists=True), mandatory=True, desc='List of CFF files to extract networks from') out_file = File('merged_network_connectome.cff', usedefault = True, desc='Output CFF file with all the networks added') class MergeCNetworksOutputSpec(TraitedSpec): connectome_file = File(exists=True, desc='Output CFF file with all the networks added') class MergeCNetworks(BaseInterface): """ Merges networks from multiple CFF files into one new CFF file. Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> mrg = cmtk.MergeCNetworks() >>> mrg.inputs.in_files = ['subj1.cff','subj2.cff'] >>> mrg.run() # doctest: +SKIP """ input_spec = MergeCNetworksInputSpec output_spec = MergeCNetworksOutputSpec def _run_interface(self, runtime): extracted_networks = [] for i, con in enumerate(self.inputs.in_files): mycon = cf.load(con) nets = mycon.get_connectome_network() for ne in nets: # here, you might want to skip networks with a given # metadata information ne.load() contitle = mycon.get_connectome_meta().get_title() ne.set_name( str(i) + ': ' + contitle + ' - ' + ne.get_name() ) ne.set_src(ne.get_name()) extracted_networks.append(ne) # Add networks to new connectome newcon = cf.connectome(title = 'All CNetworks', connectome_network = extracted_networks) # Setting additional metadata metadata = newcon.get_connectome_meta() metadata.set_creator('My Name') metadata.set_email('My Email') _, name, ext = split_filename(self.inputs.out_file) if not ext == '.cff': ext = '.cff' cf.save_to_cff(newcon, op.abspath(name + ext)) return runtime def _list_outputs(self): outputs = self._outputs().get() _, name, ext = split_filename(self.inputs.out_file) if not ext == '.cff': ext = '.cff' outputs['connectome_file'] = op.abspath(name + ext) return outputs nipype-0.9.2/nipype/interfaces/cmtk/nbs.py000066400000000000000000000140631227300005300205430ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits, File, TraitedSpec, InputMultiPath, OutputMultiPath, isdefined) import os.path as op import numpy as np import networkx as nx from nipype.utils.misc import package_check import warnings from ... 
import logging iflogger = logging.getLogger('interface') have_cv = True try: package_check('cviewer') except Exception, e: have_cv = False else: import cviewer.libs.pyconto.groupstatistics.nbs as nbs def ntwks_to_matrices(in_files, edge_key): first = nx.read_gpickle(in_files[0]) files = len(in_files) nodes = len(first.nodes()) matrix = np.zeros((nodes, nodes, files)) for idx, name in enumerate(in_files): graph = nx.read_gpickle(name) for u, v, d in graph.edges(data=True): graph[u][v]['weight'] = d[edge_key] # Setting the edge requested edge value as weight value matrix[:, :, idx] = nx.to_numpy_matrix(graph) # Retrieve the matrix return matrix class NetworkBasedStatisticInputSpec(BaseInterfaceInputSpec): in_group1 = InputMultiPath(File(exists=True), mandatory=True, desc='Networks for the first group of subjects') in_group2 = InputMultiPath(File(exists=True), mandatory=True, desc='Networks for the second group of subjects') node_position_network = File(desc='An optional network used to position the nodes for the output networks') number_of_permutations = traits.Int(1000, usedefault=True, desc='Number of permutations to perform') threshold = traits.Float(3, usedefault=True, desc='T-statistic threshold') t_tail = traits.Enum('left', 'right', 'both', usedefault=True, desc='Can be one of "left", "right", or "both"') edge_key = traits.Str('number_of_fibers', usedefault=True, desc='Usually "number_of_fibers, "fiber_length_mean", "fiber_length_std" for matrices made with CMTK' \ 'Sometimes "weight" or "value" for functional networks.') out_nbs_network = File(desc='Output network with edges identified by the NBS') out_nbs_pval_network = File(desc='Output network with p-values to weight the edges identified by the NBS') class NetworkBasedStatisticOutputSpec(TraitedSpec): nbs_network = File(exists=True, desc='Output network with edges identified by the NBS') nbs_pval_network = File(exists=True, desc='Output network with p-values to weight the edges identified by the NBS') network_files = OutputMultiPath(File(exists=True), desc='Output network with edges identified by the NBS') class NetworkBasedStatistic(BaseInterface): """ Calculates and outputs the average network given a set of input NetworkX gpickle files For documentation of Network-based statistic parameters: https://github.com/LTS5/connectomeviewer/blob/master/cviewer/libs/pyconto/groupstatistics/nbs/_nbs.py Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> nbs = cmtk.NetworkBasedStatistic() >>> nbs.inputs.in_group1 = ['subj1.pck', 'subj2.pck'] # doctest: +SKIP >>> nbs.inputs.in_group2 = ['pat1.pck', 'pat2.pck'] # doctest: +SKIP >>> nbs.run() # doctest: +SKIP """ input_spec = NetworkBasedStatisticInputSpec output_spec = NetworkBasedStatisticOutputSpec def _run_interface(self, runtime): THRESH = self.inputs.threshold K = self.inputs.number_of_permutations TAIL = self.inputs.t_tail edge_key = self.inputs.edge_key details = edge_key + '-thresh-' + str(THRESH) + '-k-' + str(K) + '-tail-' + TAIL + '.pck' # Fill in the data from the networks X = ntwks_to_matrices(self.inputs.in_group1, edge_key) Y = ntwks_to_matrices(self.inputs.in_group2, edge_key) PVAL, ADJ, _ = nbs.compute_nbs(X, Y, THRESH, K, TAIL) iflogger.info('p-values:') iflogger.info(PVAL) pADJ = ADJ.copy() for idx, _ in enumerate(PVAL): x, y = np.where(ADJ == idx + 1) pADJ[x, y] = PVAL[idx] # Create networkx graphs from the adjacency matrix nbsgraph = nx.from_numpy_matrix(ADJ) nbs_pval_graph = nx.from_numpy_matrix(pADJ) # Relabel nodes because they should not start at zero for 
our convention nbsgraph = nx.relabel_nodes(nbsgraph, lambda x: x + 1) nbs_pval_graph = nx.relabel_nodes(nbs_pval_graph, lambda x: x + 1) if isdefined(self.inputs.node_position_network): node_ntwk_name = self.inputs.node_position_network else: node_ntwk_name = self.inputs.in_group1[0] node_network = nx.read_gpickle(node_ntwk_name) iflogger.info('Populating node dictionaries with attributes from {node}'.format(node=node_ntwk_name)) for nid, ndata in node_network.nodes_iter(data=True): nbsgraph.node[nid] = ndata nbs_pval_graph.node[nid] = ndata path = op.abspath('NBS_Result_' + details) iflogger.info(path) nx.write_gpickle(nbsgraph, path) iflogger.info('Saving output NBS edge network as {out}'.format(out=path)) pval_path = op.abspath('NBS_P_vals_' + details) iflogger.info(pval_path) nx.write_gpickle(nbs_pval_graph, pval_path) iflogger.info('Saving output p-value network as {out}'.format(out=pval_path)) return runtime def _list_outputs(self): outputs = self.output_spec().get() THRESH = self.inputs.threshold K = self.inputs.number_of_permutations TAIL = self.inputs.t_tail edge_key = self.inputs.edge_key details = edge_key + '-thresh-' + str(THRESH) + '-k-' + str(K) + '-tail-' + TAIL + '.pck' path = op.abspath('NBS_Result_' + details) pval_path = op.abspath('NBS_P_vals_' + details) outputs['nbs_network'] = path outputs['nbs_pval_network'] = pval_path outputs['network_files'] = [path, pval_path] return outputs def _gen_outfilename(self, name, ext): return name + '.' + ext nipype-0.9.2/nipype/interfaces/cmtk/nx.py000066400000000000000000000616241227300005300204130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits, File, TraitedSpec, InputMultiPath, OutputMultiPath, isdefined) from nipype.utils.filemanip import split_filename import os, os.path as op import numpy as np import networkx as nx import scipy.io as sio import pickle from nipype.utils.misc import package_check import warnings from ... 
import logging iflogger = logging.getLogger('interface') have_cmp = True try: package_check('cmp') except Exception, e: have_cmp = False else: import cmp def read_unknown_ntwk(ntwk): if not isinstance(ntwk, nx.classes.graph.Graph): path, name, ext = split_filename(ntwk) if ext == '.pck': ntwk = nx.read_gpickle(ntwk) elif ext == '.graphml': ntwk = nx.read_graphml(ntwk) return ntwk def remove_all_edges(ntwk): ntwktmp = ntwk.copy() edges = ntwktmp.edges_iter() for edge in edges: ntwk.remove_edge(edge[0], edge[1]) return ntwk def fix_keys_for_gexf(orig): """ GEXF Networks can be read in Gephi, however, the keys for the node and edge IDs must be converted to strings """ import networkx as nx ntwk = nx.Graph() nodes = orig.nodes_iter() edges = orig.edges_iter() for node in nodes: newnodedata = {} newnodedata.update(orig.node[node]) if orig.node[node].has_key('dn_fsname'): newnodedata['label'] = orig.node[node]['dn_fsname'] ntwk.add_node(str(node), newnodedata) if ntwk.node[str(node)].has_key('dn_position') and newnodedata.has_key('dn_position'): ntwk.node[str(node)]['dn_position'] = str(newnodedata['dn_position']) for edge in edges: data = {} data = orig.edge[edge[0]][edge[1]] ntwk.add_edge(str(edge[0]), str(edge[1]), data) if ntwk.edge[str(edge[0])][str(edge[1])].has_key('fiber_length_mean'): ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_mean'] = str(data['fiber_length_mean']) if ntwk.edge[str(edge[0])][str(edge[1])].has_key('fiber_length_std'): ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_std'] = str(data['fiber_length_std']) if ntwk.edge[str(edge[0])][str(edge[1])].has_key('number_of_fibers'): ntwk.edge[str(edge[0])][str(edge[1])]['number_of_fibers'] = str(data['number_of_fibers']) if ntwk.edge[str(edge[0])][str(edge[1])].has_key('value'): ntwk.edge[str(edge[0])][str(edge[1])]['value'] = str(data['value']) return ntwk def add_dicts_by_key(in_dict1, in_dict2): """ Combines two dictionaries and adds the values for those keys that are shared """ both = {} for key1 in in_dict1: for key2 in in_dict2: if key1 == key2: both[key1] = in_dict1[key1] + in_dict2[key2] return both def average_networks(in_files, ntwk_res_file, group_id): """ Sums the edges of input networks and divides by the number of networks Writes the average network as .pck and .gexf and returns the name of the written networks """ import networkx as nx import os.path as op iflogger.info("Creating average network for group: {grp}".format(grp=group_id)) matlab_network_list = [] if len(in_files) == 1: avg_ntwk = read_unknown_ntwk(in_files[0]) else: count_to_keep_edge = np.round(float(len(in_files)) / 2) iflogger.info("Number of networks: {L}, an edge must occur in at least {c} to remain in the average network".format(L=len(in_files), c=count_to_keep_edge)) ntwk_res_file = read_unknown_ntwk(ntwk_res_file) iflogger.info("{n} Nodes found in network resolution file".format(n=ntwk_res_file.number_of_nodes())) ntwk = remove_all_edges(ntwk_res_file) counting_ntwk = ntwk.copy() # Sums all the relevant variables for index, subject in enumerate(in_files): tmp = nx.read_gpickle(subject) iflogger.info('File {s} has {n} edges'.format(s=subject, n=tmp.number_of_edges())) edges = tmp.edges_iter() for edge in edges: data = {} data = tmp.edge[edge[0]][edge[1]] data['count'] = 1 if ntwk.has_edge(edge[0], edge[1]): current = {} current = ntwk.edge[edge[0]][edge[1]] data = add_dicts_by_key(current, data) ntwk.add_edge(edge[0], edge[1], data) nodes = tmp.nodes_iter() for node in nodes: data = {} data = ntwk.node[node] if 
tmp.node[node].has_key('value'): data['value'] = data['value'] + tmp.node[node]['value'] ntwk.add_node(node, data) # Divides each value by the number of files nodes = ntwk.nodes_iter() edges = ntwk.edges_iter() iflogger.info('Total network has {n} edges'.format(n=ntwk.number_of_edges())) avg_ntwk = nx.Graph() newdata = {} for node in nodes: data = ntwk.node[node] newdata = data if data.has_key('value'): newdata['value'] = data['value'] / len(in_files) ntwk.node[node]['value'] = newdata avg_ntwk.add_node(node, newdata) edge_dict = {} edge_dict['count'] = np.zeros((avg_ntwk.number_of_nodes(), avg_ntwk.number_of_nodes())) for edge in edges: data = ntwk.edge[edge[0]][edge[1]] if ntwk.edge[edge[0]][edge[1]]['count'] >= count_to_keep_edge: for key in data.keys(): if not key == 'count': data[key] = data[key] / len(in_files) ntwk.edge[edge[0]][edge[1]] = data avg_ntwk.add_edge(edge[0],edge[1],data) edge_dict['count'][edge[0]-1][edge[1]-1] = ntwk.edge[edge[0]][edge[1]]['count'] iflogger.info('After thresholding, the average network has has {n} edges'.format(n=avg_ntwk.number_of_edges())) avg_edges = avg_ntwk.edges_iter() for edge in avg_edges: data = avg_ntwk.edge[edge[0]][edge[1]] for key in data.keys(): if not key == 'count': edge_dict[key] = np.zeros((avg_ntwk.number_of_nodes(), avg_ntwk.number_of_nodes())) edge_dict[key][edge[0]-1][edge[1]-1] = data[key] for key in edge_dict.keys(): tmp = {} network_name = group_id + '_' + key + '_average.mat' matlab_network_list.append(op.abspath(network_name)) tmp[key] = edge_dict[key] sio.savemat(op.abspath(network_name), tmp) iflogger.info('Saving average network for key: {k} as {out}'.format(k=key, out=op.abspath(network_name))) # Writes the networks and returns the name network_name = group_id + '_average.pck' nx.write_gpickle(avg_ntwk, op.abspath(network_name)) iflogger.info('Saving average network as {out}'.format(out=op.abspath(network_name))) avg_ntwk = fix_keys_for_gexf(avg_ntwk) network_name = group_id + '_average.gexf' nx.write_gexf(avg_ntwk, op.abspath(network_name)) iflogger.info('Saving average network as {out}'.format(out=op.abspath(network_name))) return network_name, matlab_network_list def compute_node_measures(ntwk, calculate_cliques=False): """ These return node-based measures """ iflogger.info('Computing node measures:') measures = {} iflogger.info('...Computing degree...') measures['degree'] = np.array(ntwk.degree().values()) iflogger.info('...Computing load centrality...') measures['load_centrality'] = np.array(nx.load_centrality(ntwk).values()) iflogger.info('...Computing betweenness centrality...') measures['betweenness_centrality'] = np.array(nx.betweenness_centrality(ntwk).values()) iflogger.info('...Computing degree centrality...') measures['degree_centrality'] = np.array(nx.degree_centrality(ntwk).values()) iflogger.info('...Computing closeness centrality...') measures['closeness_centrality'] = np.array(nx.closeness_centrality(ntwk).values()) # iflogger.info('...Computing eigenvector centrality...') # measures['eigenvector_centrality'] = np.array(nx.eigenvector_centrality(ntwk, max_iter=100000).values()) iflogger.info('...Computing triangles...') measures['triangles'] = np.array(nx.triangles(ntwk).values()) iflogger.info('...Computing clustering...') measures['clustering'] = np.array(nx.clustering(ntwk).values()) iflogger.info('...Computing k-core number') measures['core_number'] = np.array(nx.core_number(ntwk).values()) iflogger.info('...Identifying network isolates...') isolate_list = nx.isolates(ntwk) binarized = 
np.zeros((ntwk.number_of_nodes(), 1)) for value in isolate_list: value = value - 1 # Zero indexing binarized[value] = 1 measures['isolates'] = binarized if calculate_cliques: iflogger.info('...Calculating node clique number') measures['node_clique_number'] = np.array(nx.node_clique_number(ntwk).values()) iflogger.info('...Computing number of cliques for each node...') measures['number_of_cliques'] = np.array(nx.number_of_cliques(ntwk).values()) return measures def compute_edge_measures(ntwk): """ These return edge-based measures """ iflogger.info('Computing edge measures:') measures = {} #iflogger.info('...Computing google matrix...' #Makes really large networks (500k+ edges)) #measures['google_matrix'] = nx.google_matrix(ntwk) #iflogger.info('...Computing hub matrix...') #measures['hub_matrix'] = nx.hub_matrix(ntwk) #iflogger.info('...Computing authority matrix...') #measures['authority_matrix'] = nx.authority_matrix(ntwk) return measures def compute_dict_measures(ntwk): """ Returns a dictionary """ iflogger.info('Computing measures which return a dictionary:') measures = {} iflogger.info('...Computing rich club coefficient...') measures['rich_club_coef'] = nx.rich_club_coefficient(ntwk) return measures def compute_singlevalued_measures(ntwk, weighted=True, calculate_cliques=False): """ Returns a single value per network """ iflogger.info('Computing single valued measures:') measures = {} iflogger.info('...Computing degree assortativity (pearson number) ...') try: measures['degree_pearsonr'] = nx.degree_pearsonr(ntwk) except AttributeError: # For NetworkX 1.6 measures['degree_pearsonr'] = nx.degree_pearson_correlation_coefficient(ntwk) iflogger.info('...Computing degree assortativity...') try: measures['degree_assortativity'] = nx.degree_assortativity(ntwk) except AttributeError: measures['degree_assortativity'] = nx.degree_assortativity_coefficient(ntwk) iflogger.info('...Computing transitivity...') measures['transitivity'] = nx.transitivity(ntwk) iflogger.info('...Computing number of connected_components...') measures['number_connected_components'] = nx.number_connected_components(ntwk) iflogger.info('...Computing graph density...') measures['graph_density'] = nx.density(ntwk) iflogger.info('...Recording number of edges...') measures['number_of_edges'] = nx.number_of_edges(ntwk) iflogger.info('...Recording number of nodes...') measures['number_of_nodes'] = nx.number_of_nodes(ntwk) iflogger.info('...Computing average clustering...') measures['average_clustering'] = nx.average_clustering(ntwk) if nx.is_connected(ntwk): iflogger.info('...Calculating average shortest path length...') measures['average_shortest_path_length'] = nx.average_shortest_path_length(ntwk, weighted) else: iflogger.info('...Calculating average shortest path length...') measures['average_shortest_path_length'] = nx.average_shortest_path_length(nx.connected_component_subgraphs(ntwk)[0], weighted) if calculate_cliques: iflogger.info('...Computing graph clique number...') measures['graph_clique_number'] = nx.graph_clique_number(ntwk) #out of memory error return measures def compute_network_measures(ntwk): measures = {} #iflogger.info('Identifying k-core') #measures['k_core'] = nx.k_core(ntwk) #iflogger.info('Identifying k-shell') #measures['k_shell'] = nx.k_shell(ntwk) #iflogger.info('Identifying k-crust') #measures['k_crust'] = nx.k_crust(ntwk) return measures def add_node_data(node_array, ntwk): node_ntwk = nx.Graph() newdata = {} for idx, data in ntwk.nodes_iter(data=True): if not int(idx) == 0: newdata['value'] = 
node_array[int(idx) - 1] data.update(newdata) node_ntwk.add_node(int(idx), data) return node_ntwk def add_edge_data(edge_array, ntwk, above=0, below=0): edge_ntwk = ntwk.copy() data = {} for x, row in enumerate(edge_array): for y in range(0, np.max(np.shape(edge_array[x]))): if not edge_array[x, y] == 0: data['value'] = edge_array[x, y] if data['value'] <= below or data['value'] >= above: if edge_ntwk.has_edge(x + 1, y + 1): old_edge_dict = edge_ntwk.edge[x + 1][y + 1] edge_ntwk.remove_edge(x + 1, y + 1) data.update(old_edge_dict) edge_ntwk.add_edge(x + 1, y + 1, data) return edge_ntwk class NetworkXMetricsInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='Input network') out_k_core = File('k_core', usedefault=True, desc='Computed k-core network stored as a NetworkX pickle.') out_k_shell = File('k_shell', usedefault=True, desc='Computed k-shell network stored as a NetworkX pickle.') out_k_crust = File('k_crust', usedefault=True, desc='Computed k-crust network stored as a NetworkX pickle.') treat_as_weighted_graph = traits.Bool(True, usedefault=True, desc='Some network metrics can be calculated while considering only a binarized version of the graph') compute_clique_related_measures = traits.Bool(False, usedefault=True, desc='Computing clique-related measures (e.g. node clique number) can be very time consuming') out_global_metrics_matlab = File(genfile=True, desc='Output node metrics in MATLAB .mat format') out_node_metrics_matlab = File(genfile=True, desc='Output node metrics in MATLAB .mat format') out_edge_metrics_matlab = File(genfile=True, desc='Output edge metrics in MATLAB .mat format') out_pickled_extra_measures = File('extra_measures', usedefault=True, desc='Network measures for group 1 that return dictionaries stored as a Pickle.') class NetworkXMetricsOutputSpec(TraitedSpec): gpickled_network_files = OutputMultiPath(File(desc='Output gpickled network files')) matlab_matrix_files = OutputMultiPath(File(desc='Output network metrics in MATLAB .mat format')) global_measures_matlab = File(desc='Output global metrics in MATLAB .mat format') node_measures_matlab = File(desc='Output node metrics in MATLAB .mat format') edge_measures_matlab = File(desc='Output edge metrics in MATLAB .mat format') node_measure_networks = OutputMultiPath(File(desc='Output gpickled network files for all node-based measures')) edge_measure_networks = OutputMultiPath(File(desc='Output gpickled network files for all edge-based measures')) k_networks = OutputMultiPath(File(desc='Output gpickled network files for the k-core, k-shell, and k-crust networks')) k_core = File(desc='Computed k-core network stored as a NetworkX pickle.') k_shell = File(desc='Computed k-shell network stored as a NetworkX pickle.') k_crust = File(desc='Computed k-crust network stored as a NetworkX pickle.') pickled_extra_measures = File(desc='Network measures for the group that return dictionaries, stored as a Pickle.') matlab_dict_measures = OutputMultiPath(File(desc='Network measures for the group that return dictionaries, stored as matlab matrices.')) class NetworkXMetrics(BaseInterface): """ Calculates and outputs NetworkX-based measures for an input network Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> nxmetrics = cmtk.NetworkXMetrics() >>> nxmetrics.inputs.in_file = 'subj1.pck' >>> nxmetrics.run() # doctest: +SKIP """ input_spec = NetworkXMetricsInputSpec output_spec = NetworkXMetricsOutputSpec def _run_interface(self, runtime): global gpickled, nodentwks, edgentwks, kntwks, matlab 
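        # These module-level lists accumulate the paths of every file written
        # below so that _list_outputs() can report them after the interface runs.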
gpickled = list() nodentwks = list() edgentwks = list() kntwks = list() matlab = list() ntwk = nx.read_gpickle(self.inputs.in_file) # Each block computes, writes, and saves a measure # The names are then added to the output .pck file list # In the case of the degeneracy networks, they are given specified output names calculate_cliques = self.inputs.compute_clique_related_measures weighted = self.inputs.treat_as_weighted_graph global_measures = compute_singlevalued_measures(ntwk, weighted, calculate_cliques) if isdefined(self.inputs.out_global_metrics_matlab): global_out_file = op.abspath(self.inputs.out_global_metrics_matlab) else: global_out_file = op.abspath(self._gen_outfilename('globalmetrics', 'mat')) sio.savemat(global_out_file, global_measures, oned_as='column') matlab.append(global_out_file) node_measures = compute_node_measures(ntwk, calculate_cliques) for key in node_measures.keys(): newntwk = add_node_data(node_measures[key], ntwk) out_file = op.abspath(self._gen_outfilename(key, 'pck')) nx.write_gpickle(newntwk, out_file) nodentwks.append(out_file) if isdefined(self.inputs.out_node_metrics_matlab): node_out_file = op.abspath(self.inputs.out_node_metrics_matlab) else: node_out_file = op.abspath(self._gen_outfilename('nodemetrics', 'mat')) sio.savemat(node_out_file, node_measures, oned_as='column') matlab.append(node_out_file) gpickled.extend(nodentwks) edge_measures = compute_edge_measures(ntwk) for key in edge_measures.keys(): newntwk = add_edge_data(edge_measures[key], ntwk) out_file = op.abspath(self._gen_outfilename(key, 'pck')) nx.write_gpickle(newntwk, out_file) edgentwks.append(out_file) if isdefined(self.inputs.out_edge_metrics_matlab): edge_out_file = op.abspath(self.inputs.out_edge_metrics_matlab) else: edge_out_file = op.abspath(self._gen_outfilename('edgemetrics', 'mat')) sio.savemat(edge_out_file, edge_measures, oned_as='column') matlab.append(edge_out_file) gpickled.extend(edgentwks) ntwk_measures = compute_network_measures(ntwk) for key in ntwk_measures.keys(): if key == 'k_core': out_file = op.abspath(self._gen_outfilename(self.inputs.out_k_core, 'pck')) if key == 'k_shell': out_file = op.abspath(self._gen_outfilename(self.inputs.out_k_shell, 'pck')) if key == 'k_crust': out_file = op.abspath(self._gen_outfilename(self.inputs.out_k_crust, 'pck')) nx.write_gpickle(ntwk_measures[key], out_file) kntwks.append(out_file) gpickled.extend(kntwks) out_pickled_extra_measures = op.abspath(self._gen_outfilename(self.inputs.out_pickled_extra_measures, 'pck')) dict_measures = compute_dict_measures(ntwk) iflogger.info('Saving extra measure file to {path} in Pickle format'.format(path=op.abspath(out_pickled_extra_measures))) file = open(out_pickled_extra_measures, 'w') pickle.dump(dict_measures, file) file.close() iflogger.info('Saving MATLAB measures as {m}'.format(m=matlab)) # Loops through the measures which return a dictionary, # converts the keys and values to a Numpy array, # stacks them together, and saves them in a MATLAB .mat file via Scipy global dicts dicts = list() for idx, key in enumerate(dict_measures.keys()): for idxd, keyd in enumerate(dict_measures[key].keys()): if idxd == 0: nparraykeys = np.array(keyd) nparrayvalues = np.array(dict_measures[key][keyd]) else: nparraykeys = np.append(nparraykeys, np.array(keyd)) values = np.array(dict_measures[key][keyd]) nparrayvalues = np.append(nparrayvalues, values) nparray = np.vstack((nparraykeys, nparrayvalues)) out_file = op.abspath(self._gen_outfilename(key, 'mat')) npdict = {} npdict[key] = nparray 
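            # Each dictionary-valued measure (e.g. the rich-club coefficient) is
            # stacked into a 2 x N array of keys over values and written to its
            # own MATLAB .mat file.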
sio.savemat(out_file, npdict, oned_as='column') dicts.append(out_file) return runtime def _list_outputs(self): outputs = self.output_spec().get() outputs["k_core"] = op.abspath(self._gen_outfilename(self.inputs.out_k_core, 'pck')) outputs["k_shell"] = op.abspath(self._gen_outfilename(self.inputs.out_k_shell, 'pck')) outputs["k_crust"] = op.abspath(self._gen_outfilename(self.inputs.out_k_crust, 'pck')) outputs["gpickled_network_files"] = gpickled outputs["k_networks"] = kntwks outputs["node_measure_networks"] = nodentwks outputs["edge_measure_networks"] = edgentwks outputs["matlab_dict_measures"] = dicts outputs["global_measures_matlab"] = op.abspath(self._gen_outfilename('globalmetrics', 'mat')) outputs["node_measures_matlab"] = op.abspath(self._gen_outfilename('nodemetrics', 'mat')) outputs["edge_measures_matlab"] = op.abspath(self._gen_outfilename('edgemetrics', 'mat')) outputs["matlab_matrix_files"] = [outputs["global_measures_matlab"], outputs["node_measures_matlab"], outputs["edge_measures_matlab"]] outputs["pickled_extra_measures"] = op.abspath(self._gen_outfilename(self.inputs.out_pickled_extra_measures, 'pck')) return outputs def _gen_outfilename(self, name, ext): return name + '.' + ext class AverageNetworksInputSpec(BaseInterfaceInputSpec): in_files = InputMultiPath(File(exists=True), mandatory=True, desc='Networks for a group of subjects') resolution_network_file = File(exists=True, desc='Parcellation files from Connectome Mapping Toolkit. This is not necessary' \ ', but if included, the interface will output the statistical maps as networkx graphs.') group_id = traits.Str('group1', usedefault=True, desc='ID for group') out_gpickled_groupavg = File(desc='Average network saved as a NetworkX .pck') out_gexf_groupavg = File(desc='Average network saved as a .gexf file') class AverageNetworksOutputSpec(TraitedSpec): gpickled_groupavg = File(desc='Average network saved as a NetworkX .pck') gexf_groupavg = File(desc='Average network saved as a .gexf file') matlab_groupavgs = OutputMultiPath(File(desc='Average network saved as a .gexf file')) class AverageNetworks(BaseInterface): """ Calculates and outputs the average network given a set of input NetworkX gpickle files This interface will only keep an edge in the averaged network if that edge is present in at least half of the input networks. 
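    Edge and node values are summed across the input networks and divided by the
    number of networks; the average is written as NetworkX .pck and .gexf files,
    along with one MATLAB .mat file per averaged edge measure.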
Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> avg = cmtk.AverageNetworks() >>> avg.inputs.in_files = ['subj1.pck', 'subj2.pck'] >>> avg.run() # doctest: +SKIP """ input_spec = AverageNetworksInputSpec output_spec = AverageNetworksOutputSpec def _run_interface(self, runtime): if isdefined(self.inputs.resolution_network_file): ntwk_res_file = self.inputs.resolution_network_file else: ntwk_res_file = self.inputs.in_files[0] global matlab_network_list network_name, matlab_network_list = average_networks(self.inputs.in_files, ntwk_res_file, self.inputs.group_id) return runtime def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.out_gpickled_groupavg): outputs["gpickled_groupavg"] = op.abspath(self._gen_outfilename(self.inputs.group_id + '_average', 'pck')) else: outputs["gpickled_groupavg"] = op.abspath(self.inputs.out_gpickled_groupavg) if not isdefined(self.inputs.out_gexf_groupavg): outputs["gexf_groupavg"] = op.abspath(self._gen_outfilename(self.inputs.group_id + '_average', 'gexf')) else: outputs["gexf_groupavg"] = op.abspath(self.inputs.out_gexf_groupavg) outputs["matlab_groupavgs"] = matlab_network_list return outputs def _gen_outfilename(self, name, ext): return name + '.' + ext nipype-0.9.2/nipype/interfaces/cmtk/parcellation.py000066400000000000000000000616211227300005300224400ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import ( BaseInterface, BaseInterfaceInputSpec, traits, File, TraitedSpec, Directory, isdefined) import os import os.path as op import numpy as np import nibabel as nb import networkx as nx import shutil from nipype.utils.misc import package_check import warnings from ... 
import logging iflogger = logging.getLogger('interface') have_cmp = True try: package_check('cmp') except Exception, e: have_cmp = False else: import cmp from cmp.util import runCmd def create_annot_label(subject_id, subjects_dir, fs_dir, parcellation_name): iflogger.info("Create the cortical labels necessary for our ROIs") iflogger.info("=================================================") fs_label_dir = op.join(op.join(subjects_dir, subject_id), 'label') output_dir = op.abspath(op.curdir) paths = [] cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = "Lausanne2008" for hemi in ['lh', 'rh']: spath = cmp_config._get_lausanne_parcellation( 'Lausanne2008')[parcellation_name]['fs_label_subdir_name'] % hemi paths.append(spath) for p in paths: try: os.makedirs(op.join('.', p)) except: pass if '33' in parcellation_name: comp = [ ('rh', 'myatlas_36_rh.gcs', 'rh.myaparc_36.annot', 'regenerated_rh_36', 'myaparc_36'), ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', 'regenerated_rh_60', 'myaparc_60'), ('lh', 'myatlas_36_lh.gcs', 'lh.myaparc_36.annot', 'regenerated_lh_36', 'myaparc_36'), ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', 'regenerated_lh_60', 'myaparc_60'), ] elif '60' in parcellation_name: comp = [ ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', 'regenerated_rh_60', 'myaparc_60'), ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', 'regenerated_lh_60', 'myaparc_60'), ] elif '125' in parcellation_name: comp = [ ('rh', 'myatlas_125_rh.gcs', 'rh.myaparc_125.annot', 'regenerated_rh_125', 'myaparc_125'), ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', 'regenerated_rh_60', 'myaparc_60'), ('lh', 'myatlas_125_lh.gcs', 'lh.myaparc_125.annot', 'regenerated_lh_125', 'myaparc_125'), ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', 'regenerated_lh_60', 'myaparc_60'), ] elif '250' in parcellation_name: comp = [ ('rh', 'myatlas_250_rh.gcs', 'rh.myaparc_250.annot', 'regenerated_rh_250', 'myaparc_250'), ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', 'regenerated_rh_60', 'myaparc_60'), ('lh', 'myatlas_250_lh.gcs', 'lh.myaparc_250.annot', 'regenerated_lh_250', 'myaparc_250'), ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', 'regenerated_lh_60', 'myaparc_60'), ] else: comp = [ ('rh', 'myatlas_36_rh.gcs', 'rh.myaparc_36.annot', 'regenerated_rh_36', 'myaparc_36'), ('rh', 'myatlasP1_16_rh.gcs', 'rh.myaparcP1_16.annot', 'regenerated_rh_500', 'myaparcP1_16'), ('rh', 'myatlasP17_28_rh.gcs', 'rh.myaparcP17_28.annot', 'regenerated_rh_500', 'myaparcP17_28'), ('rh', 'myatlasP29_36_rh.gcs', 'rh.myaparcP29_36.annot', 'regenerated_rh_500', 'myaparcP29_36'), ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', 'regenerated_rh_60', 'myaparc_60'), ('rh', 'myatlas_125_rh.gcs', 'rh.myaparc_125.annot', 'regenerated_rh_125', 'myaparc_125'), ('rh', 'myatlas_250_rh.gcs', 'rh.myaparc_250.annot', 'regenerated_rh_250', 'myaparc_250'), ('lh', 'myatlas_36_lh.gcs', 'lh.myaparc_36.annot', 'regenerated_lh_36', 'myaparc_36'), ('lh', 'myatlasP1_16_lh.gcs', 'lh.myaparcP1_16.annot', 'regenerated_lh_500', 'myaparcP1_16'), ('lh', 'myatlasP17_28_lh.gcs', 'lh.myaparcP17_28.annot', 'regenerated_lh_500', 'myaparcP17_28'), ('lh', 'myatlasP29_36_lh.gcs', 'lh.myaparcP29_36.annot', 'regenerated_lh_500', 'myaparcP29_36'), ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', 'regenerated_lh_60', 'myaparc_60'), ('lh', 'myatlas_125_lh.gcs', 'lh.myaparc_125.annot', 'regenerated_lh_125', 'myaparc_125'), ('lh', 'myatlas_250_lh.gcs', 'lh.myaparc_250.annot', 'regenerated_lh_250', 'myaparc_250'), ] log = 
cmp_config.get_logger() for out in comp: mris_cmd = 'mris_ca_label %s %s "%s/surf/%s.sphere.reg" "%s" "%s" ' % (subject_id, out[0], op.join(subjects_dir, subject_id), out[0], cmp_config.get_lausanne_atlas(out[1]), op.join(fs_label_dir, out[2])) runCmd(mris_cmd, log) iflogger.info('-----------') annot = '--annotation "%s"' % out[4] mri_an_cmd = 'mri_annotation2label --subject %s --hemi %s --outdir "%s" %s' % (subject_id, out[0], op.join(output_dir, out[3]), annot) iflogger.info(mri_an_cmd) runCmd(mri_an_cmd, log) iflogger.info('-----------') iflogger.info(os.environ['SUBJECTS_DIR']) # extract cc and unknown to add to tractography mask, we do not want this as a region of interest # in FS 5.0, unknown and corpuscallosum are not available for the 35 scale (why?), # but for the other scales only, take the ones from _60 rhun = op.join(output_dir, 'rh.unknown.label') lhun = op.join(output_dir, 'lh.unknown.label') rhco = op.join(output_dir, 'rh.corpuscallosum.label') lhco = op.join(output_dir, 'lh.corpuscallosum.label') shutil.copy( op.join(output_dir, 'regenerated_rh_60', 'rh.unknown.label'), rhun) shutil.copy( op.join(output_dir, 'regenerated_lh_60', 'lh.unknown.label'), lhun) shutil.copy(op.join( output_dir, 'regenerated_rh_60', 'rh.corpuscallosum.label'), rhco) shutil.copy(op.join( output_dir, 'regenerated_lh_60', 'lh.corpuscallosum.label'), lhco) mri_cmd = """mri_label2vol --label "%s" --label "%s" --label "%s" --label "%s" --temp "%s" --o "%s" --identity """ % (rhun, lhun, rhco, lhco, op.join(op.join(subjects_dir, subject_id), 'mri', 'orig.mgz'), op.join(fs_label_dir, 'cc_unknown.nii.gz') ) runCmd(mri_cmd, log) runCmd('mris_volmask %s' % subject_id, log) mri_cmd = 'mri_convert -i "%s/mri/ribbon.mgz" -o "%s/mri/ribbon.nii.gz"' % (op.join(subjects_dir, subject_id), op.join(subjects_dir, subject_id)) runCmd(mri_cmd, log) mri_cmd = 'mri_convert -i "%s/mri/aseg.mgz" -o "%s/mri/aseg.nii.gz"' % ( op.join(subjects_dir, subject_id), op.join(subjects_dir, subject_id)) runCmd(mri_cmd, log) iflogger.info("[ DONE ]") def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): """ Creates the ROI_%s.nii.gz files using the given parcellation information from networks. Iteratively create volume. """ iflogger.info("Create the ROIs:") output_dir = op.abspath(op.curdir) fs_dir = op.join(subjects_dir, subject_id) cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = "Lausanne2008" log = cmp_config.get_logger() parval = cmp_config._get_lausanne_parcellation( 'Lausanne2008')[parcellation_name] pgpath = parval['node_information_graphml'] aseg = nb.load(op.join(fs_dir, 'mri', 'aseg.nii.gz')) asegd = aseg.get_data() # identify cortical voxels, right (3) and left (42) hemispheres idxr = np.where(asegd == 3) idxl = np.where(asegd == 42) xx = np.concatenate((idxr[0], idxl[0])) yy = np.concatenate((idxr[1], idxl[1])) zz = np.concatenate((idxr[2], idxl[2])) # initialize variables necessary for cortical ROIs dilation # dimensions of the neighbourhood for rois labels assignment (choose odd dimensions!) 
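    # During dilation a cube of this size is centred on every unlabelled
    # grey-matter voxel and the voxel inherits the label of the closest
    # labelled voxel inside it (ties go to the most frequent label).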
shape = (25, 25, 25) center = np.array(shape) // 2 # dist: distances from the center of the neighbourhood dist = np.zeros(shape, dtype='float32') for x in range(shape[0]): for y in range(shape[1]): for z in range(shape[2]): distxyz = center - [x, y, z] dist[x, y, z] = np.sqrt(np.sum(np.multiply(distxyz, distxyz))) iflogger.info("Working on parcellation: ") iflogger.info(cmp_config._get_lausanne_parcellation( 'Lausanne2008')[parcellation_name]) iflogger.info("========================") pg = nx.read_graphml(pgpath) # each node represents a brain region # create a big 256^3 volume for storage of all ROIs rois = np.zeros((256, 256, 256), dtype=np.int16) count = 0 for brk, brv in pg.nodes_iter(data=True): count = count + 1 iflogger.info(brv) iflogger.info(brk) if brv['dn_hemisphere'] == 'left': hemi = 'lh' elif brv['dn_hemisphere'] == 'right': hemi = 'rh' if brv['dn_region'] == 'subcortical': iflogger.info(brv) iflogger.info("---------------------") iflogger.info("Work on brain region: %s" % (brv['dn_region'])) iflogger.info("Freesurfer Name: %s" % brv['dn_fsname']) iflogger.info("Region %s of %s " % (count, pg.number_of_nodes())) iflogger.info("---------------------") # if it is subcortical, retrieve roi from aseg idx = np.where(asegd == int(brv['dn_fs_aseg_val'])) rois[idx] = int(brv['dn_correspondence_id']) elif brv['dn_region'] == 'cortical': iflogger.info(brv) iflogger.info("---------------------") iflogger.info("Work on brain region: %s" % (brv['dn_region'])) iflogger.info("Freesurfer Name: %s" % brv['dn_fsname']) iflogger.info("Region %s of %s " % (count, pg.number_of_nodes())) iflogger.info("---------------------") labelpath = op.join( output_dir, parval['fs_label_subdir_name'] % hemi) # construct .label file name fname = '%s.%s.label' % (hemi, brv['dn_fsname']) # execute fs mri_label2vol to generate volume roi from the label file # store it in temporary file to be overwritten for each region mri_cmd = 'mri_label2vol --label "%s" --temp "%s" --o "%s" --identity' % (op.join(labelpath, fname), op.join(fs_dir, 'mri', 'orig.mgz'), op.join(output_dir, 'tmp.nii.gz')) runCmd(mri_cmd, log) tmp = nb.load(op.join(output_dir, 'tmp.nii.gz')) tmpd = tmp.get_data() # find voxel and set them to intensityvalue in rois idx = np.where(tmpd == 1) rois[idx] = int(brv['dn_correspondence_id']) # store volume eg in ROI_scale33.nii.gz out_roi = op.abspath('ROI_%s.nii.gz' % parcellation_name) # update the header hdr = aseg.get_header() hdr2 = hdr.copy() hdr2.set_data_dtype(np.uint16) log.info("Save output image to %s" % out_roi) img = nb.Nifti1Image(rois, aseg.get_affine(), hdr2) nb.save(img, out_roi) iflogger.info("[ DONE ]") # dilate cortical regions if (dilation == True): iflogger.info("Dilating cortical regions...") # loop throughout all the voxels belonging to the aseg GM volume for j in range(xx.size): if rois[xx[j], yy[j], zz[j]] == 0: local = extract( rois, shape, position=(xx[j], yy[j], zz[j]), fill=0) mask = local.copy() mask[np.nonzero(local > 0)] = 1 thisdist = np.multiply(dist, mask) thisdist[np.nonzero(thisdist == 0)] = np.amax(thisdist) value = np.int_( local[np.nonzero(thisdist == np.amin(thisdist))]) if value.size > 1: counts = np.bincount(value) value = np.argmax(counts) rois[xx[j], yy[j], zz[j]] = value # store volume eg in ROIv_scale33.nii.gz out_roi = op.abspath('ROIv_%s.nii.gz' % parcellation_name) iflogger.info("Save output image to %s" % out_roi) img = nb.Nifti1Image(rois, aseg.get_affine(), hdr2) nb.save(img, out_roi) iflogger.info("[ DONE ]") def create_wm_mask(subject_id, 
subjects_dir, fs_dir, parcellation_name): iflogger.info("Create white matter mask") fs_dir = op.join(subjects_dir, subject_id) cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = "Lausanne2008" pgpath = cmp_config._get_lausanne_parcellation( 'Lausanne2008')[parcellation_name]['node_information_graphml'] # load ribbon as basis for white matter mask fsmask = nb.load(op.join(fs_dir, 'mri', 'ribbon.nii.gz')) fsmaskd = fsmask.get_data() wmmask = np.zeros(fsmaskd.shape) # extract right and left white matter idx_lh = np.where(fsmaskd == 120) idx_rh = np.where(fsmaskd == 20) wmmask[idx_lh] = 1 wmmask[idx_rh] = 1 # remove subcortical nuclei from white matter mask aseg = nb.load(op.join(fs_dir, 'mri', 'aseg.nii.gz')) asegd = aseg.get_data() try: import scipy.ndimage.morphology as nd except ImportError: raise Exception('Need scipy for binary erosion of white matter mask') # need binary erosion function imerode = nd.binary_erosion # ventricle erosion csfA = np.zeros(asegd.shape) csfB = np.zeros(asegd.shape) # structuring elements for erosion se1 = np.zeros((3, 3, 5)) se1[1, :, 2] = 1 se1[:, 1, 2] = 1 se1[1, 1, :] = 1 se = np.zeros((3, 3, 3)) se[1, :, 1] = 1 se[:, 1, 1] = 1 se[1, 1, :] = 1 # lateral ventricles, thalamus proper and caudate # the latter two removed for better erosion, but put back afterwards idx = np.where((asegd == 4) | (asegd == 43) | (asegd == 11) | (asegd == 50) | (asegd == 31) | (asegd == 63) | (asegd == 10) | (asegd == 49)) csfA[idx] = 1 csfA = imerode(imerode(csfA, se1), se) # thalmus proper and cuadate are put back because they are not lateral ventricles idx = np.where((asegd == 11) | (asegd == 50) | (asegd == 10) | (asegd == 49)) csfA[idx] = 0 # REST CSF, IE 3RD AND 4TH VENTRICULE AND EXTRACEREBRAL CSF idx = np.where((asegd == 5) | (asegd == 14) | (asegd == 15) | (asegd == 24) | (asegd == 44) | (asegd == 72) | (asegd == 75) | (asegd == 76) | (asegd == 213) | (asegd == 221)) # 43 ??, 4?? 213?, 221? # more to discuss. for i in [5, 14, 15, 24, 44, 72, 75, 76, 213, 221]: idx = np.where(asegd == i) csfB[idx] = 1 # do not remove the subthalamic nucleus for now from the wm mask # 23, 60 # would stop the fiber going to the segmented "brainstem" # grey nuclei, either with or without erosion gr_ncl = np.zeros(asegd.shape) # with erosion for i in [10, 11, 12, 49, 50, 51]: idx = np.where(asegd == i) # temporary volume tmp = np.zeros(asegd.shape) tmp[idx] = 1 tmp = imerode(tmp, se) idx = np.where(tmp == 1) gr_ncl[idx] = 1 # without erosion for i in [13, 17, 18, 26, 52, 53, 54, 58]: idx = np.where(asegd == i) gr_ncl[idx] = 1 # remove remaining structure, e.g. 
brainstem remaining = np.zeros(asegd.shape) idx = np.where(asegd == 16) remaining[idx] = 1 # now remove all the structures from the white matter idx = np.where( (csfA != 0) | (csfB != 0) | (gr_ncl != 0) | (remaining != 0)) wmmask[idx] = 0 iflogger.info("Removing lateral ventricles and eroded grey nuclei and brainstem from white matter mask") # ADD voxels from 'cc_unknown.nii.gz' dataset ccun = nb.load(op.join(fs_dir, 'label', 'cc_unknown.nii.gz')) ccund = ccun.get_data() idx = np.where(ccund != 0) iflogger.info("Add corpus callosum and unknown to wm mask") wmmask[idx] = 1 # check if we should subtract the cortical rois from this parcellation iflogger.info("Loading %s to subtract cortical ROIs from white matter mask" % ('ROI_%s.nii.gz' % parcellation_name)) roi = nb.load(op.join(op.curdir, 'ROI_%s.nii.gz' % parcellation_name)) roid = roi.get_data() assert roid.shape[0] == wmmask.shape[0] pg = nx.read_graphml(pgpath) for brk, brv in pg.nodes_iter(data=True): if brv['dn_region'] == 'cortical': iflogger.info("Subtracting region %s with intensity value %s" % (brv['dn_region'], brv['dn_correspondence_id'])) idx = np.where(roid == int(brv['dn_correspondence_id'])) wmmask[idx] = 0 # output white matter mask. crop and move it afterwards wm_out = op.join(fs_dir, 'mri', 'fsmask_1mm.nii.gz') img = nb.Nifti1Image(wmmask, fsmask.get_affine(), fsmask.get_header()) iflogger.info("Save white matter mask: %s" % wm_out) nb.save(img, wm_out) def crop_and_move_datasets(subject_id, subjects_dir, fs_dir, parcellation_name, out_roi_file,dilation): fs_dir = op.join(subjects_dir, subject_id) cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = "Lausanne2008" log = cmp_config.get_logger() output_dir = op.abspath(op.curdir) iflogger.info("Cropping and moving datasets to %s" % output_dir) ds = [ (op.join(fs_dir, 'mri', 'aseg.nii.gz'), op.abspath('aseg.nii.gz')), (op.join(fs_dir, 'mri', 'ribbon.nii.gz'), op.abspath('ribbon.nii.gz')), (op.join(fs_dir, 'mri', 'fsmask_1mm.nii.gz'), op.abspath('fsmask_1mm.nii.gz')), (op.join(fs_dir, 'label', 'cc_unknown.nii.gz'), op.abspath('cc_unknown.nii.gz')) ] ds.append((op.abspath('ROI_%s.nii.gz' % parcellation_name), op.abspath('ROI_HR_th.nii.gz'))) if(dilation==True): ds.append((op.abspath('ROIv_%s.nii.gz' % parcellation_name), op.abspath('ROIv_HR_th.nii.gz'))) orig = op.join(fs_dir, 'mri', 'orig', '001.mgz') for d in ds: iflogger.info("Processing %s:" % d[0]) if not op.exists(d[0]): raise Exception('File %s does not exist.' 
% d[0]) # reslice to original volume because the roi creation with freesurfer # changed to 256x256x256 resolution mri_cmd = 'mri_convert -rl "%s" -rt nearest "%s" -nc "%s"' % ( orig, d[0], d[1]) runCmd(mri_cmd, log) def extract(Z, shape, position, fill): """ Extract voxel neighbourhood Parameters ---------- Z: the original data shape: tuple containing neighbourhood dimensions position: tuple containing central point indexes fill: value for the padding of Z Returns ------- R: the neighbourhood of the specified point in Z """ R = np.ones(shape, dtype=Z.dtype) * \ fill # initialize output block to the fill value P = np.array( list(position)).astype(int) # position coordinates(numpy array) Rs = np.array( list(R.shape)).astype(int) # output block dimensions (numpy array) Zs = np.array( list(Z.shape)).astype(int) # original volume dimensions (numpy array) R_start = np.zeros(len(shape)).astype(int) R_stop = np.array(list(shape)).astype(int) Z_start = (P - Rs // 2) Z_start_cor = (np.maximum(Z_start, 0)).tolist() # handle borders R_start = R_start + (Z_start_cor - Z_start) Z_stop = (P + Rs // 2) + Rs % 2 Z_stop_cor = (np.minimum(Z_stop, Zs)).tolist() # handle borders R_stop = R_stop - (Z_stop - Z_stop_cor) R[R_start[0]:R_stop[0], R_start[1]:R_stop[1], R_start[2]:R_stop[2]] = Z[Z_start_cor[0]:Z_stop_cor[0], Z_start_cor[1]:Z_stop_cor[1], Z_start_cor[2]:Z_stop_cor[2]] return R class ParcellateInputSpec(BaseInterfaceInputSpec): subject_id = traits.String(mandatory=True, desc='Subject ID') parcellation_name = traits.Enum('scale500', ['scale33', 'scale60', 'scale125', 'scale250', 'scale500'], usedefault=True) freesurfer_dir = Directory(exists=True, desc='Freesurfer main directory') subjects_dir = Directory(exists=True, desc='Freesurfer subjects directory') out_roi_file = File( genfile=True, desc='Region of Interest file for connectivity mapping') dilation = traits.Bool(False, usedefault=True, desc='Dilate cortical parcels? Useful for fMRI connectivity') class ParcellateOutputSpec(TraitedSpec): roi_file = File( exists=True, desc='Region of Interest file for connectivity mapping') roiv_file = File(desc='Region of Interest file for fMRI connectivity mapping') white_matter_mask_file = File(exists=True, desc='White matter mask file') cc_unknown_file = File( desc='Image file with regions labelled as unknown cortical structures', exists=True) ribbon_file = File(desc='Image file detailing the cortical ribbon', exists=True) aseg_file = File( desc='Automated segmentation file converted from Freesurfer "subjects" directory', exists=True) roi_file_in_structural_space = File( desc='ROI image resliced to the dimensions of the original structural image', exists=True) dilated_roi_file_in_structural_space = File( desc='dilated ROI image resliced to the dimensions of the original structural image') class Parcellate(BaseInterface): """Subdivides segmented ROI file into smaller subregions This interface implements the same procedure as in the ConnectomeMapper's parcellation stage (cmp/stages/parcellation/maskcreation.py) for a single parcellation scheme (e.g. 'scale500'). Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> parcellate = cmtk.Parcellate() >>> parcellate.inputs.freesurfer_dir = '.' >>> parcellate.inputs.subjects_dir = '.' 
>>> parcellate.inputs.subject_id = 'subj1' >>> parcellate.inputs.dilation = True >>> parcellate.inputs.parcellation_name = 'scale500' >>> parcellate.run() # doctest: +SKIP """ input_spec = ParcellateInputSpec output_spec = ParcellateOutputSpec def _run_interface(self, runtime): if self.inputs.subjects_dir: os.environ.update({'SUBJECTS_DIR': self.inputs.subjects_dir}) if not os.path.exists(op.join(self.inputs.subjects_dir, self.inputs.subject_id)): raise Exception iflogger.info("ROI_HR_th.nii.gz / fsmask_1mm.nii.gz CREATION") iflogger.info("=============================================") create_annot_label(self.inputs.subject_id, self.inputs.subjects_dir, self.inputs.freesurfer_dir, self.inputs.parcellation_name) create_roi(self.inputs.subject_id, self.inputs.subjects_dir, self.inputs.freesurfer_dir, self.inputs.parcellation_name, self.inputs.dilation) create_wm_mask(self.inputs.subject_id, self.inputs.subjects_dir, self.inputs.freesurfer_dir, self.inputs.parcellation_name) crop_and_move_datasets(self.inputs.subject_id, self.inputs.subjects_dir, self.inputs.freesurfer_dir, self.inputs.parcellation_name, self.inputs.out_roi_file,self.inputs.dilation) return runtime def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.out_roi_file): outputs['roi_file'] = op.abspath(self.inputs.out_roi_file) else: outputs['roi_file'] = op.abspath( self._gen_outfilename('nii.gz', 'ROI')) if(self.inputs.dilation==True): outputs['roiv_file'] = op.abspath(self._gen_outfilename( 'nii.gz', 'ROIv')) outputs['white_matter_mask_file'] = op.abspath('fsmask_1mm.nii.gz') outputs['cc_unknown_file'] = op.abspath('cc_unknown.nii.gz') outputs['ribbon_file'] = op.abspath('ribbon.nii.gz') outputs['aseg_file'] = op.abspath('aseg.nii.gz') outputs['roi_file_in_structural_space'] = op.abspath( 'ROI_HR_th.nii.gz') if(self.inputs.dilation==True): outputs['dilated_roi_file_in_structural_space'] = op.abspath( 'ROIv_HR_th.nii.gz') return outputs def _gen_outfilename(self, ext, prefix='ROI'): return prefix + '_' + self.inputs.parcellation_name + '.' 
+ ext nipype-0.9.2/nipype/interfaces/cmtk/setup.py000066400000000000000000000007111227300005300211140ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('cmtk', parent_package, top_path) #config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/cmtk/tests/000077500000000000000000000000001227300005300205455ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py000066400000000000000000000020121227300005300263100ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.cmtk.nx import AverageNetworks def test_AverageNetworks_inputs(): input_map = dict(group_id=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(mandatory=True, ), out_gexf_groupavg=dict(), out_gpickled_groupavg=dict(), resolution_network_file=dict(), ) inputs = AverageNetworks.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_AverageNetworks_outputs(): output_map = dict(gexf_groupavg=dict(), gpickled_groupavg=dict(), matlab_groupavgs=dict(), ) outputs = AverageNetworks.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py000066400000000000000000000024371227300005300255020ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.cmtk.convert import CFFConverter def test_CFFConverter_inputs(): input_map = dict(creator=dict(), data_files=dict(), description=dict(usedefault=True, ), email=dict(), gifti_labels=dict(), gifti_surfaces=dict(), gpickled_networks=dict(), graphml_networks=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), license=dict(), nifti_volumes=dict(), out_file=dict(usedefault=True, ), publisher=dict(), references=dict(), relation=dict(), rights=dict(), script_files=dict(), species=dict(usedefault=True, ), timeseries_files=dict(), title=dict(), tract_files=dict(), ) inputs = CFFConverter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CFFConverter_outputs(): output_map = dict(connectome_file=dict(), ) outputs = CFFConverter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py000066400000000000000000000036061227300005300256030ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.cmtk.cmtk import CreateMatrix def test_CreateMatrix_inputs(): input_map = dict(count_region_intersections=dict(usedefault=True, ), out_endpoint_array_name=dict(genfile=True, ), out_fiber_length_std_matrix_mat_file=dict(genfile=True, ), 
out_intersection_matrix_mat_file=dict(genfile=True, ), out_matrix_file=dict(genfile=True, ), out_matrix_mat_file=dict(usedefault=True, ), out_mean_fiber_length_matrix_mat_file=dict(genfile=True, ), out_median_fiber_length_matrix_mat_file=dict(genfile=True, ), resolution_network_file=dict(mandatory=True, ), roi_file=dict(mandatory=True, ), tract_file=dict(mandatory=True, ), ) inputs = CreateMatrix.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CreateMatrix_outputs(): output_map = dict(endpoint_file=dict(), endpoint_file_mm=dict(), fiber_label_file=dict(), fiber_labels_noorphans=dict(), fiber_length_file=dict(), fiber_length_std_matrix_mat_file=dict(), filtered_tractographies=dict(), filtered_tractography=dict(), filtered_tractography_by_intersections=dict(), intersection_matrix_file=dict(), intersection_matrix_mat_file=dict(), matlab_matrix_files=dict(), matrix_file=dict(), matrix_files=dict(), matrix_mat_file=dict(), mean_fiber_length_matrix_mat_file=dict(), median_fiber_length_matrix_mat_file=dict(), stats_file=dict(), ) outputs = CreateMatrix.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py000066400000000000000000000016241227300005300254050ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.cmtk.cmtk import CreateNodes def test_CreateNodes_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), out_filename=dict(usedefault=True, ), resolution_network_file=dict(mandatory=True, ), roi_file=dict(mandatory=True, ), ) inputs = CreateNodes.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CreateNodes_outputs(): output_map = dict(node_network=dict(), ) outputs = CreateNodes.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py000066400000000000000000000015551227300005300261130ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.cmtk.convert import MergeCNetworks def test_MergeCNetworks_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(mandatory=True, ), out_file=dict(usedefault=True, ), ) inputs = MergeCNetworks.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MergeCNetworks_outputs(): output_map = dict(connectome_file=dict(), ) outputs = MergeCNetworks.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py000066400000000000000000000023241227300005300274670ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.cmtk.nbs import NetworkBasedStatistic def 
test_NetworkBasedStatistic_inputs(): input_map = dict(edge_key=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_group1=dict(mandatory=True, ), in_group2=dict(mandatory=True, ), node_position_network=dict(), number_of_permutations=dict(usedefault=True, ), out_nbs_network=dict(), out_nbs_pval_network=dict(), t_tail=dict(usedefault=True, ), threshold=dict(usedefault=True, ), ) inputs = NetworkBasedStatistic.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_NetworkBasedStatistic_outputs(): output_map = dict(nbs_network=dict(), nbs_pval_network=dict(), network_files=dict(), ) outputs = NetworkBasedStatistic.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py000066400000000000000000000032101227300005300263120ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.cmtk.nx import NetworkXMetrics def test_NetworkXMetrics_inputs(): input_map = dict(compute_clique_related_measures=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(mandatory=True, ), out_edge_metrics_matlab=dict(genfile=True, ), out_global_metrics_matlab=dict(genfile=True, ), out_k_core=dict(usedefault=True, ), out_k_crust=dict(usedefault=True, ), out_k_shell=dict(usedefault=True, ), out_node_metrics_matlab=dict(genfile=True, ), out_pickled_extra_measures=dict(usedefault=True, ), treat_as_weighted_graph=dict(usedefault=True, ), ) inputs = NetworkXMetrics.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_NetworkXMetrics_outputs(): output_map = dict(edge_measure_networks=dict(), edge_measures_matlab=dict(), global_measures_matlab=dict(), gpickled_network_files=dict(), k_core=dict(), k_crust=dict(), k_networks=dict(), k_shell=dict(), matlab_dict_measures=dict(), matlab_matrix_files=dict(), node_measure_networks=dict(), node_measures_matlab=dict(), pickled_extra_measures=dict(), ) outputs = NetworkXMetrics.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py000066400000000000000000000023101227300005300252560ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.cmtk.parcellation import Parcellate def test_Parcellate_inputs(): input_map = dict(dilation=dict(usedefault=True, ), freesurfer_dir=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), out_roi_file=dict(genfile=True, ), parcellation_name=dict(usedefault=True, ), subject_id=dict(mandatory=True, ), subjects_dir=dict(), ) inputs = Parcellate.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Parcellate_outputs(): output_map = dict(aseg_file=dict(), cc_unknown_file=dict(), dilated_roi_file_in_structural_space=dict(), ribbon_file=dict(), roi_file=dict(), roi_file_in_structural_space=dict(), roiv_file=dict(), 
white_matter_mask_file=dict(), ) outputs = Parcellate.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py000066400000000000000000000020571227300005300242750ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.cmtk.cmtk import ROIGen def test_ROIGen_inputs(): input_map = dict(LUT_file=dict(xor=['use_freesurfer_LUT'], ), aparc_aseg_file=dict(mandatory=True, ), freesurfer_dir=dict(requires=['use_freesurfer_LUT'], ), ignore_exception=dict(nohash=True, usedefault=True, ), out_dict_file=dict(genfile=True, ), out_roi_file=dict(genfile=True, ), use_freesurfer_LUT=dict(xor=['LUT_file'], ), ) inputs = ROIGen.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ROIGen_outputs(): output_map = dict(dict_file=dict(), roi_file=dict(), ) outputs = ROIGen.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/dcm2nii.py000066400000000000000000000122671227300005300203540ustar00rootroot00000000000000from nipype.interfaces.base import (CommandLine, CommandLineInputSpec, InputMultiPath, traits, TraitedSpec, OutputMultiPath, isdefined, File, Directory) import os from copy import deepcopy from nipype.utils.filemanip import split_filename import re class Dcm2niiInputSpec(CommandLineInputSpec): source_names = InputMultiPath(File(exists=True), argstr="%s", position=10, mandatory=True) gzip_output = traits.Bool(False, argstr='-g', position=0, usedefault=True) nii_output = traits.Bool(True, argstr='-n', position=1, usedefault=True) anonymize = traits.Bool(argstr='-a', position=2) id_in_filename = traits.Bool(False, argstr='-i', usedefault=True, position=3) reorient = traits.Bool(argstr='-r', position=4) reorient_and_crop = traits.Bool(argstr='-x', position=5) output_dir = Directory(exists=True, argstr='-o %s', genfile=True, position=6) config_file = File(exists=True, argstr="-b %s", genfile=True, position=7) convert_all_pars = traits.Bool(argstr='-v', position=8) args = traits.Str(argstr='%s', desc='Additional parameters to the command', position=9) class Dcm2niiOutputSpec(TraitedSpec): converted_files = OutputMultiPath(File(exists=True)) reoriented_files = OutputMultiPath(File(exists=True)) reoriented_and_cropped_files = OutputMultiPath(File(exists=True)) bvecs = OutputMultiPath(File(exists=True)) bvals = OutputMultiPath(File(exists=True)) class Dcm2nii(CommandLine): input_spec=Dcm2niiInputSpec output_spec=Dcm2niiOutputSpec _cmd = 'dcm2nii' def _format_arg(self, opt, spec, val): if opt in ['gzip_output', 'nii_output', 'anonymize', 'id_in_filename', 'reorient', 'reorient_and_crop', 'convert_all_pars']: spec = deepcopy(spec) if val: spec.argstr += ' y' else: spec.argstr += ' n' val = True return super(Dcm2nii, self)._format_arg(opt, spec, val) def _run_interface(self, runtime): new_runtime = super(Dcm2nii, self)._run_interface(runtime) (self.output_files, self.reoriented_files, self.reoriented_and_cropped_files, self.bvecs, self.bvals) = self._parse_stdout(new_runtime.stdout) return new_runtime def _parse_stdout(self, stdout): files = [] reoriented_files = [] reoriented_and_cropped_files = [] bvecs = [] bvals 
= [] skip = False last_added_file = None for line in stdout.split("\n"): if not skip: file = None if line.startswith("Saving "): file = line[len("Saving "):] elif line.startswith("GZip..."): #for gzipped outpus files are not absolute if isdefined(self.inputs.output_dir): output_dir = self.inputs.output_dir else: output_dir = self._gen_filename('output_dir') file = os.path.abspath(os.path.join(output_dir, line[len("GZip..."):])) elif line.startswith("Number of diffusion directions "): if last_added_file: base, filename, ext = split_filename(last_added_file) bvecs.append(os.path.join(base,filename + ".bvec")) bvals.append(os.path.join(base,filename + ".bval")) elif re.search('-->(.*)', line): search = re.search('.*--> (.*)', line) file = search.groups()[0] if file: files.append(file) last_added_file = file continue if line.startswith("Reorienting as "): reoriented_files.append(line[len("Reorienting as "):]) skip = True continue elif line.startswith("Cropping NIfTI/Analyze image "): base, filename = os.path.split(line[len("Cropping NIfTI/Analyze image "):]) filename = "c" + filename reoriented_and_cropped_files.append(os.path.join(base, filename)) skip = True continue skip = False return files, reoriented_files, reoriented_and_cropped_files, bvecs, bvals def _list_outputs(self): outputs = self.output_spec().get() outputs['converted_files'] = self.output_files outputs['reoriented_files'] = self.reoriented_files outputs['reoriented_and_cropped_files'] = self.reoriented_and_cropped_files outputs['bvecs'] = self.bvecs outputs['bvals'] = self.bvals return outputs def _gen_filename(self, name): if name == 'output_dir': return os.getcwd() elif name == 'config_file': config_file = "config.ini" f = open(config_file, "w") # disable interactive mode f.write("[BOOL]\nManualNIfTIConv=0\n") f.close() return config_file return None nipype-0.9.2/nipype/interfaces/dcmstack.py000066400000000000000000000343461227300005300206220ustar00rootroot00000000000000"""Provides interfaces to various commands provided by freeusrfer Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ from __future__ import absolute_import import os, string from os import path from glob import glob from nipype.interfaces.base import (TraitedSpec, DynamicTraitedSpec, InputMultiPath, File, Directory, traits, BaseInterface, ) import nibabel as nb from nipype.interfaces.traits_extension import isdefined, Undefined have_dcmstack = True try: import dicom import dcmstack from dcmstack.dcmmeta import NiftiWrapper except ImportError: have_dcmstack = False def sanitize_path_comp(path_comp): result = [] for char in path_comp: if not char in string.letters + string.digits + '-_.': result.append('_') else: result.append(char) return ''.join(result) class NiftiGeneratorBaseInputSpec(TraitedSpec): out_format = traits.Str(desc="String which can be formatted with " "meta data to create the output filename(s)") out_ext = traits.Str('.nii.gz', usedefault=True, desc="Determines output file type") class NiftiGeneratorBase(BaseInterface): '''Base class for interfaces that produce Nifti files, potentially with embeded meta data.''' def _get_out_path(self, meta, idx=None): '''Return the output path for the gernerated Nifti.''' if self.inputs.out_format: out_fmt = self.inputs.out_format else: #If no out_format is specified, use a sane default that will work #with the provided meta data. 
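            #The fallback name is assembled from an optional index plus whichever
            #of SeriesNumber, ProtocolName or SeriesDescription is present in the
            #meta data, defaulting to the literal string 'sequence'.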
out_fmt = [] if not idx is None: out_fmt.append('%03d' % idx) if 'SeriesNumber' in meta: out_fmt.append('%(SeriesNumber)03d') if 'ProtocolName' in meta: out_fmt.append('%(ProtocolName)s') elif 'SeriesDescription' in meta: out_fmt.append('%(SeriesDescription)s') else: out_fmt.append('sequence') out_fmt = '-'.join(out_fmt) out_fn = (out_fmt % meta) + self.inputs.out_ext out_fn = sanitize_path_comp(out_fn) return path.join(os.getcwd(), out_fn) class DcmStackInputSpec(NiftiGeneratorBaseInputSpec): dicom_files = traits.Either(InputMultiPath(File(exists=True)), Directory(exists=True), traits.Str(), mandatory=True) embed_meta = traits.Bool(desc="Embed DICOM meta data into result") exclude_regexes = traits.List(desc="Meta data to exclude, suplementing " "any default exclude filters") include_regexes = traits.List(desc="Meta data to include, overriding any " "exclude filters") class DcmStackOutputSpec(TraitedSpec): out_file = File(exists=True) class DcmStack(NiftiGeneratorBase): '''Create one Nifti file from a set of DICOM files. Can optionally embed meta data. Example ------- >>> from nipype.interfaces.dcmstack import DcmStack >>> stacker = DcmStack() >>> stacker.inputs.dicom_files = 'path/to/series/' >>> stacker.run() # doctest: +SKIP >>> result.outputs.out_file # doctest: +SKIP '/path/to/cwd/sequence.nii.gz' ''' input_spec = DcmStackInputSpec output_spec = DcmStackOutputSpec def _get_filelist(self, trait_input): if isinstance(trait_input, str): if path.isdir(trait_input): return glob(path.join(trait_input, '*.dcm')) else: return glob(trait_input) return trait_input def _run_interface(self, runtime): src_paths = self._get_filelist(self.inputs.dicom_files) include_regexes = dcmstack.default_key_incl_res if isdefined(self.inputs.include_regexes): include_regexes += self.inputs.include_regexes exclude_regexes = dcmstack.default_key_excl_res if isdefined(self.inputs.exclude_regexes): exclude_regexes += self.inputs.exclude_regexes meta_filter = dcmstack.make_key_regex_filter(exclude_regexes, include_regexes) stack = dcmstack.DicomStack(meta_filter=meta_filter) for src_path in src_paths: src_dcm = dicom.read_file(src_path, force=True) stack.add_dcm(src_dcm) nii = stack.to_nifti(embed_meta=True) nw = NiftiWrapper(nii) self.out_path = \ self._get_out_path(nw.meta_ext.get_class_dict(('global', 'const'))) if not self.inputs.embed_meta: nw.remove_extension() nb.save(nii, self.out_path) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs["out_file"] = self.out_path return outputs class GroupAndStackOutputSpec(TraitedSpec): out_list = traits.List(desc="List of output nifti files") class GroupAndStack(DcmStack): '''Create (potentially) multiple Nifti files for a set of DICOM files. 
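    Files are grouped (via dcmstack.parse_and_stack) and one Nifti file is
    written per group; the resulting paths are returned in out_list. The
    example below is an illustrative sketch in which 'path/to/series/' stands
    in for a directory of DICOM files.

    Example
    -------

    >>> from nipype.interfaces.dcmstack import GroupAndStack
    >>> stacker = GroupAndStack()
    >>> stacker.inputs.dicom_files = 'path/to/series/'
    >>> result = stacker.run()  # doctest: +SKIP
    >>> result.outputs.out_list  # doctest: +SKIP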
''' input_spec = DcmStackInputSpec output_spec = GroupAndStackOutputSpec def _run_interface(self, runtime): src_paths = self._get_filelist(self.inputs.dicom_files) stacks = dcmstack.parse_and_stack(src_paths) self.out_list = [] for key, stack in stacks.iteritems(): nw = NiftiWrapper(stack.to_nifti(embed_meta=True)) const_meta = nw.meta_ext.get_class_dict(('global', 'const')) out_path = self._get_out_path(const_meta) if not self.inputs.embed_meta: nw.remove_extension() nb.save(nw.nii_img, out_path) self.out_list.append(out_path) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs["out_list"] = self.out_list return outputs class LookupMetaInputSpec(TraitedSpec): in_file = File(mandatory=True, exists=True, desc='The input Nifti file') meta_keys = traits.Either(traits.List(), traits.Dict(), mandatory=True, desc=("List of meta data keys to lookup, or a " "dict where keys specify the meta data keys to " "lookup and the values specify the output names") ) class LookupMeta(BaseInterface): '''Lookup meta data values from a Nifti with embeded meta data. Example ------- >>> from nipype.interfaces import dcmstack >>> lookup = dcmstack.LookupMeta() >>> lookup.inputs.in_file = 'functional.nii' >>> lookup.inputs.meta_keys = {'RepetitionTime' : 'TR', \ 'EchoTime' : 'TE'} >>> result = lookup.run() # doctest: +SKIP >>> result.outputs.TR # doctest: +SKIP 9500.0 >>> result.outputs.TE # doctest: +SKIP 95.0 ''' input_spec = LookupMetaInputSpec output_spec = DynamicTraitedSpec def _make_name_map(self): if isinstance(self.inputs.meta_keys, list): self._meta_keys = {} for key in self.inputs.meta_keys: self._meta_keys[key] = key else: self._meta_keys = self.inputs.meta_keys def _outputs(self): self._make_name_map() outputs = super(LookupMeta, self)._outputs() undefined_traits = {} for out_name in self._meta_keys.values(): outputs.add_trait(out_name, traits.Any) undefined_traits[out_name] = Undefined outputs.trait_set(trait_change_notify=False, **undefined_traits) #Not sure why this is needed for out_name in self._meta_keys.values(): _ = getattr(outputs, out_name) return outputs def _run_interface(self, runtime): #If the 'meta_keys' input is a list, covert it to a dict self._make_name_map() nw = NiftiWrapper.from_filename(self.inputs.in_file) self.result = {} for meta_key, out_name in self._meta_keys.iteritems(): self.result[out_name] = nw.meta_ext.get_values(meta_key) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs.update(self.result) return outputs class CopyMetaInputSpec(TraitedSpec): src_file = File(mandatory=True, exists=True) dest_file = File(mandatory=True, exists=True) include_classes = traits.List(desc="List of specific meta data " "classifications to include. If not " "specified include everything.") exclude_classes = traits.List(desc="List of meta data " "classifications to exclude") class CopyMetaOutputSpec(TraitedSpec): dest_file = File(exists=True) class CopyMeta(BaseInterface): '''Copy meta data from one Nifti file to another. 
Useful for preserving meta data after some processing steps.''' input_spec = CopyMetaInputSpec output_spec = CopyMetaOutputSpec def _run_interface(self, runtime): src_nii = nb.load(self.inputs.src_file) src = NiftiWrapper(src_nii, make_empty=True) dest_nii = nb.load(self.inputs.dest_file) dest = NiftiWrapper(dest_nii, make_empty=True) classes = src.meta_ext.get_valid_classes() if self.inputs.include_classes: classes = [cls for cls in classes if cls in self.inputs.include_classes ] if self.inputs.exclude_classes: classes = [cls for cls in classes if not cls in self.inputs.exclude_classes ] for cls in classes: src_dict = src.meta_ext.get_class_dict(cls) dest_dict = dest.meta_ext.get_class_dict(cls) dest_dict.update(src_dict) # Update the shape and slice dimension to reflect the meta extension update. dest.meta_ext.slice_dim = src.meta_ext.slice_dim dest.meta_ext.shape = src.meta_ext.shape self.out_path = path.join(os.getcwd(), path.basename(self.inputs.dest_file)) dest.to_filename(self.out_path) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['dest_file'] = self.out_path return outputs class MergeNiftiInputSpec(NiftiGeneratorBaseInputSpec): in_files = traits.List(mandatory=True, desc="List of Nifti files to merge") sort_order = traits.Either(traits.Str(), traits.List(), desc="One or more meta data keys to " "sort files by.") merge_dim = traits.Int(desc="Dimension to merge along. If not " "specified, the last singular or " "non-existant dimension is used.") class MergeNiftiOutputSpec(TraitedSpec): out_file = File(exists=True, desc="Merged Nifti file") def make_key_func(meta_keys, index=None): def key_func(src_nii): result = [src_nii.get_meta(key, index) for key in meta_keys] return result return key_func class MergeNifti(NiftiGeneratorBase): '''Merge multiple Nifti files into one. Merges together meta data extensions as well.''' input_spec = MergeNiftiInputSpec output_spec = MergeNiftiOutputSpec def _run_interface(self, runtime): niis = [nb.load(fn) for fn in self.inputs.in_files ] nws = [NiftiWrapper(nii, make_empty=True) for nii in niis ] if self.inputs.sort_order: sort_order = self.inputs.sort_order if isinstance(sort_order, str): sort_order = [sort_order] nws.sort(key=make_key_func(sort_order)) if self.inputs.merge_dim == traits.Undefined: merge_dim = None else: merge_dim = self.inputs.merge_dim merged = NiftiWrapper.from_sequence(nws, merge_dim) const_meta = merged.meta_ext.get_class_dict(('global', 'const')) self.out_path = self._get_out_path(const_meta) nb.save(merged.nii_img, self.out_path) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = self.out_path return outputs class SplitNiftiInputSpec(NiftiGeneratorBaseInputSpec): in_file = File(exists=True, mandatory=True, desc="Nifti file to split") split_dim = traits.Int(desc="Dimension to split along. If not " "specified, the last dimension is used.") class SplitNiftiOutputSpec(TraitedSpec): out_list = traits.List(File(exists=True), desc="Split Nifti files") class SplitNifti(NiftiGeneratorBase): '''Split one Nifti file into many along the specified dimension. 
Each result has an updated meta data extension as well.''' input_spec = SplitNiftiInputSpec output_spec = SplitNiftiOutputSpec def _run_interface(self, runtime): self.out_list = [] nii = nb.load(self.inputs.in_file) nw = NiftiWrapper(nii, make_empty=True) split_dim = None if self.inputs.split_dim == traits.Undefined: split_dim = None else: split_dim = self.inputs.split_dim for split_idx, split_nw in enumerate(nw.split(split_dim)): const_meta = split_nw.meta_ext.get_class_dict(('global', 'const')) out_path = self._get_out_path(const_meta, idx=split_idx) nb.save(split_nw.nii_img, out_path) self.out_list.append(out_path) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_list'] = self.out_list return outputs nipype-0.9.2/nipype/interfaces/diffusion_toolkit/000077500000000000000000000000001227300005300222005ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/diffusion_toolkit/__init__.py000066400000000000000000000002201227300005300243030ustar00rootroot00000000000000from .base import Info from .postproc import SplineFilter from .dti import DTIRecon, DTITracker from .odf import HARDIMat, ODFRecon, ODFTracker nipype-0.9.2/nipype/interfaces/diffusion_toolkit/base.py000066400000000000000000000024371227300005300234720ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The dtk module provides basic functions for interfacing with Diffusion Toolkit tools. Currently these tools are supported: * TODO Examples -------- See the docstrings for the individual classes for 'working' examples. """ __docformat__ = 'restructuredtext' import re from nipype.interfaces.base import CommandLine class Info(object): """ Handle dtk output type and version information. 
Examples -------- >>> from nipype.interfaces.diffusion_toolkit import Info >>> Info.version() # doctest: +SKIP >>> Info.subjectsdir() # doctest: +SKIP """ @staticmethod def version(): """Check for dtk version on system Parameters ---------- None Returns ------- version : str Version number as string or None if FSL not found """ clout = CommandLine(command='dti_recon', terminal_output='allatonce').run() if clout.runtime.returncode is not 0: return None dtirecon = clout.runtime.stdout result = re.search('dti_recon (.*)\n', dtirecon) version = result.group(0).split()[1] return version nipype-0.9.2/nipype/interfaces/diffusion_toolkit/dti.py000066400000000000000000000223371227300005300233410ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Provides interfaces to various commands provided by diffusion toolkit Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import re from nipype.utils.filemanip import fname_presuffix, split_filename, copyfile import os __docformat__ = 'restructuredtext' from nipype.interfaces.base import (TraitedSpec, File, traits, CommandLine, CommandLineInputSpec, isdefined) class DTIReconInputSpec(CommandLineInputSpec): DWI = File(desc='Input diffusion volume', argstr='%s',exists=True, mandatory=True,position=1) out_prefix = traits.Str("dti", desc='Output file prefix', argstr='%s', usedefault=True,position=2) output_type = traits.Enum('nii', 'analyze', 'ni1', 'nii.gz', argstr='-ot %s', desc='output file type', usedefault=True) bvecs = File(exists=True, desc = 'b vectors file', argstr='-gm %s', mandatory=True) bvals = File(exists=True,desc = 'b values file', mandatory=True) n_averages = traits.Int(desc='Number of averages', argstr='-nex %s') image_orientation_vectors = traits.List(traits.Float(), minlen=6, maxlen=6, desc="""specify image orientation vectors. if just one argument given, will treat it as filename and read the orientation vectors from the file. if 6 arguments are given, will treat them as 6 float numbers and construct the 1st and 2nd vector and calculate the 3rd one automatically. this information will be used to determine image orientation, as well as to adjust gradient vectors with oblique angle when""", argstr="-iop %f") oblique_correction = traits.Bool(desc="""when oblique angle(s) applied, some SIEMENS dti protocols do not adjust gradient accordingly, thus it requires adjustment for correct diffusion tensor calculation""", argstr="-oc") b0_threshold = traits.Float(desc="""program will use b0 image with the given threshold to mask out high background of fa/adc maps. by default it will calculate threshold automatically. 
but if it failed, you need to set it manually.""", argstr="-b0_th") class DTIReconOutputSpec(TraitedSpec): ADC = File(exists=True) B0 = File(exists=True) L1 = File(exists=True) L2 = File(exists=True) L3 = File(exists=True) exp = File(exists=True) FA = File(exists=True) FA_color = File(exists=True) tensor = File(exists=True) V1 = File(exists=True) V2 = File(exists=True) V3 = File(exists=True) class DTIRecon(CommandLine): """Use dti_recon to generate tensors and other maps """ input_spec=DTIReconInputSpec output_spec=DTIReconOutputSpec _cmd = 'dti_recon' def _create_gradient_matrix(self, bvecs_file, bvals_file): _gradient_matrix_file = 'gradient_matrix.txt' bvals = [val for val in re.split('\s+', open(bvals_file).readline().strip())] bvecs_f = open(bvecs_file) bvecs_x = [val for val in re.split('\s+', bvecs_f.readline().strip())] bvecs_y = [val for val in re.split('\s+', bvecs_f.readline().strip())] bvecs_z = [val for val in re.split('\s+', bvecs_f.readline().strip())] bvecs_f.close() gradient_matrix_f = open(_gradient_matrix_file, 'w') for i in range(len(bvals)): gradient_matrix_f.write("%s, %s, %s, %s\n"%(bvecs_x[i], bvecs_y[i], bvecs_z[i], bvals[i])) gradient_matrix_f.close() return _gradient_matrix_file def _format_arg(self, name, spec, value): if name == "bvecs": new_val = self._create_gradient_matrix(self.inputs.bvecs, self.inputs.bvals) return super(DTIRecon, self)._format_arg("bvecs", spec, new_val) return super(DTIRecon, self)._format_arg(name, spec, value) def _list_outputs(self): out_prefix = self.inputs.out_prefix output_type = self.inputs.output_type outputs = self.output_spec().get() outputs['ADC'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_adc.'+ output_type)) outputs['B0'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_b0.'+ output_type)) outputs['L1'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_e1.'+ output_type)) outputs['L2'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_e2.'+ output_type)) outputs['L3'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_e3.'+ output_type)) outputs['exp'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_exp.'+ output_type)) outputs['FA'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_fa.'+ output_type)) outputs['FA_color'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_fa_color.'+ output_type)) outputs['tensor'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_tensor.'+ output_type)) outputs['V1'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_v1.'+ output_type)) outputs['V2'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_v2.'+ output_type)) outputs['V3'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_v3.'+ output_type)) return outputs class DTITrackerInputSpec(CommandLineInputSpec): tensor_file = File(exists=True, desc="reconstructed tensor file") input_type = traits.Enum('nii', 'analyze', 'ni1', 'nii.gz', desc="""input and output file type. accepted values are: analyze -> analyze format 7.5 ni1 -> nifti format saved in seperate .hdr and .img file nii -> nifti format with one .nii file nii.gz -> nifti format with compression default type is 'nii'""", argstr = "-it %s") tracking_method = traits.Enum('fact', 'rk2', 'tl', 'sl', desc="""fact -> use FACT method for tracking. this is the default method. rk2 -> use 2nd order runge-kutta method for tracking. tl -> use tensorline method for tracking. 
sl -> use interpolated streamline method with fixed step-length""", argstr="-%s") step_length = traits.Float(desc="""set step length, in the unit of minimum voxel size. default value is 0.5 for interpolated streamline method and 0.1 for other methods""", argstr="-l %f") angle_threshold = traits.Float(desc="set angle threshold. default value is 35 degree", argstr="-at %f") angle_threshold_weight = traits.Float(desc="set angle threshold weighting factor. weighting will be be applied \ on top of the angle_threshold", argstr = "-atw %f") random_seed = traits.Int(desc = "use random location in a voxel instead of the center of the voxel \ to seed. can also define number of seed per voxel. default is 1", argstr="-rseed") invert_x = traits.Bool(desc="invert x component of the vector", argstr = "-ix") invert_y = traits.Bool(desc="invert y component of the vector", argstr = "-iy") invert_z = traits.Bool(desc="invert z component of the vector", argstr = "-iz") swap_xy = traits.Bool(desc="swap x & y vectors while tracking", argstr = "-sxy") swap_yz = traits.Bool(desc="swap y & z vectors while tracking", argstr = "-syz") swap_zx = traits.Bool(desc="swap x & z vectors while tracking", argstr = "-szx") mask1_file = File(desc="first mask image", mandatory=True, argstr="-m %s", position=2) mask1_threshold = traits.Float(desc="threshold value for the first mask image, if not given, the program will \ try automatically find the threshold", position=3) mask2_file = File(desc="second mask image", argstr="-m2 %s", position=4) mask2_threshold = traits.Float(desc="threshold value for the second mask image, if not given, the program will \ try automatically find the threshold", position=5) input_data_prefix = traits.Str("dti", desc="for internal naming use only", position=0, argstr="%s", usedefault=True) output_file = File("tracks.trk", "file containing tracks", argstr="%s", position=1, usedefault=True) output_mask = File(desc="output a binary mask file in analyze format", argstr="-om %s") primary_vector = traits.Enum('v2', 'v3', desc = "which vector to use for fibre tracking: v2 or v3. 
If not set use v1", argstr="-%s") class DTITrackerOutputSpec(TraitedSpec): track_file = File(exists=True) mask_file = File(exists=True) class DTITracker(CommandLine): input_spec=DTITrackerInputSpec output_spec=DTITrackerOutputSpec _cmd = 'dti_tracker' def _run_interface(self, runtime): _, _, ext = split_filename(self.inputs.tensor_file) copyfile(self.inputs.tensor_file, os.path.abspath(self.inputs.input_data_prefix + "_tensor" + ext), copy=False) return super(DTITracker, self)._run_interface(runtime) def _list_outputs(self): outputs = self.output_spec().get() outputs['track_file'] = os.path.abspath(self.inputs.output_file) if isdefined(self.inputs.output_mask) and self.inputs.output_mask: outputs['mask_file'] = os.path.abspath(self.inputs.output_mask) return outputsnipype-0.9.2/nipype/interfaces/diffusion_toolkit/odf.py000066400000000000000000000316771227300005300233400ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Provides interfaces to various commands provided by diffusion toolkit Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import re from nipype.utils.filemanip import fname_presuffix, split_filename, copyfile import os __docformat__ = 'restructuredtext' from nipype.interfaces.base import (TraitedSpec, File, traits, CommandLine, CommandLineInputSpec, isdefined) class HARDIMatInputSpec(CommandLineInputSpec): bvecs = File(exists=True, desc = 'b vectors file', argstr='%s', position=1, mandatory=True) bvals = File(exists=True,desc = 'b values file', mandatory=True) out_file = File("recon_mat.dat", desc = 'output matrix file', argstr='%s', usedefault=True, position=2) order = traits.Int(argstr='-order %s', desc="""maximum order of spherical harmonics. must be even number. default is 4""") odf_file = File(exists=True, argstr='-odf %s', desc="""filename that contains the reconstruction points on a HEMI-sphere. use the pre-set 181 points by default""") reference_file = File(exists=True, argstr='-ref %s', desc="""provide a dicom or nifti image as the reference for the program to figure out the image orientation information. if no such info was found in the given image header, the next 5 options -info, etc., will be used if provided. if image orientation info can be found in the given reference, all other 5 image orientation options will be IGNORED""") image_info = File(exists=True, argstr='-info %s', desc="""specify image information file. the image info file is generated from original dicom image by diff_unpack program and contains image orientation and other information needed for reconstruction and tracking. by default will look into the image folder for .info file""") image_orientation_vectors = traits.List(traits.Float(), minlen=6, maxlen=6, desc="""specify image orientation vectors. if just one argument given, will treat it as filename and read the orientation vectors from the file. if 6 arguments are given, will treat them as 6 float numbers and construct the 1st and 2nd vector and calculate the 3rd one automatically. 
this information will be used to determine image orientation, as well as to adjust gradient vectors with oblique angle when""", argstr="-iop %f") oblique_correction = traits.Bool(desc="""when oblique angle(s) applied, some SIEMENS dti protocols do not adjust gradient accordingly, thus it requires adjustment for correct diffusion tensor calculation""", argstr="-oc") class HARDIMatOutputSpec(TraitedSpec): out_file = File(exists=True, desc='output matrix file') class HARDIMat(CommandLine): """Use hardi_mat to calculate a reconstruction matrix from a gradient table """ input_spec=HARDIMatInputSpec output_spec=HARDIMatOutputSpec _cmd = 'hardi_mat' def _create_gradient_matrix(self, bvecs_file, bvals_file): _gradient_matrix_file = 'gradient_matrix.txt' bvals = [val for val in re.split('\s+', open(bvals_file).readline().strip())] bvecs_f = open(bvecs_file) bvecs_x = [val for val in re.split('\s+', bvecs_f.readline().strip())] bvecs_y = [val for val in re.split('\s+', bvecs_f.readline().strip())] bvecs_z = [val for val in re.split('\s+', bvecs_f.readline().strip())] bvecs_f.close() gradient_matrix_f = open(_gradient_matrix_file, 'w') for i in range(len(bvals)): if int(bvals[i]) == 0: continue gradient_matrix_f.write("%s %s %s\n"%(bvecs_x[i], bvecs_y[i], bvecs_z[i])) gradient_matrix_f.close() return _gradient_matrix_file def _format_arg(self, name, spec, value): if name == "bvecs": new_val = self._create_gradient_matrix(self.inputs.bvecs, self.inputs.bvals) return super(HARDIMat, self)._format_arg("bvecs", spec, new_val) return super(HARDIMat, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = os.path.abspath(self.inputs.out_file) return outputs class ODFReconInputSpec(CommandLineInputSpec): DWI = File(desc='Input raw data', argstr='%s',exists=True, mandatory=True,position=1) n_directions = traits.Int(desc='Number of directions', argstr='%s', mandatory=True, position=2) n_output_directions = traits.Int(desc='Number of output directions', argstr='%s', mandatory=True, position=3) out_prefix = traits.Str("odf", desc='Output file prefix', argstr='%s', usedefault=True, position=4) matrix = File(argstr='-mat %s', exists=True, desc="""use given file as reconstruction matrix.""", mandatory=True) n_b0 = traits.Int(argstr='-b0 %s', desc="""number of b0 scans. by default the program gets this information from the number of directions and number of volumes in the raw data. useful when dealing with incomplete raw data set or only using part of raw data set to reconstruct""", mandatory=True) output_type = traits.Enum('nii', 'analyze', 'ni1', 'nii.gz', argstr='-ot %s', desc='output file type', usedefault=True) sharpness = traits.Float(desc="""smooth or sharpen the raw data. factor > 0 is smoothing. factor < 0 is sharpening. default value is 0 NOTE: this option applies to DSI study only""", argstr='-s %f') filter = traits.Bool(desc="""apply a filter (e.g. high pass) to the raw image""", argstr='-f') subtract_background = traits.Bool(desc="""subtract the background value before reconstruction""", argstr='-bg') dsi = traits.Bool(desc="""indicates that the data is dsi""", argstr='-dsi') output_entropy = traits.Bool(desc="""output entropy map""", argstr='-oe') image_orientation_vectors = traits.List(traits.Float(), minlen=6, maxlen=6, desc="""specify image orientation vectors. if just one argument given, will treat it as filename and read the orientation vectors from the file. 
if 6 arguments are given, will treat them as 6 float numbers and construct the 1st and 2nd vector and calculate the 3rd one automatically. this information will be used to determine image orientation, as well as to adjust gradient vectors with oblique angle when""", argstr="-iop %f") oblique_correction = traits.Bool(desc="""when oblique angle(s) applied, some SIEMENS dti protocols do not adjust gradient accordingly, thus it requires adjustment for correct diffusion tensor calculation""", argstr="-oc") class ODFReconOutputSpec(TraitedSpec): B0 = File(exists=True) DWI = File(exists=True) max = File(exists=True) ODF = File(exists=True) entropy = File() class ODFRecon(CommandLine): """Use odf_recon to generate tensors and other maps """ input_spec=ODFReconInputSpec output_spec=ODFReconOutputSpec _cmd = 'odf_recon' def _list_outputs(self): out_prefix = self.inputs.out_prefix output_type = self.inputs.output_type outputs = self.output_spec().get() outputs['B0'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_b0.'+ output_type)) outputs['DWI'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_dwi.'+ output_type)) outputs['max'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_max.'+ output_type)) outputs['ODF'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_odf.'+ output_type)) if isdefined(self.inputs.output_entropy): outputs['entropy'] = os.path.abspath(fname_presuffix("", prefix=out_prefix, suffix='_entropy.'+ output_type)) return outputs class ODFTrackerInputSpec(CommandLineInputSpec): max = File(exists=True, mandatory=True) ODF = File(exists=True, mandatory=True) input_data_prefix = traits.Str("odf", desc='recon data prefix', argstr='%s', usedefault=True, position=0) out_file = File("tracks.trk", desc = 'output track file', argstr='%s', usedefault=True, position=1) input_output_type = traits.Enum('nii', 'analyze', 'ni1', 'nii.gz', argstr='-it %s', desc='input and output file type', usedefault=True) runge_kutta2 = traits.Bool(argstr='-rk2', desc="""use 2nd order runge-kutta method for tracking. default tracking method is non-interpolate streamline""") step_length = traits.Float(argstr='-l %f', desc="""set step length, in the unit of minimum voxel size. default value is 0.1.""") angle_threshold = traits.Float(argstr='-at %f',desc="""set angle threshold. default value is 35 degree for default tracking method and 25 for rk2""") random_seed = traits.Int(argstr='-rseed %s', desc="""use random location in a voxel instead of the center of the voxel to seed. can also define number of seed per voxel. 
default is 1""") invert_x = traits.Bool(argstr='-ix', desc='invert x component of the vector') invert_y = traits.Bool(argstr='-iy', desc='invert y component of the vector') invert_z = traits.Bool(argstr='-iz', desc='invert z component of the vector') swap_xy = traits.Bool(argstr='-sxy', desc='swap x and y vectors while tracking') swap_yz = traits.Bool(argstr='-syz', desc='swap y and z vectors while tracking') swap_zx = traits.Bool(argstr='-szx', desc='swap x and z vectors while tracking') disc = traits.Bool(argstr='-disc', desc='use disc tracking') mask1_file = File(desc="first mask image", mandatory=True, argstr="-m %s", position=2) mask1_threshold = traits.Float(desc="threshold value for the first mask image, if not given, the program will \ try automatically find the threshold", position=3) mask2_file = File(desc="second mask image", argstr="-m2 %s", position=4) mask2_threshold = traits.Float(desc="threshold value for the second mask image, if not given, the program will \ try automatically find the threshold", position=5) limit = traits.Int(argstr='-limit %d', desc="""in some special case, such as heart data, some track may go into infinite circle and take long time to stop. this option allows setting a limit for the longest tracking steps (voxels)""") dsi = traits.Bool(argstr='-dsi', desc=""" specify the input odf data is dsi. because dsi recon uses fixed pre-calculated matrix, some special orientation patch needs to be applied to keep dti/dsi/q-ball consistent.""") image_orientation_vectors = traits.List(traits.Float(), minlen=6, maxlen=6, desc="""specify image orientation vectors. if just one argument given, will treat it as filename and read the orientation vectors from the file. if 6 arguments are given, will treat them as 6 float numbers and construct the 1st and 2nd vector and calculate the 3rd one automatically. this information will be used to determine image orientation, as well as to adjust gradient vectors with oblique angle when""", argstr="-iop %f") slice_order = traits.Int(argstr='-sorder %d', desc='set the slice order. 1 means normal, -1 means reversed. default value is 1') voxel_order = traits.Enum('RAS', 'RPS', 'RAI', 'RPI', 'LAI', 'LAS', 'LPS', 'LPI', argstr='-vorder %s', desc="""specify the voxel order in RL/AP/IS (human brain) reference. must be 3 letters with no space in between. for example, RAS means the voxel row is from L->R, the column is from P->A and the slice order is from I->S. by default voxel order is determined by the image orientation (but NOT guaranteed to be correct because of various standards). for example, siemens axial image is LPS, coronal image is LIP and sagittal image is PIL. 
this information also is NOT needed for tracking but will be saved in the track file and is essential for track display to map onto the right coordinates""") class ODFTrackerOutputSpec(TraitedSpec): track_file = File(exists=True, desc='output track file') class ODFTracker(CommandLine): """Use odf_tracker to generate track file """ input_spec=ODFTrackerInputSpec output_spec=ODFTrackerOutputSpec _cmd = 'odf_tracker' def _run_interface(self, runtime): _, _, ext = split_filename(self.inputs.max) copyfile(self.inputs.max, os.path.abspath(self.inputs.input_data_prefix + "_max" + ext), copy=False) _, _, ext = split_filename(self.inputs.ODF) copyfile(self.inputs.ODF, os.path.abspath(self.inputs.input_data_prefix + "_odf" + ext), copy=False) return super(ODFTracker, self)._run_interface(runtime) def _list_outputs(self): outputs = self.output_spec().get() outputs['track_file'] = os.path.abspath(self.inputs.out_file) return outputs nipype-0.9.2/nipype/interfaces/diffusion_toolkit/postproc.py000066400000000000000000000025751227300005300244340ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Provides interfaces to various commands provided by diffusion toolkit Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os __docformat__ = 'restructuredtext' from nipype.interfaces.base import (TraitedSpec, File, traits, CommandLine, CommandLineInputSpec) class SplineFilterInputSpec(CommandLineInputSpec): track_file = File(exists=True, desc="file containing tracks to be filtered", position=0, argstr="%s", mandatory=True) step_length = traits.Float(desc="in the unit of minimum voxel size", position=1, argstr="%f", mandatory=True) output_file = File("spline_tracks.trk", desc="target file for smoothed tracks", position=2, argstr="%s", usedefault=True) class SplineFilterOutputSpec(TraitedSpec): smoothed_track_file = File(exists=True) class SplineFilter(CommandLine): input_spec=SplineFilterInputSpec output_spec=SplineFilterOutputSpec _cmd = "spline_filter" def _list_outputs(self): outputs = self.output_spec().get() outputs['smoothed_track_file'] = os.path.abspath(self.inputs.output_file) return outputsnipype-0.9.2/nipype/interfaces/diffusion_toolkit/tests/000077500000000000000000000000001227300005300233425ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py000066400000000000000000000031031227300005300274070ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.diffusion_toolkit.dti import DTIRecon def test_DTIRecon_inputs(): input_map = dict(DWI=dict(argstr='%s', mandatory=True, position=1, ), args=dict(argstr='%s', ), b0_threshold=dict(argstr='-b0_th', ), bvals=dict(mandatory=True, ), bvecs=dict(argstr='-gm %s', mandatory=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), image_orientation_vectors=dict(argstr='-iop %f', ), n_averages=dict(argstr='-nex %s', ), oblique_correction=dict(argstr='-oc', ), out_prefix=dict(argstr='%s', position=2, usedefault=True, ), output_type=dict(argstr='-ot %s', usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DTIRecon.input_spec() for key, metadata in input_map.items(): 
for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DTIRecon_outputs(): output_map = dict(ADC=dict(), B0=dict(), FA=dict(), FA_color=dict(), L1=dict(), L2=dict(), L3=dict(), V1=dict(), V2=dict(), V3=dict(), exp=dict(), tensor=dict(), ) outputs = DTIRecon.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py000066400000000000000000000036731227300005300277500ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.diffusion_toolkit.dti import DTITracker def test_DTITracker_inputs(): input_map = dict(angle_threshold=dict(argstr='-at %f', ), angle_threshold_weight=dict(argstr='-atw %f', ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), input_data_prefix=dict(argstr='%s', position=0, usedefault=True, ), input_type=dict(argstr='-it %s', ), invert_x=dict(argstr='-ix', ), invert_y=dict(argstr='-iy', ), invert_z=dict(argstr='-iz', ), mask1_file=dict(argstr='-m %s', mandatory=True, position=2, ), mask1_threshold=dict(position=3, ), mask2_file=dict(argstr='-m2 %s', position=4, ), mask2_threshold=dict(position=5, ), output_file=dict(argstr='%s', position=1, usedefault=True, ), output_mask=dict(argstr='-om %s', ), primary_vector=dict(argstr='-%s', ), random_seed=dict(argstr='-rseed', ), step_length=dict(argstr='-l %f', ), swap_xy=dict(argstr='-sxy', ), swap_yz=dict(argstr='-syz', ), swap_zx=dict(argstr='-szx', ), tensor_file=dict(), terminal_output=dict(mandatory=True, nohash=True, ), tracking_method=dict(argstr='-%s', ), ) inputs = DTITracker.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DTITracker_outputs(): output_map = dict(mask_file=dict(), track_file=dict(), ) outputs = DTITracker.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py000066400000000000000000000025641227300005300273030ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.diffusion_toolkit.odf import HARDIMat def test_HARDIMat_inputs(): input_map = dict(args=dict(argstr='%s', ), bvals=dict(mandatory=True, ), bvecs=dict(argstr='%s', mandatory=True, position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), image_info=dict(argstr='-info %s', ), image_orientation_vectors=dict(argstr='-iop %f', ), oblique_correction=dict(argstr='-oc', ), odf_file=dict(argstr='-odf %s', ), order=dict(argstr='-order %s', ), out_file=dict(argstr='%s', position=2, usedefault=True, ), reference_file=dict(argstr='-ref %s', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = HARDIMat.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_HARDIMat_outputs(): output_map = dict(out_file=dict(), ) outputs = HARDIMat.output_spec() for key, metadata in output_map.items(): for 
metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py000066400000000000000000000034041227300005300274030ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.diffusion_toolkit.odf import ODFRecon def test_ODFRecon_inputs(): input_map = dict(DWI=dict(argstr='%s', mandatory=True, position=1, ), args=dict(argstr='%s', ), dsi=dict(argstr='-dsi', ), environ=dict(nohash=True, usedefault=True, ), filter=dict(argstr='-f', ), ignore_exception=dict(nohash=True, usedefault=True, ), image_orientation_vectors=dict(argstr='-iop %f', ), matrix=dict(argstr='-mat %s', mandatory=True, ), n_b0=dict(argstr='-b0 %s', mandatory=True, ), n_directions=dict(argstr='%s', mandatory=True, position=2, ), n_output_directions=dict(argstr='%s', mandatory=True, position=3, ), oblique_correction=dict(argstr='-oc', ), out_prefix=dict(argstr='%s', position=4, usedefault=True, ), output_entropy=dict(argstr='-oe', ), output_type=dict(argstr='-ot %s', usedefault=True, ), sharpness=dict(argstr='-s %f', ), subtract_background=dict(argstr='-bg', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ODFRecon.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ODFRecon_outputs(): output_map = dict(B0=dict(), DWI=dict(), ODF=dict(), entropy=dict(), max=dict(), ) outputs = ODFRecon.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py000066400000000000000000000041541227300005300277330ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.diffusion_toolkit.odf import ODFTracker def test_ODFTracker_inputs(): input_map = dict(ODF=dict(mandatory=True, ), angle_threshold=dict(argstr='-at %f', ), args=dict(argstr='%s', ), disc=dict(argstr='-disc', ), dsi=dict(argstr='-dsi', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), image_orientation_vectors=dict(argstr='-iop %f', ), input_data_prefix=dict(argstr='%s', position=0, usedefault=True, ), input_output_type=dict(argstr='-it %s', usedefault=True, ), invert_x=dict(argstr='-ix', ), invert_y=dict(argstr='-iy', ), invert_z=dict(argstr='-iz', ), limit=dict(argstr='-limit %d', ), mask1_file=dict(argstr='-m %s', mandatory=True, position=2, ), mask1_threshold=dict(position=3, ), mask2_file=dict(argstr='-m2 %s', position=4, ), mask2_threshold=dict(position=5, ), max=dict(mandatory=True, ), out_file=dict(argstr='%s', position=1, usedefault=True, ), random_seed=dict(argstr='-rseed %s', ), runge_kutta2=dict(argstr='-rk2', ), slice_order=dict(argstr='-sorder %d', ), step_length=dict(argstr='-l %f', ), swap_xy=dict(argstr='-sxy', ), swap_yz=dict(argstr='-syz', ), swap_zx=dict(argstr='-szx', ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_order=dict(argstr='-vorder %s', ), ) inputs = ODFTracker.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ODFTracker_outputs(): output_map = dict(track_file=dict(), ) 
outputs = ODFTracker.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py000066400000000000000000000022461227300005300304070ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.diffusion_toolkit.postproc import SplineFilter def test_SplineFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), output_file=dict(argstr='%s', position=2, usedefault=True, ), step_length=dict(argstr='%f', mandatory=True, position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), track_file=dict(argstr='%s', mandatory=True, position=0, ), ) inputs = SplineFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SplineFilter_outputs(): output_map = dict(smoothed_track_file=dict(), ) outputs = SplineFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/dipy/000077500000000000000000000000001227300005300174125ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/dipy/__init__.py000066400000000000000000000001041227300005300215160ustar00rootroot00000000000000from .tracks import TrackDensityMap from .tensors import TensorMode nipype-0.9.2/nipype/interfaces/dipy/setup.py000066400000000000000000000007111227300005300211230ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('dipy', parent_package, top_path) #config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/dipy/tensors.py000066400000000000000000000071761227300005300214740ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import ( TraitedSpec, BaseInterface, File) from nipype.utils.filemanip import split_filename import os.path as op import nibabel as nb import numpy as np from nipype.utils.misc import package_check import warnings from ... 
import logging iflogger = logging.getLogger('interface') have_dipy = True try: package_check('dipy', version='0.6.0') except Exception, e: have_dipy = False else: import dipy.reconst.dti as dti from dipy.core.gradients import GradientTable class TensorModeInputSpec(TraitedSpec): in_file = File(exists=True, mandatory=True, desc='The input 4D diffusion-weighted image file') bvecs = File(exists=True, mandatory=True, desc='The input b-vector text file') bvals = File(exists=True, mandatory=True, desc='The input b-value text file') out_filename = File( genfile=True, desc='The output filename for the Tensor mode image') class TensorModeOutputSpec(TraitedSpec): out_file = File(exists=True) class TensorMode(BaseInterface): """ Creates a map of the mode of the diffusion tensors given a set of diffusion-weighted images, as well as their associated b-values and b-vectors. Fits the diffusion tensors and calculates tensor mode with Dipy. .. [1] Daniel B. Ennis and G. Kindlmann, "Orthogonal Tensor Invariants and the Analysis of Diffusion Tensor Magnetic Resonance Images", Magnetic Resonance in Medicine, vol. 55, no. 1, pp. 136-146, 2006. Example ------- >>> import nipype.interfaces.dipy as dipy >>> mode = dipy.TensorMode() >>> mode.inputs.in_file = 'diffusion.nii' >>> mode.inputs.bvecs = 'bvecs' >>> mode.inputs.bvals = 'bvals' >>> mode.run() # doctest: +SKIP """ input_spec = TensorModeInputSpec output_spec = TensorModeOutputSpec def _run_interface(self, runtime): ## Load the 4D image files img = nb.load(self.inputs.in_file) data = img.get_data() affine = img.get_affine() ## Load the gradient strengths and directions bvals = np.loadtxt(self.inputs.bvals) gradients = np.loadtxt(self.inputs.bvecs).T ## Place in Dipy's preferred format gtab = GradientTable(gradients) gtab.bvals = bvals ## Mask the data so that tensors are not fit for ## unnecessary voxels mask = data[..., 0] > 50 ## Fit the tensors to the data tenmodel = dti.TensorModel(gtab) tenfit = tenmodel.fit(data, mask) ## Calculate the mode of each voxel's tensor mode_data = tenfit.mode ## Write as a 3D Nifti image with the original affine img = nb.Nifti1Image(mode_data, affine) out_file = op.abspath(self._gen_outfilename()) nb.save(img, out_file) iflogger.info('Tensor mode image saved as {i}'.format(i=out_file)) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = op.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) return name + '_mode.nii' nipype-0.9.2/nipype/interfaces/dipy/tests/000077500000000000000000000000001227300005300205545ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/dipy/tests/test_auto_TensorMode.py000066400000000000000000000015331227300005300252760ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dipy.tensors import TensorMode def test_TensorMode_inputs(): input_map = dict(bvals=dict(mandatory=True, ), bvecs=dict(mandatory=True, ), in_file=dict(mandatory=True, ), out_filename=dict(genfile=True, ), ) inputs = TensorMode.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TensorMode_outputs(): output_map = dict(out_file=dict(), ) outputs = TensorMode.output_spec() for key, metadata in 
output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py000066400000000000000000000015271227300005300262640ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dipy.tracks import TrackDensityMap def test_TrackDensityMap_inputs(): input_map = dict(data_dims=dict(), in_file=dict(mandatory=True, ), out_filename=dict(usedefault=True, ), voxel_dims=dict(), ) inputs = TrackDensityMap.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TrackDensityMap_outputs(): output_map = dict(out_file=dict(), ) outputs = TrackDensityMap.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/dipy/tracks.py000066400000000000000000000047441227300005300212640ustar00rootroot00000000000000# -*- coding: utf-8 -*- from nipype.interfaces.base import (TraitedSpec, BaseInterface, BaseInterfaceInputSpec, File, isdefined, traits) from nipype.utils.filemanip import split_filename import os.path as op import nibabel as nb, nibabel.trackvis as trk from nipype.utils.misc import package_check import warnings from ... import logging iflogger = logging.getLogger('interface') have_dipy = True try: package_check('dipy', version='0.6.0') except Exception, e: have_dipy = False else: from dipy.tracking.utils import density_map class TrackDensityMapInputSpec(TraitedSpec): in_file = File(exists=True, mandatory=True, desc='The input TrackVis track file') voxel_dims = traits.List(traits.Float, minlen=3, maxlen=3, desc='The size of each voxel in mm.') data_dims = traits.List(traits.Int, minlen=3, maxlen=3, desc='The size of the image in voxels.') out_filename = File('tdi.nii', usedefault=True, desc='The output filename for the tracks in TrackVis (.trk) format') class TrackDensityMapOutputSpec(TraitedSpec): out_file = File(exists=True) class TrackDensityMap(BaseInterface): """ Creates a tract density image from a TrackVis track file using functions from dipy Example ------- >>> import nipype.interfaces.dipy as dipy >>> trk2tdi = dipy.TrackDensityMap() >>> trk2tdi.inputs.in_file = 'converted.trk' >>> trk2tdi.run() # doctest: +SKIP """ input_spec = TrackDensityMapInputSpec output_spec = TrackDensityMapOutputSpec def _run_interface(self, runtime): tracks, header = trk.read(self.inputs.in_file) if not isdefined(self.inputs.data_dims): data_dims = header['dim'] else: data_dims = self.inputs.data_dims if not isdefined(self.inputs.voxel_dims): voxel_size = header['voxel_size'] else: voxel_size = self.inputs.voxel_dims affine = header['vox_to_ras'] streams = ((ii[0]) for ii in tracks) data = density_map(streams, data_dims, voxel_size) if data.max() < 2**15: data = data.astype('int16') img = nb.Nifti1Image(data,affine) out_file = op.abspath(self.inputs.out_filename) nb.save(img, out_file) iflogger.info('Track density map saved as {i}'.format(i=out_file)) iflogger.info('Data Dimensions {d}'.format(d=data_dims)) iflogger.info('Voxel Dimensions {v}'.format(v=voxel_size)) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = op.abspath(self.inputs.out_filename) return outputs 
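# The dipy interfaces above (TensorMode, TrackDensityMap) are plain
# BaseInterface subclasses, so they can also be wrapped in pipeline Nodes.
# Minimal illustrative sketch (assumes nipype.pipeline.engine and dipy >= 0.6.0
# are available; 'diffusion.nii', 'bvecs' and 'bvals' are placeholder files):

import nipype.pipeline.engine as pe
import nipype.interfaces.dipy as dipy

mode = pe.Node(interface=dipy.TensorMode(), name='tensor_mode')
mode.inputs.in_file = 'diffusion.nii'   # 4D diffusion-weighted image
mode.inputs.bvecs = 'bvecs'             # gradient direction table
mode.inputs.bvals = 'bvals'             # b-value table
result = mode.run()                     # writes <input basename>_mode.nii in the node's working directory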
nipype-0.9.2/nipype/interfaces/dynamic_slicer.py000066400000000000000000000170101227300005300220030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import warnings import xml.dom.minidom from nipype.interfaces.base import (CommandLine, CommandLineInputSpec, DynamicTraitedSpec, traits, Undefined, File, isdefined) class SlicerCommandLineInputSpec(DynamicTraitedSpec, CommandLineInputSpec): module = traits.Str(desc="name of the Slicer command line module you want to use") class SlicerCommandLine(CommandLine): """Experimental Slicer wrapper. Work in progress. """ _cmd = "Slicer3" input_spec = SlicerCommandLineInputSpec output_spec = DynamicTraitedSpec def _grab_xml(self, module): cmd = CommandLine(command = "Slicer3", args="--launch %s --xml"%module) ret = cmd.run() if ret.runtime.returncode == 0: return xml.dom.minidom.parseString(ret.runtime.stdout) else: raise Exception(cmd.cmdline + " failed:\n%s"%ret.runtime.stderr) def _outputs(self): base = super(SlicerCommandLine, self)._outputs() undefined_output_traits = {} for key in [node.getElementsByTagName('name')[0].firstChild.nodeValue for node in self._outputs_nodes]: base.add_trait(key, File(exists = True)) undefined_output_traits[key] = Undefined base.trait_set(trait_change_notify=False, **undefined_output_traits) return base def __init__(self, module, **inputs): warnings.warn('slicer is Not fully implemented', RuntimeWarning) super(SlicerCommandLine, self).__init__(command= "Slicer3 --launch %s "%module, name= module, **inputs) dom = self._grab_xml(module) self._outputs_filenames = {} self._outputs_nodes = [] undefined_traits = {} for paramGroup in dom.getElementsByTagName("parameters"): for param in paramGroup.childNodes: if param.nodeName in ['label', 'description', '#text', '#comment']: continue traitsParams = {} name = param.getElementsByTagName('name')[0].firstChild.nodeValue longFlagNode = param.getElementsByTagName('longflag') if longFlagNode: traitsParams["argstr"] = "--" + longFlagNode[0].firstChild.nodeValue + " " else: traitsParams["argstr"] = "--" + name + " " argsDict = {'file': '%s', 'integer': "%d", 'double': "%f", 'float': "%f", 'image': "%s", 'transform': "%s", 'boolean': '', 'string-enumeration': '%s', 'string': "%s"} if param.nodeName.endswith('-vector'): traitsParams["argstr"] += argsDict[param.nodeName[:-7]] else: traitsParams["argstr"] += argsDict[param.nodeName] index = param.getElementsByTagName('index') if index: traitsParams["position"] = index[0].firstChild.nodeValue desc = param.getElementsByTagName('description') if index: traitsParams["desc"] = desc[0].firstChild.nodeValue name = param.getElementsByTagName('name')[0].firstChild.nodeValue typesDict = {'integer': traits.Int, 'double': traits.Float, 'float': traits.Float, 'image': File, 'transform': File, 'boolean': traits.Bool, 'string': traits.Str, 'file':File} if param.nodeName == 'string-enumeration': type = traits.Enum values = [el.firstChild.nodeValue for el in param.getElementsByTagName('element')] elif param.nodeName.endswith('-vector'): type = traits.List values = [typesDict[param.nodeName[:-7]]] traitsParams["sep"] = ',' else: values = [] type = typesDict[param.nodeName] if param.nodeName in ['file', 'directory', 'image', 'transform'] and param.getElementsByTagName('channel')[0].firstChild.nodeValue == 'output': self.inputs.add_trait(name, traits.Either(traits.Bool, File, **traitsParams)) undefined_traits[name] = Undefined #traitsParams["exists"] = 
True self._outputs_filenames[name] = self._gen_filename_from_param(param) #undefined_output_traits[name] = Undefined #self._outputs().add_trait(name, File(*values, **traitsParams)) self._outputs_nodes.append(param) else: if param.nodeName in ['file', 'directory', 'image', 'transform']: traitsParams["exists"] = True self.inputs.add_trait(name, type(*values, **traitsParams)) undefined_traits[name] = Undefined self.inputs.trait_set(trait_change_notify=False, **undefined_traits) for name in undefined_traits.keys(): _ = getattr(self.inputs, name) #self._outputs().trait_set(trait_change_notify=False, **undefined_output_traits) def _gen_filename(self, name): if name in self._outputs_filenames: return os.path.join(os.getcwd(), self._outputs_filenames[name]) return None def _gen_filename_from_param(self,param): base = param.getElementsByTagName('name')[0].firstChild.nodeValue fileExtensions = param.getAttribute("fileExtensions") if fileExtensions: ext = fileExtensions else: ext = {'image': '.nii', 'transform': '.txt', 'file': ''}[param.nodeName] return base + ext def _list_outputs(self): outputs = self.output_spec().get() for output_node in self._outputs_nodes: name = output_node.getElementsByTagName('name')[0].firstChild.nodeValue outputs[name] = getattr(self.inputs, name) if isdefined(outputs[name]) and isinstance(outputs[name], bool): if outputs[name]: outputs[name] = self._gen_filename(name) else: outputs[name] = Undefined return outputs def _format_arg(self, name, spec, value): if name in [output_node.getElementsByTagName('name')[0].firstChild.nodeValue for output_node in self._outputs_nodes]: if isinstance(value, bool): fname = self._gen_filename(name) else: fname = value return spec.argstr % fname return super(SlicerCommandLine, self)._format_arg(name, spec, value) # test = SlicerCommandLine(module="BRAINSFit") # test.inputs.fixedVolume = "/home/filo/workspace/fmri_tumour/data/pilot1/10_co_COR_3D_IR_PREP.nii" # test.inputs.movingVolume = "/home/filo/workspace/fmri_tumour/data/pilot1/2_line_bisection.nii" # test.inputs.outputTransform = True # test.inputs.transformType = ["Affine"] # print test.cmdline # print test.inputs # print test._outputs() # ret = test.run() # test = SlicerCommandLine(name="BRAINSResample") # test.inputs.referenceVolume = "/home/filo/workspace/fmri_tumour/data/pilot1/10_co_COR_3D_IR_PREP.nii" # test.inputs.inputVolume = "/home/filo/workspace/fmri_tumour/data/pilot1/2_line_bisection.nii" # test.inputs.outputVolume = True # test.inputs.warpTransform = "/home/filo/workspace/nipype/nipype/interfaces/outputTransform.mat" # print test.cmdline # ret = test.run() # print ret.runtime.stderr # print ret.runtime.returncode nipype-0.9.2/nipype/interfaces/freesurfer/000077500000000000000000000000001227300005300206155ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/freesurfer/__init__.py000066400000000000000000000015471227300005300227350ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Top-level namespace for freesurfer.""" from .base import Info, FSCommand from .preprocess import (ParseDICOMDir, UnpackSDICOMDir, MRIConvert, Resample, ReconAll, BBRegister, ApplyVolTransform,Smooth, DICOMConvert, RobustRegister, FitMSParams, SynthesizeFLASH) from .model import (MRISPreproc, GLMFit, OneSampleTTest, Binarize, Concatenate, SegStats, Label2Vol, MS_LDA) from .utils import (SampleToSurface, SurfaceSmooth, SurfaceTransform, Surface2VolTransform, SurfaceSnapshots,ApplyMask, MRIsConvert, 
MRITessellate, MRIMarchingCubes, SmoothTessellation, MakeAverageSubject, ExtractMainComponent) nipype-0.9.2/nipype/interfaces/freesurfer/base.py000066400000000000000000000107311227300005300221030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The freesurfer module provides basic functions for interfacing with freesurfer tools. Currently these tools are supported: * Dicom2Nifti: using mri_convert * Resample: using mri_convert Examples -------- See the docstrings for the individual classes for 'working' examples. """ __docformat__ = 'restructuredtext' import os from nipype.utils.filemanip import fname_presuffix from nipype.interfaces.base import (CommandLine, Directory, CommandLineInputSpec, isdefined) class Info(object): """ Freesurfer subject directory and version information. Examples -------- >>> from nipype.interfaces.freesurfer import Info >>> Info.version() # doctest: +SKIP >>> Info.subjectsdir() # doctest: +SKIP """ @staticmethod def version(): """Check for freesurfer version on system Find which freesurfer is being used....and get version from /path/to/freesurfer/build-stamp.txt Returns ------- version : string version number as string or None if freesurfer version not found """ fs_home = os.getenv('FREESURFER_HOME') if fs_home is None: return None versionfile = os.path.join(fs_home, 'build-stamp.txt') if not os.path.exists(versionfile): return None fid = open(versionfile, 'rt') version = fid.readline() fid.close() return version @classmethod def subjectsdir(cls): """Check the global SUBJECTS_DIR Parameters ---------- subjects_dir : string The system defined subjects directory Returns ------- subject_dir : string Represents the current environment setting of SUBJECTS_DIR """ if cls.version(): return os.environ['SUBJECTS_DIR'] return None class FSTraitedSpec(CommandLineInputSpec): subjects_dir = Directory(exists=True, desc='subjects directory') class FSCommand(CommandLine): """General support for FreeSurfer commands. Every FS command accepts 'subjects_dir' input. """ input_spec = FSTraitedSpec _subjects_dir = None def __init__(self, **inputs): super(FSCommand, self).__init__(**inputs) self.inputs.on_trait_change(self._subjects_dir_update, 'subjects_dir') if not self._subjects_dir: self._subjects_dir = Info.subjectsdir() if not isdefined(self.inputs.subjects_dir) and self._subjects_dir: self.inputs.subjects_dir = self._subjects_dir self._subjects_dir_update() def _subjects_dir_update(self): if self.inputs.subjects_dir: self.inputs.environ.update({'SUBJECTS_DIR': self.inputs.subjects_dir}) @classmethod def set_default_subjects_dir(cls, subjects_dir): cls._subjects_dir = subjects_dir @property def version(self): return Info.version() def run(self, **inputs): if 'subjects_dir' in inputs: self.inputs.subjects_dir = inputs['subjects_dir'] self._subjects_dir_update() return super(FSCommand, self).run(**inputs) def _gen_fname(self, basename, fname=None, cwd=None, suffix='_fs', use_ext=True): '''Define a generic mapping for a single outfile The filename is potentially autogenerated by suffixing inputs.infile Parameters ---------- basename : string (required) filename to base the new filename on fname : string if not None, just use this fname cwd : string prefix paths with cwd, otherwise os.getcwd() suffix : string default suffix ''' if basename == '': msg = 'Unable to generate filename for command %s. ' % self.cmd msg += 'basename is not set!' 
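        # Editorial sketch (not part of the original source): with the default
        # suffix '_fs' and the cwd handling below, a call such as
        #     self._gen_fname('brain.mgz')
        # is expected to resolve to os.path.join(os.getcwd(), 'brain_fs.mgz'):
        # fname_presuffix() keeps the extension, inserts the suffix before it
        # and re-roots the file in the working directory, while use_ext=False
        # would drop the '.mgz' part. The exact behaviour is defined by
        # nipype.utils.filemanip.fname_presuffix.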
raise ValueError(msg) if cwd is None: cwd = os.getcwd() fname = fname_presuffix(basename, suffix=suffix, use_ext=use_ext, newpath=cwd) return fname @property def version(self): ver = Info.version() if ver: if 'dev' in ver: return ver.rstrip().split('-')[-1] + '.dev' else: return ver.rstrip().split('-v')[-1] nipype-0.9.2/nipype/interfaces/freesurfer/model.py000066400000000000000000001217041227300005300222740ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The freesurfer module provides basic functions for interfacing with freesurfer tools. Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ __docformat__ = 'restructuredtext' import os from nipype.utils.filemanip import fname_presuffix, split_filename from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, Directory, isdefined) class MRISPreprocInputSpec(FSTraitedSpec): out_file = File(argstr='--out %s', genfile=True, desc='output filename') target = traits.Str(argstr='--target %s', mandatory=True, desc='target subject name') hemi = traits.Enum('lh', 'rh', argstr='--hemi %s', mandatory=True, desc='hemisphere for source and target') surf_measure = traits.Str(argstr='--meas %s', xor=('surf_measure', 'surf_measure_file', 'surf_area'), desc='Use subject/surf/hemi.surf_measure as input') surf_area = traits.Str(argstr='--area %s', xor=('surf_measure', 'surf_measure_file', 'surf_area'), desc='Extract vertex area from subject/surf/hemi.surfname to use as input.') subjects = traits.List(argstr='--s %s...', xor=('subjects', 'fsgd_file', 'subject_file'), desc='subjects from who measures are calculated') fsgd_file = File(exists=True, argstr='--fsgd %s', xor=('subjects', 'fsgd_file', 'subject_file'), desc='specify subjects using fsgd file') subject_file = File(exists=True, argstr='--f %s', xor=('subjects', 'fsgd_file', 'subject_file'), desc='file specifying subjects separated by white space') surf_measure_file = InputMultiPath(File(exists=True), argstr='--is %s...', xor=('surf_measure', 'surf_measure_file', 'surf_area'), desc='file alternative to surfmeas, still requires list of subjects') source_format = traits.Str(argstr='--srcfmt %s', desc='source format') surf_dir = traits.Str(argstr='--surfdir %s', desc='alternative directory (instead of surf)') vol_measure_file = InputMultiPath(traits.Tuple(File(exists=True), File(exists=True)), argstr='--iv %s %s...', desc='list of volume measure and reg file tuples') proj_frac = traits.Float(argstr='--projfrac %s', desc='projection fraction for vol2surf') fwhm = traits.Float(argstr='--fwhm %f', xor=['num_iters'], desc='smooth by fwhm mm on the target surface') num_iters = traits.Int(argstr='--niters %d', xor=['fwhm'], desc='niters : smooth by niters on the target surface') fwhm_source = traits.Float(argstr='--fwhm-src %f', xor=['num_iters_source'], desc='smooth by fwhm mm on the source surface') num_iters_source = traits.Int(argstr='--niterssrc %d', xor=['fwhm_source'], desc='niters : smooth by niters on the source surface') smooth_cortex_only = traits.Bool(argstr='--smooth-cortex-only', desc='only smooth cortex (ie, exclude medial wall)') class MRISPreprocOutputSpec(TraitedSpec): out_file = File(exists=True, desc='preprocessed 
output file') class MRISPreproc(FSCommand): """Use FreeSurfer mris_preproc to prepare a group of contrasts for a second level analysis Examples -------- >>> preproc = MRISPreproc() >>> preproc.inputs.target = 'fsaverage' >>> preproc.inputs.hemi = 'lh' >>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \ ('cont1a.nii', 'register.dat')] >>> preproc.inputs.out_file = 'concatenated_file.mgz' >>> preproc.cmdline 'mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat' """ _cmd = 'mris_preproc' input_spec = MRISPreprocInputSpec output_spec = MRISPreprocOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outfile = self.inputs.out_file outputs['out_file'] = outfile if not isdefined(outfile): outputs['out_file'] = os.path.join(os.getcwd(), 'concat_%s_%s.mgz' % (self.inputs.hemi, self.inputs.target)) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()[name] return None class GLMFitInputSpec(FSTraitedSpec): glm_dir = traits.Str(argstr='--glmdir %s', desc='save outputs to dir', genfile=True) in_file = File(desc='input 4D file', argstr='--y %s', mandatory=True, copyfile=False) _design_xor = ('fsgd', 'design', 'one_sample') fsgd = traits.Tuple(File(exists=True), traits.Enum('doss', 'dods'), argstr='--fsgd %s %s', xor=_design_xor, desc='freesurfer descriptor file') design = File(exists=True, argstr='--X %s', xor=_design_xor, desc='design matrix file') contrast = InputMultiPath(File(exists=True), argstr='--C %s...', desc='contrast file') one_sample = traits.Bool(argstr='--osgm', xor=('one_sample', 'fsgd', 'design', 'contrast'), desc='construct X and C as a one-sample group mean') no_contrast_sok = traits.Bool(argstr='--no-contrasts-ok', desc='do not fail if no contrasts specified') per_voxel_reg = InputMultiPath(File(exists=True), argstr='--pvr %s...', desc='per-voxel regressors') self_reg = traits.Tuple(traits.Int, traits.Int, traits.Int, argstr='--selfreg %d %d %d', desc='self-regressor from index col row slice') weighted_ls = File(exists=True, argstr='--wls %s', xor=('weight_file', 'weight_inv', 'weight_sqrt'), desc='weighted least squares') fixed_fx_var = File(exists=True, argstr='--yffxvar %s', desc='for fixed effects analysis') fixed_fx_dof = traits.Int(argstr='--ffxdof %d', xor=['fixed_fx_dof_file'], desc='dof for fixed effects analysis') fixed_fx_dof_file = File(argstr='--ffxdofdat %d', xor=['fixed_fx_dof'], desc='text file with dof for fixed effects analysis') weight_file = File(exists=True, xor=['weighted_ls'], desc='weight for each input at each voxel') weight_inv = traits.Bool(argstr='--w-inv', desc='invert weights', xor=['weighted_ls']) weight_sqrt = traits.Bool(argstr='--w-sqrt', desc='sqrt of weights', xor=['weighted_ls']) fwhm = traits.Range(low=0.0, argstr='--fwhm %f', desc='smooth input by fwhm') var_fwhm = traits.Range(low=0.0, argstr='--var-fwhm %f', desc='smooth variance by fwhm') no_mask_smooth = traits.Bool(argstr='--no-mask-smooth', desc='do not mask when smoothing') no_est_fwhm = traits.Bool(argstr='--no-est-fwhm', desc='turn off FWHM output estimation') mask_file = File(exists=True, argstr='--mask %s', desc='binary mask') label_file = File(exists=True, argstr='--label %s', xor=['cortex'], desc='use label as mask, surfaces only') cortex = traits.Bool(argstr='--cortex', xor=['label_file'], desc='use subjects ?h.cortex.label as label') invert_mask = traits.Bool(argstr='--mask-inv', desc='invert mask') prune = 
traits.Bool(argstr='--prune', desc='remove voxels that do not have a non-zero value at each frame (def)') no_prune = traits.Bool(argstr='--no-prune', xor=['prunethresh'], desc='do not prune') prune_thresh = traits.Float(argstr='--prune_thr %f', xor=['noprune'], desc='prune threshold. Default is FLT_MIN') compute_log_y = traits.Bool(argstr='--logy', desc='compute natural log of y prior to analysis') save_estimate = traits.Bool(argstr='--yhat-save', desc='save signal estimate (yhat)') save_residual = traits.Bool(argstr='--eres-save', desc='save residual error (eres)') save_res_corr_mtx = traits.Bool(argstr='--eres-scm', desc='save residual error spatial correlation matrix (eres.scm). Big!') surf = traits.Bool(argstr="--surf %s %s %s", requires=["subject_id", "hemi"], desc="analysis is on a surface mesh") subject_id = traits.Str(desc="subject id for surface geometry") hemi = traits.Enum("lh", "rh", desc="surface hemisphere") surf_geo = traits.Str("white", usedefault=True, desc="surface geometry name (e.g. white, pial)") simulation = traits.Tuple(traits.Enum('perm', 'mc-full', 'mc-z'), traits.Int(min=1), traits.Float, traits.Str, argstr='--sim %s %d %f %s', desc='nulltype nsim thresh csdbasename') sim_sign = traits.Enum('abs', 'pos', 'neg', argstr='--sim-sign %s', desc='abs, pos, or neg') uniform = traits.Tuple(traits.Float, traits.Float, argstr='--uniform %f %f', desc='use uniform distribution instead of gaussian') pca = traits.Bool(argstr='--pca', desc='perform pca/svd analysis on residual') calc_AR1 = traits.Bool(argstr='--tar1', desc='compute and save temporal AR1 of residual') save_cond = traits.Bool(argstr='--save-cond', desc='flag to save design matrix condition at each voxel') vox_dump = traits.Tuple(traits.Int, traits.Int, traits.Int, argstr='--voxdump %d %d %d', desc='dump voxel GLM and exit') seed = traits.Int(argstr='--seed %d', desc='used for synthesizing noise') synth = traits.Bool(argstr='--synth', desc='replace input with gaussian') resynth_test = traits.Int(argstr='--resynthtest %d', desc='test GLM by resynthsis') profile = traits.Int(argstr='--profile %d', desc='niters : test speed') force_perm = traits.Bool(argstr='--perm-force', desc='force perumtation test, even when design matrix is not orthog') diag = traits.Int('--diag %d', desc='Gdiag_no : set diagnositc level') diag_cluster = traits.Bool(argstr='--diag-cluster', desc='save sig volume and exit from first sim loop') debug = traits.Bool(argstr='--debug', desc='turn on debugging') check_opts = traits.Bool(argstr='--checkopts', desc="don't run anything, just check options and exit") allow_repeated_subjects = traits.Bool(argstr='--allowsubjrep', desc='allow subject names to repeat in the fsgd file (must appear before --fsgd') allow_ill_cond = traits.Bool(argstr='--illcond', desc='allow ill-conditioned design matrices') sim_done_file = File(argstr='--sim-done %s', desc='create file when simulation finished') class GLMFitOutputSpec(TraitedSpec): glm_dir = Directory(exists=True, desc="output directory") beta_file = File(exists=True, desc="map of regression coefficients") error_file = File(desc="map of residual error") error_var_file = File(desc="map of residual error variance") error_stddev_file = File(desc="map of residual error standard deviation") estimate_file = File(desc="map of the estimated Y values") mask_file = File(desc="map of the mask used in the analysis") fwhm_file = File(desc="text file with estimated smoothness") dof_file = File(desc="text file with effective degrees-of-freedom for the analysis") gamma_file = 
OutputMultiPath(desc="map of contrast of regression coefficients") gamma_var_file = OutputMultiPath(desc="map of regression contrast variance") sig_file = OutputMultiPath(desc="map of F-test significance (in -log10p)") ftest_file = OutputMultiPath(desc="map of test statistic values") spatial_eigenvectors = File(desc="map of spatial eigenvectors from residual PCA") frame_eigenvectors = File(desc="matrix of frame eigenvectors from residual PCA") singular_values = File(desc="matrix singular values from residual PCA") svd_stats_file = File(desc="text file summarizing the residual PCA") class GLMFit(FSCommand): """Use FreeSurfer's mri_glmfit to specify and estimate a general linear model. Examples -------- >>> glmfit = GLMFit() >>> glmfit.inputs.in_file = 'functional.nii' >>> glmfit.inputs.one_sample = True >>> glmfit.cmdline == 'mri_glmfit --glmdir %s --y functional.nii --osgm'%os.getcwd() True """ _cmd = 'mri_glmfit' input_spec = GLMFitInputSpec output_spec = GLMFitOutputSpec def _format_arg(self, name, spec, value): if name == "surf": _si = self.inputs return spec.argstr % (_si.subject_id, _si.hemi, _si.surf_geo) return super(GLMFit, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() # Get the top-level output directory if not isdefined(self.inputs.glm_dir): glmdir = os.getcwd() else: glmdir = os.path.abspath(self.inputs.glm_dir) outputs["glm_dir"] = glmdir # Assign the output files that always get created outputs["beta_file"] = os.path.join(glmdir, "beta.mgh") outputs["error_var_file"] = os.path.join(glmdir, "rvar.mgh") outputs["error_stddev_file"] = os.path.join(glmdir, "rstd.mgh") outputs["mask_file"] = os.path.join(glmdir, "mask.mgh") outputs["fwhm_file"] = os.path.join(glmdir, "fwhm.dat") outputs["dof_file"] = os.path.join(glmdir, "dof.dat") # Assign the conditional outputs if isdefined(self.inputs.save_residual) and self.inputs.save_residual: outputs["error_file"] = os.path.join(glmdir, "eres.mgh") if isdefined(self.inputs.save_estimate) and self.inputs.save_estimate: outputs["estimate_file"] = os.path.join(glmdir, "yhat.mgh") # Get the contrast directory name(s) if isdefined(self.inputs.contrast): contrasts = [] for c in self.inputs.contrast: if split_filename(c)[2] in [".mat", ".dat", ".mtx", ".con"]: contrasts.append(split_filename(c)[1]) else: contrasts.append(os.path.split(c)[1]) elif isdefined(self.inputs.one_sample) and self.inputs.one_sample: contrasts = ["osgm"] # Add in the contrast images outputs["sig_file"] = [os.path.join(glmdir, c, "sig.mgh") for c in contrasts] outputs["ftest_file"] = [os.path.join(glmdir, c, "F.mgh") for c in contrasts] outputs["gamma_file"] = [os.path.join(glmdir, c, "gamma.mgh") for c in contrasts] outputs["gamma_var_file"] = [os.path.join(glmdir, c, "gammavar.mgh") for c in contrasts] # Add in the PCA results, if relevant if isdefined(self.inputs.pca) and self.inputs.pca: pcadir = os.path.join(glmdir, "pca-eres") outputs["spatial_eigenvectors"] = os.path.join(pcadir, "v.mgh") outputs["frame_eigenvectors"] = os.path.join(pcadir, "u.mtx") outputs["singluar_values"] = os.path.join(pcadir, "sdiag.mat") outputs["svd_stats_file"] = os.path.join(pcadir, "stats.dat") return outputs def _gen_filename(self, name): if name == 'glm_dir': return os.getcwd() return None class OneSampleTTest(GLMFit): def __init__(self, **kwargs): super(OneSampleTTest, self).__init__(**kwargs) self.inputs.one_sample = True class BinarizeInputSpec(FSTraitedSpec): in_file = File(exists=True, argstr='--i %s', mandatory=True, copyfile=False, 
desc='input volume') min = traits.Float(argstr='--min %f', xor=['wm_ven_csf'], desc='min thresh') max = traits.Float(argstr='--max %f', xor=['wm_ven_csf'], desc='max thresh') rmin = traits.Float(argstr='--rmin %f', desc='compute min based on rmin*globalmean') rmax = traits.Float(argstr='--rmax %f', desc='compute max based on rmax*globalmean') match = traits.List(traits.Int, argstr='--match %d...', desc='match instead of threshold') wm = traits.Bool(argstr='--wm', desc='set match vals to 2 and 41 (aseg for cerebral WM)') ventricles = traits.Bool(argstr='--ventricles', desc='set match vals those for aseg ventricles+choroid (not 4th)') wm_ven_csf = traits.Bool(argstr='--wm+vcsf', xor=['min', 'max'], desc='WM and ventricular CSF, including choroid (not 4th)') binary_file = File(argstr='--o %s', genfile=True, desc='binary output volume') out_type = traits.Enum('nii', 'nii.gz', 'mgz', argstr='', desc='output file type') count_file = traits.Either(traits.Bool, File, argstr='--count %s', desc='save number of hits in ascii file (hits, ntotvox, pct)') bin_val = traits.Int(argstr='--binval %d', desc='set vox within thresh to val (default is 1)') bin_val_not = traits.Int(argstr='--binvalnot %d', desc='set vox outside range to val (default is 0)') invert = traits.Bool(argstr='--inv', desc='set binval=0, binvalnot=1') frame_no = traits.Int(argstr='--frame %s', desc='use 0-based frame of input (default is 0)') merge_file = File(exists=True, argstr='--merge %s', desc='merge with mergevol') mask_file = File(exists=True, argstr='--mask maskvol', desc='must be within mask') mask_thresh = traits.Float(argstr='--mask-thresh %f', desc='set thresh for mask') abs = traits.Bool(argstr='--abs', desc='take abs of invol first (ie, make unsigned)') bin_col_num = traits.Bool(argstr='--bincol', desc='set binarized voxel value to its column number') zero_edges = traits.Bool(argstr='--zero-edges', desc='zero the edge voxels') zero_slice_edge = traits.Bool(argstr='--zero-slice-edges', desc='zero the edge slice voxels') dilate = traits.Int(argstr='--dilate %d', desc='niters: dilate binarization in 3D') erode = traits.Int(argstr='--erode %d', desc='nerode: erode binarization in 3D (after any dilation)') erode2d = traits.Int(argstr='--erode2d %d', desc='nerode2d: erode binarization in 2D (after any 3D erosion)') class BinarizeOutputSpec(TraitedSpec): binary_file = File(exists=True, desc='binarized output volume') count_file = File(desc='ascii file containing number of hits') class Binarize(FSCommand): """Use FreeSurfer mri_binarize to threshold an input volume Examples -------- >>> binvol = Binarize(in_file='structural.nii', min=10, binary_file='foo_out.nii') >>> binvol.cmdline 'mri_binarize --o foo_out.nii --i structural.nii --min 10.000000' """ _cmd = 'mri_binarize' input_spec = BinarizeInputSpec output_spec = BinarizeOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outfile = self.inputs.binary_file if not isdefined(outfile): if isdefined(self.inputs.out_type): outfile = fname_presuffix(self.inputs.in_file, newpath=os.getcwd(), suffix='.'.join(('_thresh', self.inputs.out_type)), use_ext=False) else: outfile = fname_presuffix(self.inputs.in_file, newpath=os.getcwd(), suffix='_thresh') outputs['binary_file'] = os.path.abspath(outfile) value = self.inputs.count_file if isdefined(value): if isinstance(value, bool): if value: outputs['count_file'] = fname_presuffix(self.inputs.in_file, suffix='_count.txt', newpath=os.getcwd(), use_ext=False) else: outputs['count_file'] = value return outputs def 
_format_arg(self, name, spec, value): if name == 'count_file': if isinstance(value, bool): fname = self._list_outputs()[name] else: fname = value return spec.argstr % fname if name == 'out_type': return '' return super(Binarize, self)._format_arg(name, spec, value) def _gen_filename(self, name): if name == 'binary_file': return self._list_outputs()[name] return None class ConcatenateInputSpec(FSTraitedSpec): in_files = InputMultiPath(File(exists=True), desc='Individual volumes to be concatenated', argstr='--i %s...', mandatory=True) concatenated_file = File(desc='Output volume', argstr='--o %s', genfile=True) sign = traits.Enum('abs', 'pos', 'neg', argstr='--%s', desc='Take only pos or neg voxles from input, or take abs') stats = traits.Enum('sum', 'var', 'std', 'max', 'min', 'mean', argstr='--%s', desc='Compute the sum, var, std, max, min or mean of the input volumes') paired_stats = traits.Enum('sum', 'avg', 'diff', 'diff-norm', 'diff-norm1', 'diff-norm2', argstr='--paired-%s', desc='Compute paired sum, avg, or diff') gmean = traits.Int(argstr='--gmean %d', desc='create matrix to average Ng groups, Nper=Ntot/Ng') mean_div_n = traits.Bool(argstr='--mean-div-n', desc='compute mean/nframes (good for var)') multiply_by = traits.Float(argstr='--mul %f', desc='Multiply input volume by some amount') add_val = traits.Float(argstr='--add %f', desc='Add some amount to the input volume') multiply_matrix_file = File(exists=True, argstr='--mtx %s', desc='Multiply input by an ascii matrix in file') combine = traits.Bool(argstr='--combine', desc='Combine non-zero values into single frame volume') keep_dtype = traits.Bool(argstr='--keep-datatype', desc='Keep voxelwise precision type (default is float') max_bonfcor = traits.Bool(argstr='--max-bonfcor', desc='Compute max and bonferroni correct (assumes -log10(ps))') max_index = traits.Bool(argstr='--max-index', desc='Compute the index of max voxel in concatenated volumes') mask_file = File(exists=True, argstr='--mask %s', desc='Mask input with a volume') vote = traits.Bool(argstr='--vote', desc='Most frequent value at each voxel and fraction of occurances') sort = traits.Bool(argstr='--sort', desc='Sort each voxel by ascending frame value') class ConcatenateOutputSpec(TraitedSpec): concatenated_file = File(exists=True, desc='Path/name of the output volume') class Concatenate(FSCommand): """Use Freesurfer mri_concat to combine several input volumes into one output volume. Can concatenate by frames, or compute a variety of statistics on the input volumes. 
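    (Editorial sketch, not part of the original docstring: the ``stats`` input
    maps onto an ``mri_concat`` flag of the same name, so a voxel-wise mean
    across the inputs could be requested roughly as::

        concat = Concatenate(in_files=['cont1.nii', 'cont2.nii'],
                             stats='mean',
                             concatenated_file='mean.nii')

    with the resulting command line available from ``concat.cmdline``.)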
Examples -------- Combine two input volumes into one volume with two frames >>> concat = Concatenate() >>> concat.inputs.in_files = ['cont1.nii', 'cont2.nii'] >>> concat.inputs.concatenated_file = 'bar.nii' >>> concat.cmdline 'mri_concat --o bar.nii --i cont1.nii --i cont2.nii' """ _cmd = 'mri_concat' input_spec = ConcatenateInputSpec output_spec = ConcatenateOutputSpec def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.concatenated_file): outputs['concatenated_file'] = os.path.join(os.getcwd(), 'concat_output.nii.gz') else: outputs['concatenated_file'] = self.inputs.concatenated_file return outputs def _gen_filename(self, name): if name == 'concatenated_file': return self._list_outputs()[name] return None class SegStatsInputSpec(FSTraitedSpec): _xor_inputs = ('segmentation_file', 'annot', 'surf_label') segmentation_file = File(exists=True, argstr='--seg %s', xor=_xor_inputs, mandatory=True, desc='segmentation volume path') annot = traits.Tuple(traits.Str, traits.Enum('lh', 'rh'), traits.Str, argstr='--annot %s %s %s', xor=_xor_inputs, mandatory=True, desc='subject hemi parc : use surface parcellation') surf_label = traits.Tuple(traits.Str, traits.Enum('lh', 'rh'), traits.Str, argstr='--slabel %s %s %s', xor=_xor_inputs, mandatory=True, desc='subject hemi label : use surface label') summary_file = File(argstr='--sum %s', genfile=True, desc='Segmentation stats summary table file') partial_volume_file = File(exists=True, argstr='--pv %f', desc='Compensate for partial voluming') in_file = File(exists=True, argstr='--i %s', desc='Use the segmentation to report stats on this volume') frame = traits.Int(argstr='--frame %d', desc='Report stats on nth frame of input volume') multiply = traits.Float(argstr='--mul %f', desc='multiply input by val') calc_snr = traits.Bool(argstr='--snr', desc='save mean/std as extra column in output table') calc_power = traits.Enum('sqr', 'sqrt', argstr='--%s', desc='Compute either the sqr or the sqrt of the input') _ctab_inputs = ('color_table_file', 'default_color_table', 'gca_color_table') color_table_file = File(exists=True, argstr='--ctab %s', xor=_ctab_inputs, desc='color table file with seg id names') default_color_table = traits.Bool(argstr='--ctab-default', xor=_ctab_inputs, desc='use $FREESURFER_HOME/FreeSurferColorLUT.txt') gca_color_table = File(exists=True, argstr='--ctab-gca %s', xor=_ctab_inputs, desc='get color table from GCA (CMA)') segment_id = traits.List(argstr='--id %s...', desc='Manually specify segmentation ids') exclude_id = traits.Int(argstr='--excludeid %d', desc='Exclude seg id from report') exclude_ctx_gm_wm = traits.Bool(argstr='--excl-ctxgmwm', desc='exclude cortical gray and white matter') wm_vol_from_surf = traits.Bool(argstr='--surf-wm-vol', desc='Compute wm volume from surf') cortex_vol_from_surf = traits.Bool(argstr='--surf-ctx-vol', desc='Compute cortex volume from surf') non_empty_only = traits.Bool(argstr='--nonempty', desc='Only report nonempty segmentations') mask_file = File(exists=True, argstr='--mask %s', desc='Mask volume (same size as seg') mask_thresh = traits.Float(argstr='--maskthresh %f', desc='binarize mask with this threshold <0.5>') mask_sign = traits.Enum('abs', 'pos', 'neg', '--masksign %s', desc='Sign for mask threshold: pos, neg, or abs') mask_frame = traits.Int('--maskframe %d', requires=['mask_file'], desc='Mask with this (0 based) frame of the mask volume') mask_invert = traits.Bool(argstr='--maskinvert', desc='Invert binarized mask volume') mask_erode = 
traits.Int(argstr='--maskerode %d', desc='Erode mask by some amount') brain_vol = traits.Enum('brain-vol-from-seg', 'brainmask', '--%s', desc='Compute brain volume either with ``brainmask`` or ``brain-vol-from-seg``') etiv = traits.Bool(argstr='--etiv', desc='Compute ICV from talairach transform') etiv_only = traits.Enum('etiv', 'old-etiv', '--%s-only', desc='Compute etiv and exit. Use ``etiv`` or ``old-etiv``') avgwf_txt_file = traits.Either(traits.Bool, File, argstr='--avgwf %s', desc='Save average waveform into file (bool or filename)') avgwf_file = traits.Either(traits.Bool, File, argstr='--avgwfvol %s', desc='Save as binary volume (bool or filename)') sf_avg_file = traits.Either(traits.Bool, File, argstr='--sfavg %s', desc='Save mean across space and time') vox = traits.List(traits.Int, argstr='--vox %s', desc='Replace seg with all 0s except at C R S (three int inputs)') class SegStatsOutputSpec(TraitedSpec): summary_file = File(exists=True, desc='Segmentation summary statistics table') avgwf_txt_file = File(desc='Text file with functional statistics averaged over segs') avgwf_file = File(desc='Volume with functional statistics averaged over segs') sf_avg_file = File(desc='Text file with func statistics averaged over segs and framss') class SegStats(FSCommand): """Use FreeSurfer mri_segstats for ROI analysis Examples -------- >>> import nipype.interfaces.freesurfer as fs >>> ss = fs.SegStats() >>> ss.inputs.annot = ('PWS04', 'lh', 'aparc') >>> ss.inputs.in_file = 'functional.nii' >>> ss.inputs.subjects_dir = '.' >>> ss.inputs.avgwf_txt_file = './avgwf.txt' >>> ss.inputs.summary_file = './summary.stats' >>> ss.cmdline 'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --i functional.nii --sum ./summary.stats' """ _cmd = 'mri_segstats' input_spec = SegStatsInputSpec output_spec = SegStatsOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['summary_file'] = self.inputs.summary_file if not isdefined(outputs['summary_file']): outputs['summary_file'] = os.path.join(os.getcwd(), 'summary.stats') suffices = dict(avgwf_txt_file='_avgwf.txt', avgwf_file='_avgwf.nii.gz', sf_avg_file='sfavg.txt') if isdefined(self.inputs.segmentation_file): _, src = os.path.split(self.inputs.segmentation_file) if isdefined(self.inputs.annot): src = '_'.join(self.inputs.annot) if isdefined(self.inputs.surf_label): src = '_'.join(self.inputs.surf_label) for name, suffix in suffices.items(): value = getattr(self.inputs, name) if isdefined(value): if isinstance(value, bool): outputs[name] = fname_presuffix(src, suffix=suffix, newpath=os.getcwd(), use_ext=False) else: outputs[name] = value return outputs def _format_arg(self, name, spec, value): if name in ['avgwf_txt_file', 'avgwf_file', 'sf_avg_file']: if isinstance(value, bool): fname = self._list_outputs()[name] else: fname = value return spec.argstr % fname return super(SegStats, self)._format_arg(name, spec, value) def _gen_filename(self, name): if name == 'summary_file': return self._list_outputs()[name] return None class Label2VolInputSpec(FSTraitedSpec): label_file = InputMultiPath(File(exists=True), argstr='--label %s...', xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), copyfile=False, mandatory=True, desc='list of label files') annot_file = File(exists=True, argstr='--annot %s', xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), requires=('subject_id', 'hemi'), mandatory=True, copyfile=False, desc='surface annotation file') seg_file = File(exists=True, argstr='--seg %s', xor=('label_file', 'annot_file', 
'seg_file', 'aparc_aseg'), mandatory=True, copyfile=False, desc='segmentation file') aparc_aseg = traits.Bool(argstr='--aparc+aseg', xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), mandatory=True, desc='use aparc+aseg.mgz in subjectdir as seg') template_file = File(exists=True, argstr='--temp %s', mandatory=True, desc='output template volume') reg_file = File(exists=True, argstr='--reg %s', xor=('reg_file', 'reg_header', 'identity'), desc='tkregister style matrix VolXYZ = R*LabelXYZ') reg_header = File(exists=True, argstr='--regheader %s', xor=('reg_file', 'reg_header', 'identity'), desc='label template volume') identity = traits.Bool(argstr='--identity', xor=('reg_file', 'reg_header', 'identity'), desc='set R=I') invert_mtx = traits.Bool(argstr='--invertmtx', desc='Invert the registration matrix') fill_thresh = traits.Range(0., 1., argstr='--fillthresh %.f', desc='thresh : between 0 and 1') label_voxel_volume = traits.Float(argstr='--labvoxvol %f', desc='volume of each label point (def 1mm3)') proj = traits.Tuple(traits.Enum('abs', 'frac'), traits.Float, traits.Float, traits.Float, argstr='--proj %s %f %f %f', requires=('subject_id', 'hemi'), desc='project along surface normal') subject_id = traits.Str(argstr='--subject %s', desc='subject id') hemi = traits.Enum('lh', 'rh', argstr='--hemi %s', desc='hemisphere to use lh or rh') surface = traits.Str(argstr='--surf %s', desc='use surface instead of white') vol_label_file = File(argstr='--o %s', genfile=True, desc='output volume') label_hit_file = File(argstr='--hits %s', desc='file with each frame is nhits for a label') map_label_stat = File(argstr='--label-stat %s', desc='map the label stats field into the vol') native_vox2ras = traits.Bool(argstr='--native-vox2ras', desc='use native vox2ras xform instead of tkregister-style') class Label2VolOutputSpec(TraitedSpec): vol_label_file = File(exists=True, desc='output volume') class Label2Vol(FSCommand): """Make a binary volume from a Freesurfer label Examples -------- >>> binvol = Label2Vol(label_file='cortex.label', template_file='structural.nii', reg_file='register.dat', fill_thresh=0.5, vol_label_file='foo_out.nii') >>> binvol.cmdline 'mri_label2vol --fillthresh 0 --label cortex.label --reg register.dat --temp structural.nii --o foo_out.nii' """ _cmd = 'mri_label2vol' input_spec = Label2VolInputSpec output_spec = Label2VolOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outfile = self.inputs.vol_label_file if not isdefined(outfile): for key in ['label_file', 'annot_file', 'seg_file']: if isdefined(getattr(self.inputs,key)): path = getattr(self.inputs, key) if isinstance(path,list): path = path[0] _, src = os.path.split(path) if isdefined(self.inputs.aparc_aseg): src = 'aparc+aseg.mgz' outfile = fname_presuffix(src, suffix='_vol.nii.gz', newpath=os.getcwd(), use_ext=False) outputs['vol_label_file'] = outfile return outputs def _gen_filename(self, name): if name == 'vol_label_file': return self._list_outputs()[name] return None class MS_LDAInputSpec(FSTraitedSpec): lda_labels = traits.List(traits.Int(), argstr='-lda %s', mandatory=True, minlen=2, maxlen=2, sep=' ', desc='pair of class labels to optimize') weight_file = traits.File(argstr='-weight %s', mandatory=True, desc='filename for the LDA weights (input or output)') vol_synth_file = traits.File(exists=False, argstr='-synth %s', mandatory=True, desc=('filename for the synthesized output ' 'volume')) label_file = traits.File(exists=True, argstr='-label %s', desc='filename of the label volume') mask_file = 
traits.File(exists=True, argstr='-mask %s', desc='filename of the brain mask volume') shift = traits.Int(argstr='-shift %d', desc='shift all values equal to the given value to zero') conform = traits.Bool(argstr='-conform', desc=('Conform the input volumes (brain mask ' 'typically already conformed)')) use_weights = traits.Bool(argstr='-W', desc=('Use the weights from a previously ' 'generated weight file')) images = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, copyfile=False, desc='list of input FLASH images', position=-1) class MS_LDAOutputSpec(TraitedSpec): weight_file = File(exists=True, desc='') vol_synth_file = File(exists=True, desc='') class MS_LDA(FSCommand): """Perform LDA reduction on the intensity space of an arbitrary # of FLASH images Examples -------- >>> grey_label = 2 >>> white_label = 3 >>> zero_value = 1 >>> optimalWeights = MS_LDA(lda_labels=[grey_label, white_label], \ label_file='label.mgz', weight_file='weights.txt', \ shift=zero_value, vol_synth_file='synth_out.mgz', \ conform=True, use_weights=True, \ images=['FLASH1.mgz', 'FLASH2.mgz', 'FLASH3.mgz']) >>> optimalWeights.cmdline 'mri_ms_LDA -conform -label label.mgz -lda 2 3 -shift 1 -W -synth synth_out.mgz -weight weights.txt FLASH1.mgz FLASH2.mgz FLASH3.mgz' """ _cmd = 'mri_ms_LDA' input_spec = MS_LDAInputSpec output_spec = MS_LDAOutputSpec def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.output_synth): outputs['vol_synth_file'] = os.path.abspath(self.inputs.output_synth) else: outputs['vol_synth_file'] = os.path.abspath(self.inputs.vol_synth_file) if not isdefined(self.inputs.use_weights) or self.inputs.use_weights is False: outputs['weight_file'] = os.path.abspath(self.inputs.weight_file) return outputs def _verify_weights_file_exists(self): if not os.path.exists(os.path.abspath(self.inputs.weight_file)): raise traits.TraitError("MS_LDA: use_weights must accompany an existing weights file") def _format_arg(self, name, spec, value): if name is 'use_weights': if self.inputs.use_weights is True: self._verify_weights_file_exists() else: return '' # TODO: Fix bug when boolean values are set explicitly to false return super(MS_LDA, self)._format_arg(name, spec, value) def _gen_filename(self, name): pass nipype-0.9.2/nipype/interfaces/freesurfer/preprocess.py000066400000000000000000001700551227300005300233640ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Provides interfaces to various commands provided by freeusrfer Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ __docformat__ = 'restructuredtext' import os import os.path as op from glob import glob #import itertools import numpy as np from nibabel import load from nipype.utils.filemanip import fname_presuffix from nipype.interfaces.io import FreeSurferSource from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec from nipype.interfaces.base import (TraitedSpec, File, traits, Directory, InputMultiPath, OutputMultiPath, CommandLine, CommandLineInputSpec, isdefined) from ... 
import logging iflogger = logging.getLogger('interface') class ParseDICOMDirInputSpec(FSTraitedSpec): dicom_dir = Directory(exists=True, argstr='--d %s', mandatory=True, desc='path to siemens dicom directory') dicom_info_file = File('dicominfo.txt', argstr='--o %s', usedefault=True, desc='file to which results are written') sortbyrun = traits.Bool(argstr='--sortbyrun', desc='assign run numbers') summarize = traits.Bool(argstr='--summarize', desc='only print out info for run leaders') class ParseDICOMDirOutputSpec(TraitedSpec): dicom_info_file = File(exists=True, desc='text file containing dicom information') class ParseDICOMDir(FSCommand): """Uses mri_parse_sdcmdir to get information from dicom directories Examples -------- >>> from nipype.interfaces.freesurfer import ParseDICOMDir >>> dcminfo = ParseDICOMDir() >>> dcminfo.inputs.dicom_dir = '.' >>> dcminfo.inputs.sortbyrun = True >>> dcminfo.inputs.summarize = True >>> dcminfo.cmdline 'mri_parse_sdcmdir --d . --o dicominfo.txt --sortbyrun --summarize' """ _cmd = 'mri_parse_sdcmdir' input_spec = ParseDICOMDirInputSpec output_spec = ParseDICOMDirOutputSpec def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.dicom_info_file): outputs['dicom_info_file'] = os.path.join(os.getcwd(), self.inputs.dicom_info_file) return outputs class UnpackSDICOMDirInputSpec(FSTraitedSpec): source_dir = Directory(exists=True, argstr='-src %s', mandatory=True, desc='directory with the DICOM files') output_dir = Directory(argstr='-targ %s', desc='top directory into which the files will be unpacked') run_info = traits.Tuple(traits.Int, traits.Str, traits.Str, traits.Str, mandatory=True, argstr='-run %d %s %s %s', xor=('run_info', 'config', 'seq_config'), desc='runno subdir format name : spec unpacking rules on cmdline') config = File(exists=True, argstr='-cfg %s', mandatory=True, xor=('run_info', 'config', 'seq_config'), desc='specify unpacking rules in file') seq_config = File(exists=True, argstr='-seqcfg %s', mandatory=True, xor=('run_info', 'config', 'seq_config'), desc='specify unpacking rules based on sequence') dir_structure = traits.Enum('fsfast', 'generic', argstr='-%s', desc='unpack to specified directory structures') no_info_dump = traits.Bool(argstr='-noinfodump', desc='do not create infodump file') scan_only = File(exists=True, argstr='-scanonly %s', desc='only scan the directory and put result in file') log_file = File(exists=True, argstr='-log %s', desc='explicilty set log file') spm_zeropad = traits.Int(argstr='-nspmzeropad %d', desc='set frame number zero padding width for SPM') no_unpack_err = traits.Bool(argstr='-no-unpackerr', desc='do not try to unpack runs with errors') class UnpackSDICOMDir(FSCommand): """Use unpacksdcmdir to convert dicom files Call unpacksdcmdir -help from the command line to see more information on using this command. Examples -------- >>> from nipype.interfaces.freesurfer import UnpackSDICOMDir >>> unpack = UnpackSDICOMDir() >>> unpack.inputs.source_dir = '.' >>> unpack.inputs.output_dir = '.' >>> unpack.inputs.run_info = (5, 'mprage', 'nii', 'struct') >>> unpack.inputs.dir_structure = 'generic' >>> unpack.cmdline 'unpacksdcmdir -generic -targ . -run 5 mprage nii struct -src .' 
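    A second sketch (added editorially, not taken from the original docstring):
    instead of spelling the rule out with ``run_info``, the unpacking rules can
    be read from a configuration file, which is mutually exclusive with
    ``run_info`` and ``seq_config``::

        unpack = UnpackSDICOMDir(source_dir='.', output_dir='.',
                                 config='rules.cfg')

    Here ``rules.cfg`` is a hypothetical file whose format is the one expected
    by ``unpacksdcmdir -cfg`` (see ``unpacksdcmdir -help``).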
""" _cmd = 'unpacksdcmdir' input_spec = UnpackSDICOMDirInputSpec class MRIConvertInputSpec(FSTraitedSpec): read_only = traits.Bool(argstr='--read_only', desc='read the input volume') no_write = traits.Bool(argstr='--no_write', desc='do not write output') in_info = traits.Bool(argstr='--in_info', desc='display input info') out_info = traits.Bool(argstr='--out_info', desc='display output info') in_stats = traits.Bool(argstr='--in_stats', desc='display input stats') out_stats = traits.Bool(argstr='--out_stats', desc='display output stats') in_matrix = traits.Bool(argstr='--in_matrix', desc='display input matrix') out_matrix = traits.Bool(argstr='--out_matrix', desc='display output matrix') in_i_size = traits.Int(argstr='--in_i_size %d', desc='input i size') in_j_size = traits.Int(argstr='--in_j_size %d', desc='input j size') in_k_size = traits.Int(argstr='--in_k_size %d', desc='input k size') force_ras = traits.Bool(argstr='--force_ras_good', desc='use default when orientation info absent') in_i_dir = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr='--in_i_direction %f %f %f', desc=' ') in_j_dir = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr='--in_j_direction %f %f %f', desc=' ') in_k_dir = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr='--in_k_direction %f %f %f', desc=' ') _orientations = ['LAI', 'LIA', 'ALI', 'AIL', 'ILA', 'IAL', 'LAS', 'LSA', 'ALS', 'ASL', 'SLA', 'SAL', 'LPI', 'LIP', 'PLI', 'PIL', 'ILP', 'IPL', 'LPS', 'LSP', 'PLS', 'PSL', 'SLP', 'SPL', 'RAI', 'RIA', 'ARI', 'AIR', 'IRA', 'IAR', 'RAS', 'RSA', 'ARS', 'ASR', 'SRA', 'SAR', 'RPI', 'RIP', 'PRI', 'PIR', 'IRP', 'IPR', 'RPS', 'RSP', 'PRS', 'PSR', 'SRP', 'SPR'] #_orientations = [comb for comb in itertools.chain(*[[''.join(c) for c in itertools.permutations(s)] for s in [a+b+c for a in 'LR' for b in 'AP' for c in 'IS']])] in_orientation = traits.Enum(_orientations, argstr='--in_orientation %s', desc='specify the input orientation') in_center = traits.List(traits.Float, maxlen=3, argstr='--in_center %s', desc=' ') sphinx = traits.Bool(argstr='--sphinx', desc='change orientation info to sphinx') out_i_count = traits.Int(argstr='--out_i_count %d', desc='some count ?? in i direction') out_j_count = traits.Int(argstr='--out_j_count %d', desc='some count ?? in j direction') out_k_count = traits.Int(argstr='--out_k_count %d', desc='some count ?? 
in k direction') vox_size = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr='-voxsize %f %f %f', desc=' specify the size (mm) - useful for upsampling or downsampling') out_i_size = traits.Int(argstr='--out_i_size %d', desc='output i size') out_j_size = traits.Int(argstr='--out_j_size %d', desc='output j size') out_k_size = traits.Int(argstr='--out_k_size %d', desc='output k size') out_i_dir = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr='--out_i_direction %f %f %f', desc=' ') out_j_dir = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr='--out_j_direction %f %f %f', desc=' ') out_k_dir = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr='--out_k_direction %f %f %f', desc=' ') out_orientation = traits.Enum(_orientations, argstr='--out_orientation %s', desc='specify the output orientation') out_center = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr='--out_center %f %f %f', desc=' ') out_datatype = traits.Enum('uchar', 'short', 'int', 'float', argstr='--out_data_type %s', desc='output data type ') resample_type = traits.Enum('interpolate', 'weighted', 'nearest', 'sinc', 'cubic', argstr='--resample_type %s', desc=' (default is interpolate)') no_scale = traits.Bool(argstr='--no_scale 1', desc='dont rescale values for COR') no_change = traits.Bool(argstr='--nochange', desc="don't change type of input to that of template") autoalign_matrix = File(exists=True, argstr='--autoalign %s', desc='text file with autoalign matrix') unwarp_gradient = traits.Bool(argstr='--unwarp_gradient_nonlinearity', desc='unwarp gradient nonlinearity') apply_transform = File(exists=True, argstr='--apply_transform %s', desc='apply xfm file') apply_inv_transform = File(exists=True, argstr='--apply_inverse_transform %s', desc='apply inverse transformation xfm file') devolve_transform = traits.Str(argstr='--devolvexfm %s', desc='subject id') crop_center = traits.Tuple(traits.Int, traits.Int, traits.Int, argstr='--crop %d %d %d', desc=' crop to 256 around center (x, y, z)') crop_size = traits.Tuple(traits.Int, traits.Int, traits.Int, argstr='--cropsize %d %d %d', desc=' crop to size ') cut_ends = traits.Int(argstr='--cutends %d', desc='remove ncut slices from the ends') slice_crop = traits.Tuple(traits.Int, traits.Int, argstr='--slice-crop %d %d', desc='s_start s_end : keep slices s_start to s_end') slice_reverse = traits.Bool(argstr='--slice-reverse', desc='reverse order of slices, update vox2ras') slice_bias = traits.Float(argstr='--slice-bias %f', desc='apply half-cosine bias field') fwhm = traits.Float(argstr='--fwhm %f', desc='smooth input volume by fwhm mm') _filetypes = ['cor', 'mgh', 'mgz', 'minc', 'analyze', 'analyze4d', 'spm', 'afni', 'brik', 'bshort', 'bfloat', 'sdt', 'outline', 'otl', 'gdf', 'nifti1', 'nii', 'niigz'] _infiletypes = ['ge', 'gelx', 'lx', 'ximg', 'siemens', 'dicom', 'siemens_dicom'] in_type = traits.Enum(_filetypes + _infiletypes, argstr='--in_type %s', desc='input file type') out_type = traits.Enum(_filetypes, argstr='--out_type %s', desc='output file type') ascii = traits.Bool(argstr='--ascii', desc='save output as ascii col>row>slice>frame') reorder = traits.Tuple(traits.Int, traits.Int, traits.Int, argstr='--reorder %d %d %d', desc='olddim1 olddim2 olddim3') invert_contrast = traits.Float(argstr='--invert_contrast %f', desc='threshold for inversting contrast') in_file = File(exists=True, mandatory=True, position=-2, argstr='--input_volume %s', desc='File to read/convert') out_file = File(argstr='--output_volume %s', 
position=-1, genfile=True, desc='output filename or True to generate one') conform = traits.Bool(argstr='--conform', desc='conform to 256^3') conform_min = traits.Bool(argstr='--conform_min', desc='conform to smallest size') conform_size = traits.Float(argstr='--conform_size %s', desc='conform to size_in_mm') parse_only = traits.Bool(argstr='--parse_only', desc='parse input only') subject_name = traits.Str(argstr='--subject_name %s', desc='subject name ???') reslice_like = File(exists=True, argstr='--reslice_like %s', desc='reslice output to match file') template_type = traits.Enum(_filetypes + _infiletypes, argstr='--template_type %s', desc='template file type') split = traits.Bool(argstr='--split', desc='split output frames into separate output files.') frame = traits.Int(argstr='--frame %d', desc='keep only 0-based frame number') midframe = traits.Bool(argstr='--mid-frame', desc='keep only the middle frame') skip_n = traits.Int(argstr='--nskip %d', desc='skip the first n frames') drop_n = traits.Int(argstr='--ndrop %d', desc='drop the last n frames') frame_subsample = traits.Tuple(traits.Int, traits.Int, traits.Int, argstr='--fsubsample %d %d %d', desc='start delta end : frame subsampling (end = -1 for end)') in_scale = traits.Float(argstr='--scale %f', desc='input intensity scale factor') out_scale = traits.Float(argstr='--out-scale %d', desc='output intensity scale factor') in_like = File(exists=True, argstr='--in_like %s', desc='input looks like') fill_parcellation = traits.Bool(argstr='--fill_parcellation', desc='fill parcellation') smooth_parcellation = traits.Bool(argstr='--smooth_parcellation', desc='smooth parcellation') zero_outlines = traits.Bool(argstr='--zero_outlines', desc='zero outlines') color_file = File(exists=True, argstr='--color_file %s', desc='color file') no_translate = traits.Bool(argstr='--no_translate', desc='???') status_file = File(argstr='--status %s', desc='status file for DICOM conversion') sdcm_list = File(exists=True, argstr='--sdcmlist %s', desc='list of DICOM files for conversion') template_info = traits.Bool('--template_info', desc='dump info about template') crop_gdf = traits.Bool(argstr='--crop_gdf', desc='apply GDF cropping') zero_ge_z_offset = traits.Bool(argstr='--zero_ge_z_offset', desc='zero ge z offset ???') class MRIConvertOutputSpec(TraitedSpec): out_file = OutputMultiPath(File(exists=True), desc='converted output file') class MRIConvert(FSCommand): """use fs mri_convert to manipulate files .. note:: Adds niigz as an output type option Examples -------- >>> mc = MRIConvert() >>> mc.inputs.in_file = 'structural.nii' >>> mc.inputs.out_file = 'outfile.mgz' >>> mc.inputs.out_type = 'mgz' >>> mc.cmdline 'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz' """ _cmd = 'mri_convert' input_spec = MRIConvertInputSpec output_spec = MRIConvertOutputSpec filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc', afni='brik', brik='brik', bshort='bshort', spm='img', analyze='img', analyze4d='img', bfloat='bfloat', nifti1='img', nii='nii', niigz='nii.gz') def _format_arg(self, name, spec, value): if name in ['in_type', 'out_type', 'template_type']: if value == 'niigz': return spec.argstr % 'nii' return super(MRIConvert, self)._format_arg(name, spec, value) def _get_outfilename(self): outfile = self.inputs.out_file if not isdefined(outfile): if isdefined(self.inputs.out_type): suffix = '_out.' 
+ self.filemap[self.inputs.out_type] else: suffix = '_out.nii.gz' outfile = fname_presuffix(self.inputs.in_file, newpath=os.getcwd(), suffix=suffix, use_ext=False) return os.path.abspath(outfile) def _list_outputs(self): outputs = self.output_spec().get() outfile = self._get_outfilename() if isdefined(self.inputs.split) and self.inputs.split: size = load(self.inputs.in_file).get_shape() if len(size) == 3: tp = 1 else: tp = size[-1] if outfile.endswith('.mgz'): stem = outfile.split('.mgz')[0] ext = '.mgz' elif outfile.endswith('.nii.gz'): stem = outfile.split('.nii.gz')[0] ext = '.nii.gz' else: stem = '.'.join(outfile.split('.')[:-1]) ext = '.' + outfile.split('.')[-1] outfile = [] for idx in range(0, tp): outfile.append(stem + '%04d' % idx + ext) if isdefined(self.inputs.out_type): if self.inputs.out_type in ['spm', 'analyze']: # generate all outputs size = load(self.inputs.in_file).get_shape() if len(size) == 3: tp = 1 else: tp = size[-1] # have to take care of all the frame manipulations raise Exception('Not taking frame manipulations into account- please warn the developers') outfiles = [] outfile = self._get_outfilename() for i in range(tp): outfiles.append(fname_presuffix(outfile, suffix='%03d' % (i + 1))) outfile = outfiles outputs['out_file'] = outfile return outputs def _gen_filename(self, name): if name == 'out_file': return self._get_outfilename() return None class DICOMConvertInputSpec(FSTraitedSpec): dicom_dir = Directory(exists=True, mandatory=True, desc='dicom directory from which to convert dicom files') base_output_dir = Directory(mandatory=True, desc='directory in which subject directories are created') subject_dir_template = traits.Str('S.%04d', usedefault=True, desc='template for subject directory name') subject_id = traits.Any(desc='subject identifier to insert into template') file_mapping = traits.List(traits.Tuple(traits.Str, traits.Str), desc='defines the output fields of interface') out_type = traits.Enum('niigz', MRIConvertInputSpec._filetypes, usedefault=True, desc='defines the type of output file produced') dicom_info = File(exists=True, desc='File containing summary information from mri_parse_sdcmdir') seq_list = traits.List(traits.Str, requires=['dicom_info'], desc='list of pulse sequence names to be converted.') ignore_single_slice = traits.Bool(requires=['dicom_info'], desc='ignore volumes containing a single slice') class DICOMConvert(FSCommand): """use fs mri_convert to convert dicom files Examples -------- >>> from nipype.interfaces.freesurfer import DICOMConvert >>> cvt = DICOMConvert() >>> cvt.inputs.dicom_dir = 'dicomdir' >>> cvt.inputs.file_mapping = [('nifti', '*.nii'), ('info', 'dicom*.txt'), ('dti', '*dti.bv*')] """ _cmd = 'mri_convert' input_spec = DICOMConvertInputSpec def _get_dicomfiles(self): """validate fsl bet options if set to None ignore """ return glob(os.path.abspath(os.path.join(self.inputs.dicom_dir, '*-1.dcm'))) def _get_outdir(self): """returns output directory""" subjid = self.inputs.subject_id if not isdefined(subjid): path, fname = os.path.split(self._get_dicomfiles()[0]) subjid = int(fname.split('-')[0]) if isdefined(self.inputs.subject_dir_template): subjid = self.inputs.subject_dir_template % subjid basedir = self.inputs.base_output_dir if not isdefined(basedir): basedir = os.path.abspath('.') outdir = os.path.abspath(os.path.join(basedir, subjid)) return outdir def _get_runs(self): """Returns list of dicom series that should be converted. 
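        (Editorial note, inferred from the code below rather than stated in the
        original docstring: each row of the parsed dicom_info table is assumed
        to carry the run number at 0-based field 2, the slice count at field 8
        and the pulse sequence name at field 12. When ``seq_list`` is given, a
        run is kept only if its sequence name starts with one of those entries,
        and, if ``ignore_single_slice`` is also set, only if it holds more than
        one slice; without ``seq_list`` every run is kept.)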
Requires a dicom info summary file generated by ``DicomDirInfo`` """ seq = np.genfromtxt(self.inputs.dicom_info, dtype=object) runs = [] for s in seq: if self.inputs.seq_list: if self.inputs.ignore_single_slice: if (int(s[8]) > 1) and any([s[12].startswith(sn) for sn in self.inputs.seq_list]): runs.append(int(s[2])) else: if any([s[12].startswith(sn) for sn in self.inputs.seq_list]): runs.append(int(s[2])) else: runs.append(int(s[2])) return runs def _get_filelist(self, outdir): """Returns list of files to be converted""" filemap = {} for f in self._get_dicomfiles(): head, fname = os.path.split(f) fname, ext = os.path.splitext(fname) fileparts = fname.split('-') runno = int(fileparts[1]) out_type = MRIConvert.filemap[self.inputs.out_type] outfile = os.path.join(outdir, '.'.join(('%s-%02d' % (fileparts[0], runno), out_type))) filemap[runno] = (f, outfile) if self.inputs.dicom_info: files = [filemap[r] for r in self._get_runs()] else: files = [filemap[r] for r in filemap.keys()] return files @property def cmdline(self): """ `command` plus any arguments (args) validates arguments and generates command line""" self._check_mandatory_inputs() outdir = self._get_outdir() cmd = [] if not os.path.exists(outdir): cmdstr = 'python -c "import os; os.makedirs(\'%s\')"' % outdir cmd.extend([cmdstr]) infofile = os.path.join(outdir, 'shortinfo.txt') if not os.path.exists(infofile): cmdstr = 'dcmdir-info-mgh %s > %s' % (self.inputs.dicom_dir, infofile) cmd.extend([cmdstr]) files = self._get_filelist(outdir) for infile, outfile in files: if not os.path.exists(outfile): single_cmd = '%s %s %s' % (self.cmd, infile, os.path.join(outdir, outfile)) cmd.extend([single_cmd]) return '; '.join(cmd) class ResampleInputSpec(FSTraitedSpec): in_file = File(exists=True, argstr='-i %s', mandatory=True, desc='file to resample', position=-2) resampled_file = File(argstr='-o %s', desc='output filename', genfile=True, position=-1) voxel_size = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr='-vs %.2f %.2f %.2f', desc='triplet of output voxel sizes', mandatory=True) class ResampleOutputSpec(TraitedSpec): resampled_file = File(exists=True, desc='output filename') class Resample(FSCommand): """Use FreeSurfer mri_convert to up or down-sample image files Examples -------- >>> from nipype.interfaces import freesurfer >>> resampler = freesurfer.Resample() >>> resampler.inputs.in_file = 'structural.nii' >>> resampler.inputs.resampled_file = 'resampled.nii' >>> resampler.inputs.voxel_size = (2.1, 2.1, 2.1) >>> resampler.cmdline 'mri_convert -vs 2.10 2.10 2.10 -i structural.nii -o resampled.nii' """ _cmd = 'mri_convert' input_spec = ResampleInputSpec output_spec = ResampleOutputSpec def _get_outfilename(self): if isdefined(self.inputs.resampled_file): outfile = self.inputs.resampled_file else: outfile = fname_presuffix(self.inputs.in_file, newpath=os.getcwd(), suffix='_resample') return outfile def _list_outputs(self): outputs = self.output_spec().get() outputs['resampled_file'] = self._get_outfilename() return outputs def _gen_filename(self, name): if name == 'resampled_file': return self._get_outfilename() return None class ReconAllInputSpec(CommandLineInputSpec): subject_id = traits.Str("recon_all", argstr='-subjid %s', desc='subject name', usedefault=True) directive = traits.Enum('all', 'autorecon1', 'autorecon2', 'autorecon2-cp', 'autorecon2-wm', 'autorecon2-inflate1', 'autorecon2-perhemi', 'autorecon3', 'localGI', 'qcache', argstr='-%s', desc='process directive', usedefault=True, position=0) hemi = traits.Enum('lh', 
'rh', desc='hemisphere to process', argstr="-hemi %s") T1_files = InputMultiPath(File(exists=True), argstr='-i %s...', desc='name of T1 file to process') T2_file = File(exists=True, argstr="-T2 %s", min_ver='5.3.0', desc='Use a T2 image to refine the cortical surface') openmp = traits.Int(argstr="-openmp %d", desc="Number of processors to use in parallel") subjects_dir = Directory(exists=True, argstr='-sd %s', hash_files=False, desc='path to subjects directory', genfile=True) flags = traits.Str(argstr='%s', desc='additional parameters') class ReconAllIOutputSpec(FreeSurferSource.output_spec): subjects_dir = Directory(exists=True, desc='Freesurfer subjects directory.') subject_id = traits.Str(desc='Subject name for whom to retrieve data') class ReconAll(CommandLine): """Uses recon-all to generate surfaces and parcellations of structural data from anatomical images of a subject. Examples -------- >>> from nipype.interfaces.freesurfer import ReconAll >>> reconall = ReconAll() >>> reconall.inputs.subject_id = 'foo' >>> reconall.inputs.directive = 'all' >>> reconall.inputs.subjects_dir = '.' >>> reconall.inputs.T1_files = 'structural.nii' >>> reconall.cmdline 'recon-all -all -i structural.nii -subjid foo -sd .' """ _cmd = 'recon-all' _additional_metadata = ['loc', 'altkey'] input_spec = ReconAllInputSpec output_spec = ReconAllIOutputSpec _can_resume = True _steps = [ #autorecon1 ('motioncor', ['mri/rawavg.mgz', 'mri/orig.mgz']), ('talairach', ['mri/transforms/talairach.auto.xfm', 'mri/transforms/talairach.xfm']), ('nuintensitycor', ['mri/nu.mgz']), ('normalization', ['mri/T1.mgz']), ('skullstrip', ['mri/transforms/talairach_with_skull.lta', 'mri/brainmask.auto.mgz', 'mri/brainmask.mgz']), #autorecon2 ('gcareg', ['mri/transforms/talairach.lta']), ('canorm', ['mri/norm.mgz']), ('careg', ['mri/transforms/talairach.m3z']), ('careginv', ['mri/transforms/talairach.m3z.inv.x.mgz', 'mri/transforms/talairach.m3z.inv.y.mgz', 'mri/transforms/talairach.m3z.inv.z.mgz']), ('rmneck', ['mri/nu_noneck.mgz']), ('skull-lta', ['mri/transforms/talairach_with_skull_2.lta']), ('calabel', ['mri/aseg.auto_noCCseg.mgz', 'mri/aseg.auto.mgz', 'mri/aseg.mgz']), ('normalization2', ['mri/brain.mgz']), ('maskbfs', ['mri/brain.finalsurfs.mgz']), ('segmentation', ['mri/wm.asegedit.mgz', 'mri/wm.mgz']), ('fill', ['mri/filled.mgz']), ('tessellate', ['surf/lh.orig.nofix', 'surf/rh.orig.nofix']), ('smooth1', ['surf/lh.smoothwm.nofix', 'surf/rh.smoothwm.nofix']), ('inflate1', ['surf/lh.inflated.nofix', 'surf/rh.inflated.nofix']), ('qsphere', ['surf/lh.qsphere.nofix', 'surf/rh.qsphere.nofix']), ('fix', ['surf/lh.orig', 'surf/rh.orig']), ('white', ['surf/lh.white', 'surf/rh.white', 'surf/lh.curv', 'surf/rh.curv', 'surf/lh.area', 'surf/rh.area', 'label/lh.cortex.label', 'label/rh.cortex.label']), ('smooth2', ['surf/lh.smoothwm', 'surf/rh.smoothwm']), ('inflate2', ['surf/lh.inflated', 'surf/rh.inflated', 'surf/lh.sulc', 'surf/rh.sulc', 'surf/lh.inflated.H', 'surf/rh.inflated.H', 'surf/lh.inflated.K', 'surf/rh.inflated.K']), #autorecon3 ('sphere', ['surf/lh.sphere', 'surf/rh.sphere']), ('surfreg', ['surf/lh.sphere.reg', 'surf/rh.sphere.reg']), ('jacobian_white', ['surf/lh.jacobian_white', 'surf/rh.jacobian_white']), ('avgcurv', ['surf/lh.avg_curv', 'surf/rh.avg_curv']), ('cortparc', ['label/lh.aparc.annot', 'label/rh.aparc.annot']), ('pial', ['surf/lh.pial', 'surf/rh.pial', 'surf/lh.curv.pial', 'surf/rh.curv.pial', 'surf/lh.area.pial', 'surf/rh.area.pial', 'surf/lh.thickness', 'surf/rh.thickness']), ('cortparc2', 
['label/lh.aparc.a2009s.annot', 'label/rh.aparc.a2009s.annot']), ('parcstats2', ['stats/lh.aparc.a2009s.stats', 'stats/rh.aparc.a2009s.stats', 'stats/aparc.annot.a2009s.ctab']), ('cortribbon', ['mri/lh.ribbon.mgz', 'mri/rh.ribbon.mgz', 'mri/ribbon.mgz']), ('segstats', ['stats/aseg.stats']), ('aparc2aseg', ['mri/aparc+aseg.mgz', 'mri/aparc.a2009s+aseg.mgz']), ('wmparc', ['mri/wmparc.mgz', 'stats/wmparc.stats']), ('balabels', ['BA.ctab', 'BA.thresh.ctab']), ('label-exvivo-ec', ['label/lh.entorhinal_exvivo.label', 'label/rh.entorhinal_exvivo.label'])] def _gen_subjects_dir(self): return os.getcwd() def _gen_filename(self, name): if name == 'subjects_dir': return self._gen_subjects_dir() return None def _list_outputs(self): """ See io.FreeSurferSource.outputs for the list of outputs returned """ if isdefined(self.inputs.subjects_dir): subjects_dir = self.inputs.subjects_dir else: subjects_dir = self._gen_subjects_dir() if isdefined(self.inputs.hemi): hemi = self.inputs.hemi else: hemi = 'both' outputs = self._outputs().get() outputs.update(FreeSurferSource(subject_id=self.inputs.subject_id, subjects_dir=subjects_dir, hemi=hemi)._list_outputs()) outputs['subject_id'] = self.inputs.subject_id outputs['subjects_dir'] = subjects_dir return outputs def _is_resuming(self): subjects_dir = self.inputs.subjects_dir if not isdefined(subjects_dir): subjects_dir = self._gen_subjects_dir() if os.path.isdir(os.path.join(subjects_dir, self.inputs.subject_id, 'mri')): return True return False def _format_arg(self, name, trait_spec, value): if name == 'T1_files': if self._is_resuming(): return '' return super(ReconAll, self)._format_arg(name, trait_spec, value) @property def cmdline(self): cmd = super(ReconAll, self).cmdline if not self._is_resuming(): return cmd subjects_dir = self.inputs.subjects_dir if not isdefined(subjects_dir): subjects_dir = self._gen_subjects_dir() #cmd = cmd.replace(' -all ', ' -make all ') iflogger.info('Overriding recon-all directive') flags = [] directive = 'all' for idx, step in enumerate(self._steps): step, outfiles = step if all([os.path.exists(os.path.join(subjects_dir, self.inputs.subject_id,f)) for f in outfiles]): flags.append('-no%s'%step) if idx > 23: directive = 'autorecon3' elif idx > 4: directive = 'autorecon2' else: flags.append('-%s'%step) cmd = cmd.replace(' -%s ' % self.inputs.directive, ' -%s ' % directive) cmd += ' ' + ' '.join(flags) iflogger.info('resume recon-all : %s' % cmd) return cmd class BBRegisterInputSpec(FSTraitedSpec): subject_id = traits.Str(argstr='--s %s', desc='freesurfer subject id', mandatory=True) source_file = File(argstr='--mov %s', desc='source file to be registered', mandatory=True, copyfile=False) init = traits.Enum('spm', 'fsl', 'header', argstr='--init-%s', mandatory=True, xor=['init_reg_file'], desc='initialize registration spm, fsl, header') init_reg_file = File(exists=True, desc='existing registration file', xor=['init'], mandatory=True) contrast_type = traits.Enum('t1', 't2', argstr='--%s', desc='contrast type of image', mandatory=True) intermediate_file = File(exists=True, argstr="--int %s", desc="Intermediate image, e.g. 
in case of partial FOV") reg_frame = traits.Int(argstr="--frame %d", xor=["reg_middle_frame"], desc="0-based frame index for 4D source file") reg_middle_frame = traits.Bool(argstr="--mid-frame", xor=["reg_frame"], desc="Register middle frame of 4D source file") out_reg_file = File(argstr='--reg %s', desc='output registration file', genfile=True) spm_nifti = traits.Bool(argstr="--spm-nii", desc="force use of nifti rather than analyze with SPM") epi_mask = traits.Bool(argstr="--epi-mask", desc="mask out B0 regions in stages 1 and 2") out_fsl_file = traits.Either(traits.Bool, File, argstr="--fslmat %s", desc="write the transformation matrix in FSL FLIRT format") registered_file = traits.Either(traits.Bool, File, argstr='--o %s', desc='output warped sourcefile either True or filename') class BBRegisterOutputSpec(TraitedSpec): out_reg_file = File(exists=True, desc='Output registration file') out_fsl_file = File(desc='Output FLIRT-style registration file') min_cost_file = File(exists=True, desc='Output registration minimum cost file') registered_file = File(desc='Registered and resampled source file') class BBRegister(FSCommand): """Use FreeSurfer bbregister to register a volume to the Freesurfer anatomical. This program performs within-subject, cross-modal registration using a boundary-based cost function. The registration is constrained to be 6 DOF (rigid). It is required that you have an anatomical scan of the subject that has already been recon-all-ed using freesurfer. Examples -------- >>> from nipype.interfaces.freesurfer import BBRegister >>> bbreg = BBRegister(subject_id='me', source_file='structural.nii', init='header', contrast_type='t2') >>> bbreg.cmdline 'bbregister --t2 --init-header --reg structural_bbreg_me.dat --mov structural.nii --s me' """ _cmd = 'bbregister' input_spec = BBRegisterInputSpec output_spec = BBRegisterOutputSpec def _list_outputs(self): outputs = self.output_spec().get() _in = self.inputs if isdefined(_in.out_reg_file): outputs['out_reg_file'] = op.abspath(_in.out_reg_file) elif _in.source_file: suffix = '_bbreg_%s.dat' % _in.subject_id outputs['out_reg_file'] = fname_presuffix(_in.source_file, suffix=suffix, use_ext=False) if isdefined(_in.registered_file): if isinstance(_in.registered_file, bool): outputs['registered_file'] = fname_presuffix(_in.source_file, suffix='_bbreg') else: outputs['registered_file'] = op.abspath(_in.registered_file) if isdefined(_in.out_fsl_file): if isinstance(_in.out_fsl_file, bool): suffix='_bbreg_%s.mat' % _in.subject_id out_fsl_file = fname_presuffix(_in.source_file, suffix=suffix, use_ext=False) outputs['out_fsl_file'] = out_fsl_file else: outputs['out_fsl_file'] = op.abspath(_in.out_fsl_file) outputs['min_cost_file'] = outputs['out_reg_file'] + '.mincost' return outputs def _format_arg(self, name, spec, value): if name in ['registered_file', 'out_fsl_file']: if isinstance(value, bool): fname = self._list_outputs()[name] else: fname = value return spec.argstr % fname return super(BBRegister, self)._format_arg(name, spec, value) def _gen_filename(self, name): if name == 'out_reg_file': return self._list_outputs()[name] return None class ApplyVolTransformInputSpec(FSTraitedSpec): source_file = File(exists=True, argstr='--mov %s', copyfile=False, mandatory=True, desc='Input volume you wish to transform') transformed_file = File(desc='Output volume', argstr='--o %s', genfile=True) _targ_xor = ('target_file', 'tal', 'fs_target') target_file = File(exists=True, argstr='--targ %s', xor=_targ_xor, desc='Output template volume', 
mandatory=True) tal = traits.Bool(argstr='--tal', xor=_targ_xor, mandatory=True, desc='map to a sub FOV of MNI305 (with --reg only)') tal_resolution = traits.Float(argstr="--talres %.10f", desc="Resolution to sample when using tal") fs_target = traits.Bool(argstr='--fstarg', xor=_targ_xor, mandatory=True, requires=['reg_file'], desc='use orig.mgz from subject in regfile as target') _reg_xor = ('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject') reg_file = File(exists=True, xor=_reg_xor, argstr='--reg %s', mandatory=True, desc='tkRAS-to-tkRAS matrix (tkregister2 format)') fsl_reg_file = File(exists=True, xor=_reg_xor, argstr='--fsl %s', mandatory=True, desc='fslRAS-to-fslRAS matrix (FSL format)') xfm_reg_file = File(exists=True, xor=_reg_xor, argstr='--xfm %s', mandatory=True, desc='ScannerRAS-to-ScannerRAS matrix (MNI format)') reg_header = traits.Bool(xor=_reg_xor, argstr='--regheader', mandatory=True, desc='ScannerRAS-to-ScannerRAS matrix = identity') subject = traits.Str(xor=_reg_xor, argstr='--s %s', mandatory=True, desc='set matrix = identity and use subject for any templates') inverse = traits.Bool(desc='sample from target to source', argstr='--inv') interp = traits.Enum('trilin', 'nearest', 'cubic', argstr='--interp %s', desc='Interpolation method ( or nearest)') no_resample = traits.Bool(desc='Do not resample; just change vox2ras matrix', argstr='--no-resample') m3z_file = File(argstr="--m3z %s", desc=('This is the morph to be applied to the volume. ' 'Unless the morph is in mri/transforms (eg.: for ' 'talairach.m3z computed by reconall), you will need ' 'to specify the full path to this morph and use the ' '--noDefM3zPath flag.')) no_ded_m3z_path = traits.Bool(argstr="--noDefM3zPath", requires=['m3z_file'], desc=('To be used with the m3z flag. ' 'Instructs the code not to look for the' 'm3z morph in the default location ' '(SUBJECTS_DIR/subj/mri/transforms), ' 'but instead just use the path ' 'indicated in --m3z.')) invert_morph = traits.Bool(argstr="--inv-morph", requires=['m3z_file'], desc=('Compute and use the inverse of the ' 'non-linear morph to resample the input ' 'volume. To be used by --m3z.')) class ApplyVolTransformOutputSpec(TraitedSpec): transformed_file = File(exists=True, desc='Path to output file if used normally') class ApplyVolTransform(FSCommand): """Use FreeSurfer mri_vol2vol to apply a transform. 
Examples -------- >>> from nipype.interfaces.freesurfer import ApplyVolTransform >>> applyreg = ApplyVolTransform() >>> applyreg.inputs.source_file = 'structural.nii' >>> applyreg.inputs.reg_file = 'register.dat' >>> applyreg.inputs.transformed_file = 'struct_warped.nii' >>> applyreg.inputs.fs_target = True >>> applyreg.cmdline 'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii' """ _cmd = 'mri_vol2vol' input_spec = ApplyVolTransformInputSpec output_spec = ApplyVolTransformOutputSpec def _get_outfile(self): outfile = self.inputs.transformed_file if not isdefined(outfile): if self.inputs.inverse == True: if self.inputs.fs_target == True: src = 'orig.mgz' else: src = self.inputs.target_file else: src = self.inputs.source_file outfile = fname_presuffix(src, newpath=os.getcwd(), suffix='_warped') return outfile def _list_outputs(self): outputs = self.output_spec().get() outputs['transformed_file'] = os.path.abspath(self._get_outfile()) return outputs def _gen_filename(self, name): if name == 'transformed_file': return self._get_outfile() return None class SmoothInputSpec(FSTraitedSpec): in_file = File(exists=True, desc='source volume', argstr='--i %s', mandatory=True) reg_file = File(desc='registers volume to surface anatomical ', argstr='--reg %s', mandatory=True, exists=True) smoothed_file = File(desc='output volume', argstr='--o %s', genfile=True) proj_frac_avg = traits.Tuple(traits.Float, traits.Float, traits.Float, xor=['proj_frac'], desc='average a long normal min max delta', argstr='--projfrac-avg %.2f %.2f %.2f') proj_frac = traits.Float(desc='project frac of thickness a long surface normal', xor=['proj_frac_avg'], argstr='--projfrac %s') surface_fwhm = traits.Range(low=0.0, requires=['reg_file'], mandatory=True, xor=['num_iters'], desc='surface FWHM in mm', argstr='--fwhm %f') num_iters = traits.Range(low=1, xor=['surface_fwhm'], mandatory=True, argstr='--niters %d', desc='number of iterations instead of fwhm') vol_fwhm = traits.Range(low=0.0, argstr='--vol-fwhm %f', desc='volume smoothing outside of surface') class SmoothOutputSpec(TraitedSpec): smoothed_file = File(exists=True, desc='smoothed input volume') class Smooth(FSCommand): """Use FreeSurfer mris_volsmooth to smooth a volume This function smoothes cortical regions on a surface and non-cortical regions in volume. .. note:: Cortical voxels are mapped to the surface (3D->2D) and then the smoothed values from the surface are put back into the volume to fill the cortical ribbon. If data is smoothed with this algorithm, one has to be careful about how further processing is interpreted. 
Examples -------- >>> from nipype.interfaces.freesurfer import Smooth >>> smoothvol = Smooth(in_file='functional.nii', smoothed_file = 'foo_out.nii', reg_file='register.dat', surface_fwhm=10, vol_fwhm=6) >>> smoothvol.cmdline 'mris_volsmooth --i functional.nii --reg register.dat --o foo_out.nii --fwhm 10.000000 --vol-fwhm 6.000000' """ _cmd = 'mris_volsmooth' input_spec = SmoothInputSpec output_spec = SmoothOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outfile = self.inputs.smoothed_file if not isdefined(outfile): outfile = self._gen_fname(self.inputs.in_file, suffix='_smooth') outputs['smoothed_file'] = outfile return outputs def _gen_filename(self, name): if name == 'smoothed_file': return self._list_outputs()[name] return None class RobustRegisterInputSpec(FSTraitedSpec): source_file = File(mandatory=True, argstr='--mov %s', desc='volume to be registered') target_file = File(mandatory=True, argstr='--dst %s', desc='target volume for the registration') out_reg_file = File(genfile=True, argstr='--lta %s', desc='registration file to write') registered_file = traits.Either(traits.Bool, File, argstr='--warp %s', desc='registered image; either True or filename') weights_file = traits.Either(traits.Bool, File, argstr='--weights %s', desc='weights image to write; either True or filename') est_int_scale = traits.Bool(argstr='--iscale', desc='estimate intensity scale (recommended for unnormalized images)') trans_only = traits.Bool(argstr='--transonly', desc='find 3 parameter translation only') in_xfm_file = File(exists=True, argstr='--transform', desc='use initial transform on source') half_source = traits.Either(traits.Bool, File, argstr='--halfmov %s', desc="write source volume mapped to halfway space") half_targ = traits.Either(traits.Bool, File, argstr="--halfdst %s", desc="write target volume mapped to halfway space") half_weights = traits.Either(traits.Bool, File, argstr="--halfweights %s", desc="write weights volume mapped to halfway space") half_source_xfm = traits.Either(traits.Bool, File, argstr="--halfmovlta %s", desc="write transform from source to halfway space") half_targ_xfm = traits.Either(traits.Bool, File, argstr="--halfdstlta %s", desc="write transform from target to halfway space") auto_sens = traits.Bool(argstr='--satit', xor=['outlier_sens'], mandatory=True, desc='auto-detect good sensitivity') outlier_sens = traits.Float(argstr='--sat %.4f', xor=['auto_sens'], mandatory=True, desc='set outlier sensitivity explicitly') least_squares = traits.Bool(argstr='--leastsquares', desc='use least squares instead of robust estimator') no_init = traits.Bool(argstr='--noinit', desc='skip transform init') init_orient = traits.Bool(argstr='--initorient', desc='use moments for initial orient (recommended for stripped brains)') max_iterations = traits.Int(argstr='--maxit %d', desc='maximum # of times on each resolution') high_iterations = traits.Int(argstr='--highit %d', desc='max # of times on highest resolution') iteration_thresh = traits.Float(argstr='--epsit %.3f', desc='stop iterations when below threshold') subsample_thresh = traits.Int(argstr='--subsample %d', desc='subsample if dimension is above threshold size') outlier_limit = traits.Float(argstr='--wlimit %.3f', desc='set maximal outlier limit in satit') write_vo2vox = traits.Bool(argstr='--vox2vox', desc='output vox2vox matrix (default is RAS2RAS)') no_multi = traits.Bool(argstr='--nomulti', desc='work on highest resolution') mask_source = File(exists=True, argstr='--maskmov %s', desc='image to mask 
source volume with') mask_target = File(exists=True, argstr='--maskdst %s', desc='image to mask target volume with') force_double = traits.Bool(argstr='--doubleprec', desc='use double-precision intensities') force_float = traits.Bool(argstr='--floattype', desc='use float intensities') class RobustRegisterOutputSpec(TraitedSpec): out_reg_file = File(exists=True, desc="output registration file") registered_file = File(desc="output image with registration applied") weights_file = File(desc="image of weights used") half_source = File(desc="source image mapped to halfway space") half_targ = File(desc="target image mapped to halfway space") half_weights = File(desc="weights image mapped to halfway space") half_source_xfm = File(desc="transform file to map source image to halfway space") half_targ_xfm = File(desc="transform file to map target image to halfway space") class RobustRegister(FSCommand): """Perform intramodal linear registration (translation and rotation) using robust statistics. Examples -------- >>> from nipype.interfaces.freesurfer import RobustRegister >>> reg = RobustRegister() >>> reg.inputs.source_file = 'structural.nii' >>> reg.inputs.target_file = 'T1.nii' >>> reg.inputs.auto_sens = True >>> reg.inputs.init_orient = True >>> reg.cmdline 'mri_robust_register --satit --initorient --lta structural_robustreg.lta --mov structural.nii --dst T1.nii' References ---------- Reuter, M, Rosas, HD, and Fischl, B, (2010). Highly Accurate Inverse Consistent Registration: A Robust Approach. Neuroimage 53(4) 1181-96. """ _cmd = 'mri_robust_register' input_spec = RobustRegisterInputSpec output_spec = RobustRegisterOutputSpec def _format_arg(self, name, spec, value): for option in ["registered_file", "weights_file", "half_source", "half_targ", "half_weights", "half_source_xfm", "half_targ_xfm"]: if name == option: if isinstance(value, bool): fname = self._list_outputs()[name] else: fname = value return spec.argstr % fname return super(RobustRegister, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_reg_file'] = self.inputs.out_reg_file if not isdefined(self.inputs.out_reg_file) and self.inputs.source_file: outputs['out_reg_file'] = fname_presuffix(self.inputs.source_file, suffix='_robustreg.lta', use_ext=False) prefices = dict(src=self.inputs.source_file, trg=self.inputs.target_file) suffices = dict(registered_file=("src", "_robustreg", True), weights_file=("src", "_robustweights", True), half_source=("src", "_halfway", True), half_targ=("trg", "_halfway", True), half_weights=("src", "_halfweights", True), half_source_xfm=("src", "_robustxfm.lta", False), half_targ_xfm=("trg", "_robustxfm.lta", False)) for name, sufftup in suffices.items(): value = getattr(self.inputs, name) if isdefined(value): if isinstance(value, bool): outputs[name] = fname_presuffix(prefices[sufftup[0]], suffix=sufftup[1], newpath=os.getcwd(), use_ext=sufftup[2]) else: outputs[name] = value return outputs def _gen_filename(self, name): if name == 'out_reg_file': return self._list_outputs()[name] return None class FitMSParamsInputSpec(FSTraitedSpec): in_files = traits.List(File(exists=True), argstr="%s", position=-2, mandatory=True, desc="list of FLASH images (must be in mgh format)") tr_list = traits.List(traits.Int, desc="list of TRs of the input files (in msec)") te_list = traits.List(traits.Float, desc="list of TEs of the input files (in msec)") flip_list = traits.List(traits.Int, desc="list of flip angles of the input files") xfm_list = 
traits.List(File(exists=True), desc="list of transform files to apply to each FLASH image") out_dir = Directory(argstr="%s", position=-1, genfile=True, desc="directory to store output in") class FitMSParamsOutputSpec(TraitedSpec): t1_image = File(exists=True, desc="image of estimated T1 relaxation values") pd_image = File(exists=True, desc="image of estimated proton density values") t2star_image = File(exists=True, desc="image of estimated T2* values") class FitMSParams(FSCommand): """Estimate tissue paramaters from a set of FLASH images. Examples -------- >>> from nipype.interfaces.freesurfer import FitMSParams >>> msfit = FitMSParams() >>> msfit.inputs.in_files = ['flash_05.mgz', 'flash_30.mgz'] >>> msfit.inputs.out_dir = 'flash_parameters' >>> msfit.cmdline 'mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters' """ _cmd = "mri_ms_fitparms" input_spec = FitMSParamsInputSpec output_spec = FitMSParamsOutputSpec def _format_arg(self, name, spec, value): if name == "in_files": cmd = "" for i, file in enumerate(value): if isdefined(self.inputs.tr_list): cmd = " ".join((cmd, "-tr %.1f" % self.inputs.tr_list[i])) if isdefined(self.inputs.te_list): cmd = " ".join((cmd, "-te %.3f" % self.inputs.te_list[i])) if isdefined(self.inputs.flip_list): cmd = " ".join((cmd, "-fa %.1f" % self.inputs.flip_list[i])) if isdefined(self.inputs.xfm_list): cmd = " ".join((cmd, "-at %s" % self.inputs.xfm_list[i])) cmd = " ".join((cmd, file)) return cmd return super(FitMSParams, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.out_dir): out_dir = self._gen_filename("out_dir") else: out_dir = self.inputs.out_dir outputs["t1_image"] = os.path.join(out_dir, "T1.mgz") outputs["pd_image"] = os.path.join(out_dir, "PD.mgz") outputs["t2star_image"] = os.path.join(out_dir, "T2star.mgz") return outputs def _gen_filename(self, name): if name == "out_dir": return os.getcwd() return None class SynthesizeFLASHInputSpec(FSTraitedSpec): fixed_weighting = traits.Bool(position=1, argstr="-w", desc="use a fixed weighting to generate optimal gray/white contrast") tr = traits.Float(mandatory=True, position=2, argstr="%.2f", desc="repetition time (in msec)") flip_angle = traits.Float(mandatory=True, position=3, argstr="%.2f", desc="flip angle (in degrees)") te = traits.Float(mandatory=True, position=4, argstr="%.3f", desc="echo time (in msec)") t1_image = File(exists=True, mandatory=True, position=5, argstr="%s", desc="image of T1 values") pd_image = File(exists=True, mandatory=True, position=6, argstr="%s", desc="image of proton density values") out_file = File(genfile=True, argstr="%s", desc="image to write") class SynthesizeFLASHOutputSpec(TraitedSpec): out_file = File(exists=True, desc="synthesized FLASH acquisition") class SynthesizeFLASH(FSCommand): """Synthesize a FLASH acquisition from T1 and proton density maps. 
Examples -------- >>> from nipype.interfaces.freesurfer import SynthesizeFLASH >>> syn = SynthesizeFLASH(tr=20, te=3, flip_angle=30) >>> syn.inputs.t1_image = 'T1.mgz' >>> syn.inputs.pd_image = 'PD.mgz' >>> syn.inputs.out_file = 'flash_30syn.mgz' >>> syn.cmdline 'mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz' """ _cmd = "mri_synthesize" input_spec = SynthesizeFLASHInputSpec output_spec = SynthesizeFLASHOutputSpec def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_file): outputs["out_file"] = self.inputs.out_file else: outputs["out_file"] = self._gen_fname("synth-flash_%02d.mgz" % self.inputs.flip_angle, suffix="") return outputs def _gen_filename(self, name): if name == "out_file": return self._list_outputs()["out_file"] return None nipype-0.9.2/nipype/interfaces/freesurfer/setup.py000066400000000000000000000007171227300005300223340ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('freesurfer', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/freesurfer/tests/000077500000000000000000000000001227300005300217575ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py000066400000000000000000000026441227300005300263270ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import ApplyMask def test_ApplyMask_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-3, ), invert_xfm=dict(argstr='-invert', ), mask_file=dict(argstr='%s', mandatory=True, position=-2, ), mask_thresh=dict(argstr='-T %.4f', ), out_file=dict(argstr='%s', genfile=True, position=-1, ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), use_abs=dict(argstr='-abs', ), xfm_file=dict(argstr='-xform %s', ), xfm_source=dict(argstr='-lta_src %s', ), xfm_target=dict(argstr='-lta_dst %s', ), ) inputs = ApplyMask.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ApplyMask_outputs(): output_map = dict(out_file=dict(), ) outputs = ApplyMask.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py000066400000000000000000000051071227300005300300650ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import ApplyVolTransform def test_ApplyVolTransform_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fs_target=dict(argstr='--fstarg', mandatory=True, requires=['reg_file'], xor=('target_file', 'tal', 'fs_target'), ), fsl_reg_file=dict(argstr='--fsl %s', mandatory=True, xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 
'subject'), ), ignore_exception=dict(nohash=True, usedefault=True, ), interp=dict(argstr='--interp %s', ), inverse=dict(argstr='--inv', ), invert_morph=dict(argstr='--inv-morph', requires=['m3z_file'], ), m3z_file=dict(argstr='--m3z %s', ), no_ded_m3z_path=dict(argstr='--noDefM3zPath', requires=['m3z_file'], ), no_resample=dict(argstr='--no-resample', ), reg_file=dict(argstr='--reg %s', mandatory=True, xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'), ), reg_header=dict(argstr='--regheader', mandatory=True, xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'), ), source_file=dict(argstr='--mov %s', copyfile=False, mandatory=True, ), subject=dict(argstr='--s %s', mandatory=True, xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'), ), subjects_dir=dict(), tal=dict(argstr='--tal', mandatory=True, xor=('target_file', 'tal', 'fs_target'), ), tal_resolution=dict(argstr='--talres %.10f', ), target_file=dict(argstr='--targ %s', mandatory=True, xor=('target_file', 'tal', 'fs_target'), ), terminal_output=dict(mandatory=True, nohash=True, ), transformed_file=dict(argstr='--o %s', genfile=True, ), xfm_reg_file=dict(argstr='--xfm %s', mandatory=True, xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'), ), ) inputs = ApplyVolTransform.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ApplyVolTransform_outputs(): output_map = dict(transformed_file=dict(), ) outputs = ApplyVolTransform.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_BBRegister.py000066400000000000000000000035231227300005300264130ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import BBRegister def test_BBRegister_inputs(): input_map = dict(args=dict(argstr='%s', ), contrast_type=dict(argstr='--%s', mandatory=True, ), environ=dict(nohash=True, usedefault=True, ), epi_mask=dict(argstr='--epi-mask', ), ignore_exception=dict(nohash=True, usedefault=True, ), init=dict(argstr='--init-%s', mandatory=True, xor=['init_reg_file'], ), init_reg_file=dict(mandatory=True, xor=['init'], ), intermediate_file=dict(argstr='--int %s', ), out_fsl_file=dict(argstr='--fslmat %s', ), out_reg_file=dict(argstr='--reg %s', genfile=True, ), reg_frame=dict(argstr='--frame %d', xor=['reg_middle_frame'], ), reg_middle_frame=dict(argstr='--mid-frame', xor=['reg_frame'], ), registered_file=dict(argstr='--o %s', ), source_file=dict(argstr='--mov %s', copyfile=False, mandatory=True, ), spm_nifti=dict(argstr='--spm-nii', ), subject_id=dict(argstr='--s %s', mandatory=True, ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = BBRegister.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BBRegister_outputs(): output_map = dict(min_cost_file=dict(), out_fsl_file=dict(), out_reg_file=dict(), registered_file=dict(), ) outputs = BBRegister.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
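# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the auto-generated spec test above: one
# common way the BBRegister interface whose metadata is checked here is used,
# wired after ReconAll in a nipype pipeline so that the subject produced by
# recon-all feeds the boundary-based registration. Node names and the input
# files ('T1.nii', 'bold.nii') are hypothetical and must exist to run this.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import nipype.pipeline.engine as pe
    from nipype.interfaces.freesurfer import ReconAll, BBRegister

    # run the full cortical reconstruction for one subject
    recon = pe.Node(ReconAll(directive='all'), name='recon')
    recon.inputs.subject_id = 'sub01'
    recon.inputs.T1_files = 'T1.nii'

    # register an EPI volume to that subject's anatomy (6 DOF, boundary-based)
    bbreg = pe.Node(BBRegister(init='header', contrast_type='t2'), name='bbreg')
    bbreg.inputs.source_file = 'bold.nii'

    wf = pe.Workflow(name='fs_coregistration')
    # recon-all reports the subject id and subjects directory it populated;
    # bbregister needs both to find the reconstructed surfaces.
    wf.connect(recon, 'subject_id', bbreg, 'subject_id')
    wf.connect(recon, 'subjects_dir', bbreg, 'subjects_dir')
    # wf.run()  # uncomment to execute recon-all followed by bbregister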
nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py000066400000000000000000000043421227300005300261660ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.model import Binarize def test_Binarize_inputs(): input_map = dict(abs=dict(argstr='--abs', ), args=dict(argstr='%s', ), bin_col_num=dict(argstr='--bincol', ), bin_val=dict(argstr='--binval %d', ), bin_val_not=dict(argstr='--binvalnot %d', ), binary_file=dict(argstr='--o %s', genfile=True, ), count_file=dict(argstr='--count %s', ), dilate=dict(argstr='--dilate %d', ), environ=dict(nohash=True, usedefault=True, ), erode=dict(argstr='--erode %d', ), erode2d=dict(argstr='--erode2d %d', ), frame_no=dict(argstr='--frame %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', copyfile=False, mandatory=True, ), invert=dict(argstr='--inv', ), mask_file=dict(argstr='--mask maskvol', ), mask_thresh=dict(argstr='--mask-thresh %f', ), match=dict(argstr='--match %d...', ), max=dict(argstr='--max %f', xor=['wm_ven_csf'], ), merge_file=dict(argstr='--merge %s', ), min=dict(argstr='--min %f', xor=['wm_ven_csf'], ), out_type=dict(argstr='', ), rmax=dict(argstr='--rmax %f', ), rmin=dict(argstr='--rmin %f', ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ventricles=dict(argstr='--ventricles', ), wm=dict(argstr='--wm', ), wm_ven_csf=dict(argstr='--wm+vcsf', xor=['min', 'max'], ), zero_edges=dict(argstr='--zero-edges', ), zero_slice_edge=dict(argstr='--zero-slice-edges', ), ) inputs = Binarize.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Binarize_outputs(): output_map = dict(binary_file=dict(), count_file=dict(), ) outputs = Binarize.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py000066400000000000000000000033671227300005300266550ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.model import Concatenate def test_Concatenate_inputs(): input_map = dict(add_val=dict(argstr='--add %f', ), args=dict(argstr='%s', ), combine=dict(argstr='--combine', ), concatenated_file=dict(argstr='--o %s', genfile=True, ), environ=dict(nohash=True, usedefault=True, ), gmean=dict(argstr='--gmean %d', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr='--i %s...', mandatory=True, ), keep_dtype=dict(argstr='--keep-datatype', ), mask_file=dict(argstr='--mask %s', ), max_bonfcor=dict(argstr='--max-bonfcor', ), max_index=dict(argstr='--max-index', ), mean_div_n=dict(argstr='--mean-div-n', ), multiply_by=dict(argstr='--mul %f', ), multiply_matrix_file=dict(argstr='--mtx %s', ), paired_stats=dict(argstr='--paired-%s', ), sign=dict(argstr='--%s', ), sort=dict(argstr='--sort', ), stats=dict(argstr='--%s', ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), vote=dict(argstr='--vote', ), ) inputs = Concatenate.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Concatenate_outputs(): output_map = dict(concatenated_file=dict(), ) 
outputs = Concatenate.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py000066400000000000000000000020231227300005300266110ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import DICOMConvert def test_DICOMConvert_inputs(): input_map = dict(args=dict(argstr='%s', ), base_output_dir=dict(mandatory=True, ), dicom_dir=dict(mandatory=True, ), dicom_info=dict(), environ=dict(nohash=True, usedefault=True, ), file_mapping=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), ignore_single_slice=dict(requires=['dicom_info'], ), out_type=dict(usedefault=True, ), seq_list=dict(requires=['dicom_info'], ), subject_dir_template=dict(usedefault=True, ), subject_id=dict(), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DICOMConvert.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py000066400000000000000000000022141227300005300305210ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import ExtractMainComponent def test_ExtractMainComponent_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=1, ), out_file=dict(argstr='%s', name_source='in_file', name_template='%s.maincmp', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ExtractMainComponent.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ExtractMainComponent_outputs(): output_map = dict(out_file=dict(), ) outputs = ExtractMainComponent.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py000066400000000000000000000012241227300005300262260ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.base import FSCommand def test_FSCommand_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = FSCommand.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py000066400000000000000000000023271227300005300265520ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import FitMSParams def test_FitMSParams_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, 
usedefault=True, ), flip_list=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr='%s', mandatory=True, position=-2, ), out_dir=dict(argstr='%s', genfile=True, position=-1, ), subjects_dir=dict(), te_list=dict(), terminal_output=dict(mandatory=True, nohash=True, ), tr_list=dict(), xfm_list=dict(), ) inputs = FitMSParams.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FitMSParams_outputs(): output_map = dict(pd_image=dict(), t1_image=dict(), t2star_image=dict(), ) outputs = FitMSParams.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py000066400000000000000000000104041227300005300255010ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.model import GLMFit def test_GLMFit_inputs(): input_map = dict(allow_ill_cond=dict(argstr='--illcond', ), allow_repeated_subjects=dict(argstr='--allowsubjrep', ), args=dict(argstr='%s', ), calc_AR1=dict(argstr='--tar1', ), check_opts=dict(argstr='--checkopts', ), compute_log_y=dict(argstr='--logy', ), contrast=dict(argstr='--C %s...', ), cortex=dict(argstr='--cortex', xor=['label_file'], ), debug=dict(argstr='--debug', ), design=dict(argstr='--X %s', xor=('fsgd', 'design', 'one_sample'), ), diag=dict(), diag_cluster=dict(argstr='--diag-cluster', ), environ=dict(nohash=True, usedefault=True, ), fixed_fx_dof=dict(argstr='--ffxdof %d', xor=['fixed_fx_dof_file'], ), fixed_fx_dof_file=dict(argstr='--ffxdofdat %d', xor=['fixed_fx_dof'], ), fixed_fx_var=dict(argstr='--yffxvar %s', ), force_perm=dict(argstr='--perm-force', ), fsgd=dict(argstr='--fsgd %s %s', xor=('fsgd', 'design', 'one_sample'), ), fwhm=dict(argstr='--fwhm %f', ), glm_dir=dict(argstr='--glmdir %s', genfile=True, ), hemi=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--y %s', copyfile=False, mandatory=True, ), invert_mask=dict(argstr='--mask-inv', ), label_file=dict(argstr='--label %s', xor=['cortex'], ), mask_file=dict(argstr='--mask %s', ), no_contrast_sok=dict(argstr='--no-contrasts-ok', ), no_est_fwhm=dict(argstr='--no-est-fwhm', ), no_mask_smooth=dict(argstr='--no-mask-smooth', ), no_prune=dict(argstr='--no-prune', xor=['prunethresh'], ), one_sample=dict(argstr='--osgm', xor=('one_sample', 'fsgd', 'design', 'contrast'), ), pca=dict(argstr='--pca', ), per_voxel_reg=dict(argstr='--pvr %s...', ), profile=dict(argstr='--profile %d', ), prune=dict(argstr='--prune', ), prune_thresh=dict(argstr='--prune_thr %f', xor=['noprune'], ), resynth_test=dict(argstr='--resynthtest %d', ), save_cond=dict(argstr='--save-cond', ), save_estimate=dict(argstr='--yhat-save', ), save_res_corr_mtx=dict(argstr='--eres-scm', ), save_residual=dict(argstr='--eres-save', ), seed=dict(argstr='--seed %d', ), self_reg=dict(argstr='--selfreg %d %d %d', ), sim_done_file=dict(argstr='--sim-done %s', ), sim_sign=dict(argstr='--sim-sign %s', ), simulation=dict(argstr='--sim %s %d %f %s', ), subject_id=dict(), subjects_dir=dict(), surf=dict(argstr='--surf %s %s %s', requires=['subject_id', 'hemi'], ), surf_geo=dict(usedefault=True, ), synth=dict(argstr='--synth', ), terminal_output=dict(mandatory=True, nohash=True, ), uniform=dict(argstr='--uniform %f %f', ), 
var_fwhm=dict(argstr='--var-fwhm %f', ), vox_dump=dict(argstr='--voxdump %d %d %d', ), weight_file=dict(xor=['weighted_ls'], ), weight_inv=dict(argstr='--w-inv', xor=['weighted_ls'], ), weight_sqrt=dict(argstr='--w-sqrt', xor=['weighted_ls'], ), weighted_ls=dict(argstr='--wls %s', xor=('weight_file', 'weight_inv', 'weight_sqrt'), ), ) inputs = GLMFit.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_GLMFit_outputs(): output_map = dict(beta_file=dict(), dof_file=dict(), error_file=dict(), error_stddev_file=dict(), error_var_file=dict(), estimate_file=dict(), frame_eigenvectors=dict(), ftest_file=dict(), fwhm_file=dict(), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), mask_file=dict(), sig_file=dict(), singular_values=dict(), spatial_eigenvectors=dict(), svd_stats_file=dict(), ) outputs = GLMFit.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py000066400000000000000000000022611227300005300262570ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import ImageInfo def test_ImageInfo_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', position=1, ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ImageInfo.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ImageInfo_outputs(): output_map = dict(TE=dict(), TI=dict(), TR=dict(), data_type=dict(), dimensions=dict(), file_format=dict(), info=dict(), orientation=dict(), out_file=dict(), ph_enc_dir=dict(), vox_sizes=dict(), ) outputs = ImageInfo.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py000066400000000000000000000047721227300005300262140ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.model import Label2Vol def test_Label2Vol_inputs(): input_map = dict(annot_file=dict(argstr='--annot %s', copyfile=False, mandatory=True, requires=('subject_id', 'hemi'), xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), ), aparc_aseg=dict(argstr='--aparc+aseg', mandatory=True, xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fill_thresh=dict(argstr='--fillthresh %.f', ), hemi=dict(argstr='--hemi %s', ), identity=dict(argstr='--identity', xor=('reg_file', 'reg_header', 'identity'), ), ignore_exception=dict(nohash=True, usedefault=True, ), invert_mtx=dict(argstr='--invertmtx', ), label_file=dict(argstr='--label %s...', copyfile=False, mandatory=True, xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), ), label_hit_file=dict(argstr='--hits %s', ), label_voxel_volume=dict(argstr='--labvoxvol %f', ), map_label_stat=dict(argstr='--label-stat %s', ), 
native_vox2ras=dict(argstr='--native-vox2ras', ), proj=dict(argstr='--proj %s %f %f %f', requires=('subject_id', 'hemi'), ), reg_file=dict(argstr='--reg %s', xor=('reg_file', 'reg_header', 'identity'), ), reg_header=dict(argstr='--regheader %s', xor=('reg_file', 'reg_header', 'identity'), ), seg_file=dict(argstr='--seg %s', copyfile=False, mandatory=True, xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), ), subject_id=dict(argstr='--subject %s', ), subjects_dir=dict(), surface=dict(argstr='--surf %s', ), template_file=dict(argstr='--temp %s', mandatory=True, ), terminal_output=dict(mandatory=True, nohash=True, ), vol_label_file=dict(argstr='--o %s', genfile=True, ), ) inputs = Label2Vol.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Label2Vol_outputs(): output_map = dict(vol_label_file=dict(), ) outputs = Label2Vol.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py000066400000000000000000000121571227300005300264160ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import MRIConvert def test_MRIConvert_inputs(): input_map = dict(apply_inv_transform=dict(argstr='--apply_inverse_transform %s', ), apply_transform=dict(argstr='--apply_transform %s', ), args=dict(argstr='%s', ), ascii=dict(argstr='--ascii', ), autoalign_matrix=dict(argstr='--autoalign %s', ), color_file=dict(argstr='--color_file %s', ), conform=dict(argstr='--conform', ), conform_min=dict(argstr='--conform_min', ), conform_size=dict(argstr='--conform_size %s', ), crop_center=dict(argstr='--crop %d %d %d', ), crop_gdf=dict(argstr='--crop_gdf', ), crop_size=dict(argstr='--cropsize %d %d %d', ), cut_ends=dict(argstr='--cutends %d', ), devolve_transform=dict(argstr='--devolvexfm %s', ), drop_n=dict(argstr='--ndrop %d', ), environ=dict(nohash=True, usedefault=True, ), fill_parcellation=dict(argstr='--fill_parcellation', ), force_ras=dict(argstr='--force_ras_good', ), frame=dict(argstr='--frame %d', ), frame_subsample=dict(argstr='--fsubsample %d %d %d', ), fwhm=dict(argstr='--fwhm %f', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_center=dict(argstr='--in_center %s', ), in_file=dict(argstr='--input_volume %s', mandatory=True, position=-2, ), in_i_dir=dict(argstr='--in_i_direction %f %f %f', ), in_i_size=dict(argstr='--in_i_size %d', ), in_info=dict(argstr='--in_info', ), in_j_dir=dict(argstr='--in_j_direction %f %f %f', ), in_j_size=dict(argstr='--in_j_size %d', ), in_k_dir=dict(argstr='--in_k_direction %f %f %f', ), in_k_size=dict(argstr='--in_k_size %d', ), in_like=dict(argstr='--in_like %s', ), in_matrix=dict(argstr='--in_matrix', ), in_orientation=dict(argstr='--in_orientation %s', ), in_scale=dict(argstr='--scale %f', ), in_stats=dict(argstr='--in_stats', ), in_type=dict(argstr='--in_type %s', ), invert_contrast=dict(argstr='--invert_contrast %f', ), midframe=dict(argstr='--mid-frame', ), no_change=dict(argstr='--nochange', ), no_scale=dict(argstr='--no_scale 1', ), no_translate=dict(argstr='--no_translate', ), no_write=dict(argstr='--no_write', ), out_center=dict(argstr='--out_center %f %f %f', ), out_datatype=dict(argstr='--out_data_type %s', ), out_file=dict(argstr='--output_volume %s', 
genfile=True, position=-1, ), out_i_count=dict(argstr='--out_i_count %d', ), out_i_dir=dict(argstr='--out_i_direction %f %f %f', ), out_i_size=dict(argstr='--out_i_size %d', ), out_info=dict(argstr='--out_info', ), out_j_count=dict(argstr='--out_j_count %d', ), out_j_dir=dict(argstr='--out_j_direction %f %f %f', ), out_j_size=dict(argstr='--out_j_size %d', ), out_k_count=dict(argstr='--out_k_count %d', ), out_k_dir=dict(argstr='--out_k_direction %f %f %f', ), out_k_size=dict(argstr='--out_k_size %d', ), out_matrix=dict(argstr='--out_matrix', ), out_orientation=dict(argstr='--out_orientation %s', ), out_scale=dict(argstr='--out-scale %d', ), out_stats=dict(argstr='--out_stats', ), out_type=dict(argstr='--out_type %s', ), parse_only=dict(argstr='--parse_only', ), read_only=dict(argstr='--read_only', ), reorder=dict(argstr='--reorder %d %d %d', ), resample_type=dict(argstr='--resample_type %s', ), reslice_like=dict(argstr='--reslice_like %s', ), sdcm_list=dict(argstr='--sdcmlist %s', ), skip_n=dict(argstr='--nskip %d', ), slice_bias=dict(argstr='--slice-bias %f', ), slice_crop=dict(argstr='--slice-crop %d %d', ), slice_reverse=dict(argstr='--slice-reverse', ), smooth_parcellation=dict(argstr='--smooth_parcellation', ), sphinx=dict(argstr='--sphinx', ), split=dict(argstr='--split', ), status_file=dict(argstr='--status %s', ), subject_name=dict(argstr='--subject_name %s', ), subjects_dir=dict(), template_info=dict(), template_type=dict(argstr='--template_type %s', ), terminal_output=dict(mandatory=True, nohash=True, ), unwarp_gradient=dict(argstr='--unwarp_gradient_nonlinearity', ), vox_size=dict(argstr='-voxsize %f %f %f', ), zero_ge_z_offset=dict(argstr='--zero_ge_z_offset', ), zero_outlines=dict(argstr='--zero_outlines', ), ) inputs = MRIConvert.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRIConvert_outputs(): output_map = dict(out_file=dict(), ) outputs = MRIConvert.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py000066400000000000000000000024151227300005300275040ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import MRIMarchingCubes def test_MRIMarchingCubes_inputs(): input_map = dict(args=dict(argstr='%s', ), connectivity_value=dict(argstr='%d', position=-1, usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=1, ), label_value=dict(argstr='%d', mandatory=True, position=2, ), out_file=dict(argstr='./%s', genfile=True, position=-2, ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MRIMarchingCubes.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRIMarchingCubes_outputs(): output_map = dict(surface=dict(), ) outputs = MRIMarchingCubes.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
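# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the auto-generated spec test above: the
# argstr/position metadata checked here is what assembles the mri_mc command
# line. Label 17 (Left-Hippocampus in FreeSurfer's aseg) and the file names
# are hypothetical; in_file must exist for the traits validation to pass.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from nipype.interfaces.freesurfer.utils import MRIMarchingCubes

    mc = MRIMarchingCubes(in_file='aseg.mgz', label_value=17,
                          out_file='lh.hippocampus.stl')
    # positional arguments are ordered by their `position` metadata;
    # connectivity_value keeps its default and is appended last.
    print(mc.cmdline)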
nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py000066400000000000000000000044361227300005300265340ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.model import MRISPreproc def test_MRISPreproc_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fsgd_file=dict(argstr='--fsgd %s', xor=('subjects', 'fsgd_file', 'subject_file'), ), fwhm=dict(argstr='--fwhm %f', xor=['num_iters'], ), fwhm_source=dict(argstr='--fwhm-src %f', xor=['num_iters_source'], ), hemi=dict(argstr='--hemi %s', mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), num_iters=dict(argstr='--niters %d', xor=['fwhm'], ), num_iters_source=dict(argstr='--niterssrc %d', xor=['fwhm_source'], ), out_file=dict(argstr='--out %s', genfile=True, ), proj_frac=dict(argstr='--projfrac %s', ), smooth_cortex_only=dict(argstr='--smooth-cortex-only', ), source_format=dict(argstr='--srcfmt %s', ), subject_file=dict(argstr='--f %s', xor=('subjects', 'fsgd_file', 'subject_file'), ), subjects=dict(argstr='--s %s...', xor=('subjects', 'fsgd_file', 'subject_file'), ), subjects_dir=dict(), surf_area=dict(argstr='--area %s', xor=('surf_measure', 'surf_measure_file', 'surf_area'), ), surf_dir=dict(argstr='--surfdir %s', ), surf_measure=dict(argstr='--meas %s', xor=('surf_measure', 'surf_measure_file', 'surf_area'), ), surf_measure_file=dict(argstr='--is %s...', xor=('surf_measure', 'surf_measure_file', 'surf_area'), ), target=dict(argstr='--target %s', mandatory=True, ), terminal_output=dict(mandatory=True, nohash=True, ), vol_measure_file=dict(argstr='--iv %s %s...', ), ) inputs = MRISPreproc.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRISPreproc_outputs(): output_map = dict(out_file=dict(), ) outputs = MRISPreproc.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py000066400000000000000000000024221227300005300270750ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import MRITessellate def test_MRITessellate_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-3, ), label_value=dict(argstr='%d', mandatory=True, position=-2, ), out_file=dict(argstr='./%s', genfile=True, position=-1, ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), tesselate_all_voxels=dict(argstr='-a', ), use_real_RAS_coordinates=dict(argstr='-n', ), ) inputs = MRITessellate.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRITessellate_outputs(): output_map = dict(surface=dict(), ) outputs = MRITessellate.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
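# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the auto-generated spec test above: each
# `yield assert_equal, ...` in these generator tests expands to a plain
# metadata comparison such as the assertions below.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from nipype.interfaces.freesurfer.utils import MRITessellate

    input_traits = MRITessellate.input_spec().traits()
    # the same trait metadata the loops above iterate over
    assert input_traits['label_value'].argstr == '%d'
    assert input_traits['label_value'].mandatory is True
    assert input_traits['out_file'].genfile is True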
nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py000066400000000000000000000034461227300005300266020ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import MRIsConvert def test_MRIsConvert_inputs(): input_map = dict(annot_file=dict(argstr='--annot %s', ), args=dict(argstr='%s', ), dataarray_num=dict(argstr='--da_num %d', ), environ=dict(nohash=True, usedefault=True, ), functional_file=dict(argstr='-f %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), label_file=dict(argstr='--label %s', ), labelstats_outfile=dict(argstr='--labelstats %s', ), normal=dict(argstr='-n', ), origname=dict(argstr='-o %s', ), out_datatype=dict(mandatory=True, ), out_file=dict(argstr='./%s', genfile=True, position=-1, ), parcstats_file=dict(argstr='--parcstats %s', ), patch=dict(argstr='-p', ), rescale=dict(argstr='-r', ), scalarcurv_file=dict(argstr='-c %s', ), scale=dict(argstr='-s %.3f', ), subjects_dir=dict(), talairachxfm_subjid=dict(argstr='-t %s', ), terminal_output=dict(mandatory=True, nohash=True, ), vertex=dict(argstr='-v', ), xyz_ascii=dict(argstr='-a', ), ) inputs = MRIsConvert.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRIsConvert_outputs(): output_map = dict(converted=dict(), ) outputs = MRIsConvert.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py000066400000000000000000000027241227300005300254240ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.model import MS_LDA def test_MS_LDA_inputs(): input_map = dict(args=dict(argstr='%s', ), conform=dict(argstr='-conform', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), images=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), label_file=dict(argstr='-label %s', ), lda_labels=dict(argstr='-lda %s', mandatory=True, sep=' ', ), mask_file=dict(argstr='-mask %s', ), shift=dict(argstr='-shift %d', ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), use_weights=dict(argstr='-W', ), vol_synth_file=dict(argstr='-synth %s', mandatory=True, ), weight_file=dict(argstr='-weight %s', mandatory=True, ), ) inputs = MS_LDA.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MS_LDA_outputs(): output_map = dict(vol_synth_file=dict(), weight_file=dict(), ) outputs = MS_LDA.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py000066400000000000000000000022041227300005300301060ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import MakeAverageSubject def test_MakeAverageSubject_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, 
usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), out_name=dict(argstr='--out %s', usedefault=True, ), subjects_dir=dict(), subjects_ids=dict(argstr='--subjects %s', mandatory=True, sep=' ', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MakeAverageSubject.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MakeAverageSubject_outputs(): output_map = dict(average_subject_name=dict(), ) outputs = MakeAverageSubject.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py000066400000000000000000000104541227300005300272730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.model import OneSampleTTest def test_OneSampleTTest_inputs(): input_map = dict(allow_ill_cond=dict(argstr='--illcond', ), allow_repeated_subjects=dict(argstr='--allowsubjrep', ), args=dict(argstr='%s', ), calc_AR1=dict(argstr='--tar1', ), check_opts=dict(argstr='--checkopts', ), compute_log_y=dict(argstr='--logy', ), contrast=dict(argstr='--C %s...', ), cortex=dict(argstr='--cortex', xor=['label_file'], ), debug=dict(argstr='--debug', ), design=dict(argstr='--X %s', xor=('fsgd', 'design', 'one_sample'), ), diag=dict(), diag_cluster=dict(argstr='--diag-cluster', ), environ=dict(nohash=True, usedefault=True, ), fixed_fx_dof=dict(argstr='--ffxdof %d', xor=['fixed_fx_dof_file'], ), fixed_fx_dof_file=dict(argstr='--ffxdofdat %d', xor=['fixed_fx_dof'], ), fixed_fx_var=dict(argstr='--yffxvar %s', ), force_perm=dict(argstr='--perm-force', ), fsgd=dict(argstr='--fsgd %s %s', xor=('fsgd', 'design', 'one_sample'), ), fwhm=dict(argstr='--fwhm %f', ), glm_dir=dict(argstr='--glmdir %s', genfile=True, ), hemi=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--y %s', copyfile=False, mandatory=True, ), invert_mask=dict(argstr='--mask-inv', ), label_file=dict(argstr='--label %s', xor=['cortex'], ), mask_file=dict(argstr='--mask %s', ), no_contrast_sok=dict(argstr='--no-contrasts-ok', ), no_est_fwhm=dict(argstr='--no-est-fwhm', ), no_mask_smooth=dict(argstr='--no-mask-smooth', ), no_prune=dict(argstr='--no-prune', xor=['prunethresh'], ), one_sample=dict(argstr='--osgm', xor=('one_sample', 'fsgd', 'design', 'contrast'), ), pca=dict(argstr='--pca', ), per_voxel_reg=dict(argstr='--pvr %s...', ), profile=dict(argstr='--profile %d', ), prune=dict(argstr='--prune', ), prune_thresh=dict(argstr='--prune_thr %f', xor=['noprune'], ), resynth_test=dict(argstr='--resynthtest %d', ), save_cond=dict(argstr='--save-cond', ), save_estimate=dict(argstr='--yhat-save', ), save_res_corr_mtx=dict(argstr='--eres-scm', ), save_residual=dict(argstr='--eres-save', ), seed=dict(argstr='--seed %d', ), self_reg=dict(argstr='--selfreg %d %d %d', ), sim_done_file=dict(argstr='--sim-done %s', ), sim_sign=dict(argstr='--sim-sign %s', ), simulation=dict(argstr='--sim %s %d %f %s', ), subject_id=dict(), subjects_dir=dict(), surf=dict(argstr='--surf %s %s %s', requires=['subject_id', 'hemi'], ), surf_geo=dict(usedefault=True, ), synth=dict(argstr='--synth', ), terminal_output=dict(mandatory=True, nohash=True, ), uniform=dict(argstr='--uniform %f %f', ), var_fwhm=dict(argstr='--var-fwhm %f', ), 
vox_dump=dict(argstr='--voxdump %d %d %d', ), weight_file=dict(xor=['weighted_ls'], ), weight_inv=dict(argstr='--w-inv', xor=['weighted_ls'], ), weight_sqrt=dict(argstr='--w-sqrt', xor=['weighted_ls'], ), weighted_ls=dict(argstr='--wls %s', xor=('weight_file', 'weight_inv', 'weight_sqrt'), ), ) inputs = OneSampleTTest.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_OneSampleTTest_outputs(): output_map = dict(beta_file=dict(), dof_file=dict(), error_file=dict(), error_stddev_file=dict(), error_var_file=dict(), estimate_file=dict(), frame_eigenvectors=dict(), ftest_file=dict(), fwhm_file=dict(), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), mask_file=dict(), sig_file=dict(), singular_values=dict(), spatial_eigenvectors=dict(), svd_stats_file=dict(), ) outputs = OneSampleTTest.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py000066400000000000000000000022711227300005300267070ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import ParseDICOMDir def test_ParseDICOMDir_inputs(): input_map = dict(args=dict(argstr='%s', ), dicom_dir=dict(argstr='--d %s', mandatory=True, ), dicom_info_file=dict(argstr='--o %s', usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), sortbyrun=dict(argstr='--sortbyrun', ), subjects_dir=dict(), summarize=dict(argstr='--summarize', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ParseDICOMDir.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ParseDICOMDir_outputs(): output_map = dict(dicom_info_file=dict(), ) outputs = ParseDICOMDir.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py000066400000000000000000000054201227300005300261200ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import ReconAll def test_ReconAll_inputs(): input_map = dict(T1_files=dict(argstr='-i %s...', ), T2_file=dict(argstr='-T2 %s', min_ver='5.3.0', ), args=dict(argstr='%s', ), directive=dict(argstr='-%s', position=0, usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), flags=dict(argstr='%s', ), hemi=dict(argstr='-hemi %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), openmp=dict(argstr='-openmp %d', ), subject_id=dict(argstr='-subjid %s', usedefault=True, ), subjects_dir=dict(argstr='-sd %s', genfile=True, hash_files=False, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ReconAll.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ReconAll_outputs(): output_map = dict(BA_stats=dict(altkey='BA', loc='stats', ), T1=dict(loc='mri', ), annot=dict(altkey='*annot', loc='label', ), 
aparc_a2009s_stats=dict(altkey='aparc.a2009s', loc='stats', ), aparc_aseg=dict(altkey='aparc*aseg', loc='mri', ), aparc_stats=dict(altkey='aparc', loc='stats', ), aseg=dict(loc='mri', ), aseg_stats=dict(altkey='aseg', loc='stats', ), brain=dict(loc='mri', ), brainmask=dict(loc='mri', ), curv=dict(loc='surf', ), curv_stats=dict(altkey='curv', loc='stats', ), entorhinal_exvivo_stats=dict(altkey='entorhinal_exvivo', loc='stats', ), filled=dict(loc='mri', ), inflated=dict(loc='surf', ), label=dict(altkey='*label', loc='label', ), norm=dict(loc='mri', ), nu=dict(loc='mri', ), orig=dict(loc='mri', ), pial=dict(loc='surf', ), rawavg=dict(loc='mri', ), ribbon=dict(altkey='*ribbon', loc='mri', ), smoothwm=dict(loc='surf', ), sphere=dict(loc='surf', ), sphere_reg=dict(altkey='sphere.reg', loc='surf', ), subject_id=dict(), subjects_dir=dict(), sulc=dict(loc='surf', ), thickness=dict(loc='surf', ), volume=dict(loc='surf', ), white=dict(loc='surf', ), wm=dict(loc='mri', ), wmparc=dict(loc='mri', ), wmparc_stats=dict(altkey='wmparc', loc='stats', ), ) outputs = ReconAll.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_Resample.py000066400000000000000000000022451227300005300261730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import Resample def test_Resample_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', mandatory=True, position=-2, ), resampled_file=dict(argstr='-o %s', genfile=True, position=-1, ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), voxel_size=dict(argstr='-vs %.2f %.2f %.2f', mandatory=True, ), ) inputs = Resample.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Resample_outputs(): output_map = dict(resampled_file=dict(), ) outputs = Resample.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py000066400000000000000000000053421227300005300274070ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import RobustRegister def test_RobustRegister_inputs(): input_map = dict(args=dict(argstr='%s', ), auto_sens=dict(argstr='--satit', mandatory=True, xor=['outlier_sens'], ), environ=dict(nohash=True, usedefault=True, ), est_int_scale=dict(argstr='--iscale', ), force_double=dict(argstr='--doubleprec', ), force_float=dict(argstr='--floattype', ), half_source=dict(argstr='--halfmov %s', ), half_source_xfm=dict(argstr='--halfmovlta %s', ), half_targ=dict(argstr='--halfdst %s', ), half_targ_xfm=dict(argstr='--halfdstlta %s', ), half_weights=dict(argstr='--halfweights %s', ), high_iterations=dict(argstr='--highit %d', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_xfm_file=dict(argstr='--transform', ), init_orient=dict(argstr='--initorient', ), iteration_thresh=dict(argstr='--epsit %.3f', ), 
least_squares=dict(argstr='--leastsquares', ), mask_source=dict(argstr='--maskmov %s', ), mask_target=dict(argstr='--maskdst %s', ), max_iterations=dict(argstr='--maxit %d', ), no_init=dict(argstr='--noinit', ), no_multi=dict(argstr='--nomulti', ), out_reg_file=dict(argstr='--lta %s', genfile=True, ), outlier_limit=dict(argstr='--wlimit %.3f', ), outlier_sens=dict(argstr='--sat %.4f', mandatory=True, xor=['auto_sens'], ), registered_file=dict(argstr='--warp %s', ), source_file=dict(argstr='--mov %s', mandatory=True, ), subjects_dir=dict(), subsample_thresh=dict(argstr='--subsample %d', ), target_file=dict(argstr='--dst %s', mandatory=True, ), terminal_output=dict(mandatory=True, nohash=True, ), trans_only=dict(argstr='--transonly', ), weights_file=dict(argstr='--weights %s', ), write_vo2vox=dict(argstr='--vox2vox', ), ) inputs = RobustRegister.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_RobustRegister_outputs(): output_map = dict(half_source=dict(), half_source_xfm=dict(), half_targ=dict(), half_targ_xfm=dict(), half_weights=dict(), out_reg_file=dict(), registered_file=dict(), weights_file=dict(), ) outputs = RobustRegister.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py000066400000000000000000000063251227300005300274630ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import SampleToSurface def test_SampleToSurface_inputs(): input_map = dict(apply_rot=dict(argstr='--rot %.3f %.3f %.3f', ), apply_trans=dict(argstr='--trans %.3f %.3f %.3f', ), args=dict(argstr='%s', ), cortex_mask=dict(argstr='--cortex', xor=['mask_label'], ), environ=dict(nohash=True, usedefault=True, ), fix_tk_reg=dict(argstr='--fixtkreg', ), float2int_method=dict(argstr='--float2int %s', ), frame=dict(argstr='--frame %d', ), hemi=dict(argstr='--hemi %s', mandatory=True, ), hits_file=dict(argstr='--srchit %s', ), hits_type=dict(argstr='--srchit_type', ), ico_order=dict(argstr='--icoorder %d', requires=['target_subject'], ), ignore_exception=dict(nohash=True, usedefault=True, ), interp_method=dict(argstr='--interp %s', ), mask_label=dict(argstr='--mask %s', xor=['cortex_mask'], ), mni152reg=dict(argstr='--mni152reg', mandatory=True, xor=['reg_file', 'reg_header', 'mni152reg'], ), no_reshape=dict(argstr='--noreshape', xor=['reshape'], ), out_file=dict(argstr='--o %s', genfile=True, ), out_type=dict(argstr='--out_type %s', ), override_reg_subj=dict(argstr='--srcsubject %s', requires=['subject_id'], ), projection_stem=dict(mandatory=True, xor=['sampling_method'], ), reference_file=dict(argstr='--ref %s', ), reg_file=dict(argstr='--reg %s', mandatory=True, xor=['reg_file', 'reg_header', 'mni152reg'], ), reg_header=dict(argstr='--regheader %s', mandatory=True, requires=['subject_id'], xor=['reg_file', 'reg_header', 'mni152reg'], ), reshape=dict(argstr='--reshape', xor=['no_reshape'], ), reshape_slices=dict(argstr='--rf %d', ), sampling_method=dict(argstr='%s', mandatory=True, requires=['sampling_range', 'sampling_units'], xor=['projection_stem'], ), sampling_range=dict(), sampling_units=dict(), scale_input=dict(argstr='--scale %.3f', ), smooth_surf=dict(argstr='--surf-fwhm %.3f', ), 
smooth_vol=dict(argstr='--fwhm %.3f', ), source_file=dict(argstr='--mov %s', mandatory=True, ), subject_id=dict(), subjects_dir=dict(), surf_reg=dict(argstr='--surfreg', requires=['target_subject'], ), surface=dict(argstr='--surf %s', ), target_subject=dict(argstr='--trgsubject %s', ), terminal_output=dict(mandatory=True, nohash=True, ), vox_file=dict(argstr='--nvox %s', ), ) inputs = SampleToSurface.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SampleToSurface_outputs(): output_map = dict(hits_file=dict(), out_file=dict(), vox_file=dict(), ) outputs = SampleToSurface.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py000066400000000000000000000056641227300005300261700ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.model import SegStats def test_SegStats_inputs(): input_map = dict(annot=dict(argstr='--annot %s %s %s', mandatory=True, xor=('segmentation_file', 'annot', 'surf_label'), ), args=dict(argstr='%s', ), avgwf_file=dict(argstr='--avgwfvol %s', ), avgwf_txt_file=dict(argstr='--avgwf %s', ), brain_vol=dict(), calc_power=dict(argstr='--%s', ), calc_snr=dict(argstr='--snr', ), color_table_file=dict(argstr='--ctab %s', xor=('color_table_file', 'default_color_table', 'gca_color_table'), ), cortex_vol_from_surf=dict(argstr='--surf-ctx-vol', ), default_color_table=dict(argstr='--ctab-default', xor=('color_table_file', 'default_color_table', 'gca_color_table'), ), environ=dict(nohash=True, usedefault=True, ), etiv=dict(argstr='--etiv', ), etiv_only=dict(), exclude_ctx_gm_wm=dict(argstr='--excl-ctxgmwm', ), exclude_id=dict(argstr='--excludeid %d', ), frame=dict(argstr='--frame %d', ), gca_color_table=dict(argstr='--ctab-gca %s', xor=('color_table_file', 'default_color_table', 'gca_color_table'), ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', ), mask_erode=dict(argstr='--maskerode %d', ), mask_file=dict(argstr='--mask %s', ), mask_frame=dict(requires=['mask_file'], ), mask_invert=dict(argstr='--maskinvert', ), mask_sign=dict(), mask_thresh=dict(argstr='--maskthresh %f', ), multiply=dict(argstr='--mul %f', ), non_empty_only=dict(argstr='--nonempty', ), partial_volume_file=dict(argstr='--pv %f', ), segment_id=dict(argstr='--id %s...', ), segmentation_file=dict(argstr='--seg %s', mandatory=True, xor=('segmentation_file', 'annot', 'surf_label'), ), sf_avg_file=dict(argstr='--sfavg %s', ), subjects_dir=dict(), summary_file=dict(argstr='--sum %s', genfile=True, ), surf_label=dict(argstr='--slabel %s %s %s', mandatory=True, xor=('segmentation_file', 'annot', 'surf_label'), ), terminal_output=dict(mandatory=True, nohash=True, ), vox=dict(argstr='--vox %s', ), wm_vol_from_surf=dict(argstr='--surf-wm-vol', ), ) inputs = SegStats.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SegStats_outputs(): output_map = dict(avgwf_file=dict(), avgwf_txt_file=dict(), sf_avg_file=dict(), summary_file=dict(), ) outputs = SegStats.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, 
getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py000066400000000000000000000030351227300005300256720ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import Smooth def test_Smooth_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', mandatory=True, ), num_iters=dict(argstr='--niters %d', mandatory=True, xor=['surface_fwhm'], ), proj_frac=dict(argstr='--projfrac %s', xor=['proj_frac_avg'], ), proj_frac_avg=dict(argstr='--projfrac-avg %.2f %.2f %.2f', xor=['proj_frac'], ), reg_file=dict(argstr='--reg %s', mandatory=True, ), smoothed_file=dict(argstr='--o %s', genfile=True, ), subjects_dir=dict(), surface_fwhm=dict(argstr='--fwhm %f', mandatory=True, requires=['reg_file'], xor=['num_iters'], ), terminal_output=dict(mandatory=True, nohash=True, ), vol_fwhm=dict(argstr='--vol-fwhm %f', ), ) inputs = Smooth.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Smooth_outputs(): output_map = dict(smoothed_file=dict(), ) outputs = Smooth.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py000066400000000000000000000035011227300005300302570ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import SmoothTessellation def test_SmoothTessellation_inputs(): input_map = dict(args=dict(argstr='%s', ), curvature_averaging_iterations=dict(argstr='-a %d', position=-1, usedefault=True, ), disable_estimates=dict(argstr='-nw', ), environ=dict(nohash=True, usedefault=True, ), gaussian_curvature_norm_steps=dict(argstr='%d ', position=4, ), gaussian_curvature_smoothing_steps=dict(argstr='%d', position=5, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=1, ), normalize_area=dict(argstr='-area', ), out_area_file=dict(argstr='-b %s', ), out_curvature_file=dict(argstr='-c %s', ), out_file=dict(argstr='%s', genfile=True, position=2, ), smoothing_iterations=dict(argstr='-n %d', position=-2, usedefault=True, ), snapshot_writing_iterations=dict(argstr='-w %d', ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), use_gaussian_curvature_smoothing=dict(argstr='-g', position=3, ), use_momentum=dict(argstr='-m', ), ) inputs = SmoothTessellation.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SmoothTessellation_outputs(): output_map = dict(surface=dict(), ) outputs = SmoothTessellation.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py000066400000000000000000000035041227300005300304510ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from 
nipype.interfaces.freesurfer.utils import Surface2VolTransform def test_Surface2VolTransform_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), hemi=dict(argstr='--hemi %s', mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), mkmask=dict(argstr='--mkmask', ), projfrac=dict(argstr='--projfrac %s', ), reg_file=dict(argstr='--volreg %s', mandatory=True, xor=['subject_id'], ), source_file=dict(argstr='--surfval %s', copyfile=False, mandatory=True, ), subject_id=dict(argstr='--identity %s', xor=['reg_file'], ), subjects_dir=dict(argstr='--sd %s', ), surf_name=dict(argstr='--surf %s', ), template_file=dict(argstr='--template %s', ), terminal_output=dict(mandatory=True, nohash=True, ), transformed_file=dict(argstr='--outvol %s', hash_files=False, name_source=['source_file'], name_template='%s_asVol.nii', ), vertexvol_file=dict(argstr='--vtxvol %s', hash_files=False, name_source=['source_file'], name_template='%s_asVol_vertex.nii', ), ) inputs = Surface2VolTransform.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Surface2VolTransform_outputs(): output_map = dict(transformed_file=dict(), vertexvol_file=dict(), ) outputs = Surface2VolTransform.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py000066400000000000000000000026711227300005300272100ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import SurfaceSmooth def test_SurfaceSmooth_inputs(): input_map = dict(args=dict(argstr='%s', ), cortex=dict(argstr='--cortex', usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), fwhm=dict(argstr='--fwhm %.4f', xor=['smooth_iters'], ), hemi=dict(argstr='--hemi %s', mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--sval %s', mandatory=True, ), out_file=dict(argstr='--tval %s', genfile=True, ), reshape=dict(argstr='--reshape', ), smooth_iters=dict(argstr='--smooth %d', xor=['fwhm'], ), subject_id=dict(argstr='--s %s', mandatory=True, ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SurfaceSmooth.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SurfaceSmooth_outputs(): output_map = dict(out_file=dict(), ) outputs = SurfaceSmooth.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py000066400000000000000000000056261227300005300277240ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import SurfaceSnapshots def test_SurfaceSnapshots_inputs(): input_map = dict(annot_file=dict(argstr='-annotation %s', xor=['annot_name'], ), annot_name=dict(argstr='-annotation %s', xor=['annot_file'], ), args=dict(argstr='%s', ), colortable=dict(argstr='-colortable %s', ), demean_overlay=dict(argstr='-zm', ), environ=dict(nohash=True, 
usedefault=True, ), hemi=dict(argstr='%s', mandatory=True, position=2, ), identity_reg=dict(argstr='-overlay-reg-identity', xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), ignore_exception=dict(nohash=True, usedefault=True, ), invert_overlay=dict(argstr='-invphaseflag 1', ), label_file=dict(argstr='-label %s', xor=['label_name'], ), label_name=dict(argstr='-label %s', xor=['label_file'], ), label_outline=dict(argstr='-label-outline', ), label_under=dict(argstr='-labels-under', ), mni152_reg=dict(argstr='-mni152reg', xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), orig_suffix=dict(argstr='-orig %s', ), overlay=dict(argstr='-overlay %s', requires=['overlay_range'], ), overlay_range=dict(argstr='%s', ), overlay_range_offset=dict(argstr='-foffset %.3f', ), overlay_reg=dict(argstr='-overlay-reg %s', xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), patch_file=dict(argstr='-patch %s', ), reverse_overlay=dict(argstr='-revphaseflag 1', ), screenshot_stem=dict(), show_color_scale=dict(argstr='-colscalebarflag 1', ), show_color_text=dict(argstr='-colscaletext 1', ), show_curv=dict(argstr='-curv', xor=['show_gray_curv'], ), show_gray_curv=dict(argstr='-gray', xor=['show_curv'], ), six_images=dict(), sphere_suffix=dict(argstr='-sphere %s', ), stem_template_args=dict(requires=['screenshot_stem'], ), subject_id=dict(argstr='%s', mandatory=True, position=1, ), subjects_dir=dict(), surface=dict(argstr='%s', mandatory=True, position=3, ), tcl_script=dict(argstr='%s', genfile=True, ), terminal_output=dict(mandatory=True, nohash=True, ), truncate_overlay=dict(argstr='-truncphaseflag 1', ), ) inputs = SurfaceSnapshots.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SurfaceSnapshots_outputs(): output_map = dict(snapshots=dict(), ) outputs = SurfaceSnapshots.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py000066400000000000000000000033311227300005300277040ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import SurfaceTransform def test_SurfaceTransform_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), hemi=dict(argstr='--hemi %s', mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), out_file=dict(argstr='--tval %s', genfile=True, ), reshape=dict(argstr='--reshape', ), reshape_factor=dict(argstr='--reshape-factor', ), source_annot_file=dict(argstr='--sval-annot %s', mandatory=True, xor=['source_file'], ), source_file=dict(argstr='--sval %s', mandatory=True, xor=['source_annot_file'], ), source_subject=dict(argstr='--srcsubject %s', mandatory=True, ), source_type=dict(argstr='--sfmt %s', requires=['source_file'], ), subjects_dir=dict(), target_ico_order=dict(argstr='--trgicoorder %d', ), target_subject=dict(argstr='--trgsubject %s', mandatory=True, ), target_type=dict(argstr='--tfmt %s', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SurfaceTransform.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SurfaceTransform_outputs(): output_map = dict(out_file=dict(), ) outputs = 
SurfaceTransform.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py000066400000000000000000000026721227300005300273520ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import SynthesizeFLASH def test_SynthesizeFLASH_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fixed_weighting=dict(argstr='-w', position=1, ), flip_angle=dict(argstr='%.2f', mandatory=True, position=3, ), ignore_exception=dict(nohash=True, usedefault=True, ), out_file=dict(argstr='%s', genfile=True, ), pd_image=dict(argstr='%s', mandatory=True, position=6, ), subjects_dir=dict(), t1_image=dict(argstr='%s', mandatory=True, position=5, ), te=dict(argstr='%.3f', mandatory=True, position=4, ), terminal_output=dict(mandatory=True, nohash=True, ), tr=dict(argstr='%.2f', mandatory=True, position=2, ), ) inputs = SynthesizeFLASH.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SynthesizeFLASH_outputs(): output_map = dict(out_file=dict(), ) outputs = SynthesizeFLASH.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py000066400000000000000000000026251227300005300272040ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.preprocess import UnpackSDICOMDir def test_UnpackSDICOMDir_inputs(): input_map = dict(args=dict(argstr='%s', ), config=dict(argstr='-cfg %s', mandatory=True, xor=('run_info', 'config', 'seq_config'), ), dir_structure=dict(argstr='-%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), log_file=dict(argstr='-log %s', ), no_info_dump=dict(argstr='-noinfodump', ), no_unpack_err=dict(argstr='-no-unpackerr', ), output_dir=dict(argstr='-targ %s', ), run_info=dict(argstr='-run %d %s %s %s', mandatory=True, xor=('run_info', 'config', 'seq_config'), ), scan_only=dict(argstr='-scanonly %s', ), seq_config=dict(argstr='-seqcfg %s', mandatory=True, xor=('run_info', 'config', 'seq_config'), ), source_dir=dict(argstr='-src %s', mandatory=True, ), spm_zeropad=dict(argstr='-nspmzeropad %d', ), subjects_dir=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = UnpackSDICOMDir.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_preprocess.py000066400000000000000000000101621227300005300255550ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from shutil import rmtree import nibabel as nif import numpy as np from tempfile import mkdtemp from nipype.testing import (assert_equal, assert_false, assert_true, assert_raises, skipif) import nipype.interfaces.freesurfer as freesurfer def no_freesurfer(): if freesurfer.Info().version is None: return True else: return 
False def create_files_in_directory(): outdir = os.path.realpath(mkdtemp()) cwd = os.getcwd() os.chdir(outdir) filelist = ['a.nii','b.nii'] for f in filelist: hdr = nif.Nifti1Header() shape = (3,3,3,4) hdr.set_data_shape(shape) img = np.random.random(shape) nif.save(nif.Nifti1Image(img,np.eye(4),hdr), os.path.join(outdir,f)) return filelist, outdir, cwd def clean_directory(outdir, old_wd): if os.path.exists(outdir): rmtree(outdir) os.chdir(old_wd) @skipif(no_freesurfer) def test_robustregister(): filelist, outdir, cwd = create_files_in_directory() reg = freesurfer.RobustRegister() # make sure command gets called yield assert_equal, reg.cmd, 'mri_robust_register' # test raising error with mandatory args absent yield assert_raises, ValueError, reg.run # .inputs based parameters setting reg.inputs.source_file = filelist[0] reg.inputs.target_file = filelist[1] reg.inputs.auto_sens = True yield assert_equal, reg.cmdline, ('mri_robust_register ' '--satit --lta %s_robustreg.lta --mov %s --dst %s'%(filelist[0][:-4],filelist[0],filelist[1])) # constructor based parameter setting reg2 = freesurfer.RobustRegister(source_file=filelist[0],target_file=filelist[1],outlier_sens=3.0, out_reg_file='foo.lta', half_targ=True) yield assert_equal, reg2.cmdline, ('mri_robust_register --halfdst %s_halfway.nii --lta foo.lta ' '--sat 3.0000 --mov %s --dst %s' %(os.path.join(outdir,filelist[1][:-4]),filelist[0],filelist[1])) clean_directory(outdir, cwd) @skipif(no_freesurfer) def test_fitmsparams(): filelist, outdir, cwd = create_files_in_directory() fit = freesurfer.FitMSParams() # make sure command gets called yield assert_equal, fit.cmd, 'mri_ms_fitparms' # test raising error with mandatory args absent yield assert_raises, ValueError, fit.run # .inputs based parameters setting fit.inputs.in_files = filelist fit.inputs.out_dir = outdir yield assert_equal, fit.cmdline, 'mri_ms_fitparms %s %s %s'%(filelist[0],filelist[1],outdir) # constructor based parameter setting fit2 = freesurfer.FitMSParams(in_files=filelist,te_list=[1.5,3.5],flip_list=[20,30],out_dir=outdir) yield assert_equal, fit2.cmdline, ('mri_ms_fitparms -te %.3f -fa %.1f %s -te %.3f -fa %.1f %s %s' %(1.500,20.0,filelist[0],3.500,30.0,filelist[1],outdir)) clean_directory(outdir, cwd) @skipif(no_freesurfer) def test_synthesizeflash(): filelist, outdir, cwd = create_files_in_directory() syn = freesurfer.SynthesizeFLASH() # make sure command gets called yield assert_equal, syn.cmd, 'mri_synthesize' # test raising error with mandatory args absent yield assert_raises, ValueError, syn.run # .inputs based parameters setting syn.inputs.t1_image = filelist[0] syn.inputs.pd_image = filelist[1] syn.inputs.flip_angle = 30 syn.inputs.te = 4.5 syn.inputs.tr = 20 yield assert_equal, syn.cmdline, ('mri_synthesize 20.00 30.00 4.500 %s %s %s' %(filelist[0],filelist[1],os.path.join(outdir,'synth-flash_30.mgz'))) # constructor based parameters setting syn2 = freesurfer.SynthesizeFLASH(t1_image=filelist[0],pd_image=filelist[1],flip_angle=20,te=5,tr=25) yield assert_equal, syn2.cmdline, ('mri_synthesize 25.00 20.00 5.000 %s %s %s' %(filelist[0],filelist[1],os.path.join(outdir,'synth-flash_20.mgz'))) nipype-0.9.2/nipype/interfaces/freesurfer/tests/test_utils.py000066400000000000000000000162151227300005300245350ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from tempfile import mkdtemp from shutil import rmtree import numpy as np import nibabel as nif from nipype.testing 
import (assert_equal, assert_not_equal, assert_raises, skipif) from nipype.interfaces.base import TraitError import nipype.interfaces.freesurfer as fs def no_freesurfer(): if fs.Info().version is None: return True else: return False def create_files_in_directory(): outdir = os.path.realpath(mkdtemp()) cwd = os.getcwd() os.chdir(outdir) filelist = ['a.nii','b.nii'] for f in filelist: hdr = nif.Nifti1Header() shape = (3,3,3,4) hdr.set_data_shape(shape) img = np.random.random(shape) nif.save(nif.Nifti1Image(img,np.eye(4),hdr), os.path.join(outdir,f)) with open(os.path.join(outdir, 'reg.dat'), 'wt') as fp: fp.write('dummy file') filelist.append('reg.dat') return filelist, outdir, cwd def create_surf_file(): outdir = os.path.realpath(mkdtemp()) cwd = os.getcwd() os.chdir(outdir) surf = 'lh.a.nii' hdr = nif.Nifti1Header() shape = (1,100,1) hdr.set_data_shape(shape) img = np.random.random(shape) nif.save(nif.Nifti1Image(img,np.eye(4),hdr), os.path.join(outdir,surf)) return surf, outdir, cwd def clean_directory(outdir, old_wd): if os.path.exists(outdir): rmtree(outdir) os.chdir(old_wd) @skipif(no_freesurfer) def test_sample2surf(): s2s = fs.SampleToSurface() # Test underlying command yield assert_equal, s2s.cmd, 'mri_vol2surf' # Test mandatory args exception yield assert_raises, ValueError, s2s.run # Create testing files files, cwd, oldwd = create_files_in_directory() # Test input settings s2s.inputs.source_file = files[0] s2s.inputs.reference_file = files[1] s2s.inputs.hemi = "lh" s2s.inputs.reg_file = files[2] s2s.inputs.sampling_range = .5 s2s.inputs.sampling_units = "frac" s2s.inputs.sampling_method = "point" # Test a basic command line yield assert_equal, s2s.cmdline, ("mri_vol2surf " "--hemi lh --o %s --ref %s --reg reg.dat --projfrac 0.500 --mov %s" %(os.path.join(cwd, "lh.a.mgz"),files[1],files[0])) # Test identity s2sish = fs.SampleToSurface(source_file = files[1], reference_file = files[0],hemi="rh") yield assert_not_equal, s2s, s2sish # Test hits file name creation s2s.inputs.hits_file = True yield assert_equal, s2s._get_outfilename("hits_file"), os.path.join(cwd, "lh.a_hits.mgz") # Test that a 2-tuple range raises an error def set_illegal_range(): s2s.inputs.sampling_range = (.2, .5) yield assert_raises, TraitError, set_illegal_range # Clean up our mess clean_directory(cwd, oldwd) @skipif(no_freesurfer) def test_surfsmooth(): smooth = fs.SurfaceSmooth() # Test underlying command yield assert_equal, smooth.cmd, "mri_surf2surf" # Test mandatory args exception yield assert_raises, ValueError, smooth.run # Create testing files surf, cwd, oldwd = create_surf_file() # Test input settings smooth.inputs.in_file = surf smooth.inputs.subject_id = "fsaverage" fwhm = 5 smooth.inputs.fwhm = fwhm smooth.inputs.hemi = "lh" # Test the command line yield assert_equal, smooth.cmdline, \ ("mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval %s --tval %s/lh.a_smooth%d.nii --s fsaverage"% (surf, cwd, fwhm)) # Test identity shmooth = fs.SurfaceSmooth( subject_id="fsaverage", fwhm=6, in_file=surf, hemi="lh", out_file="lh.a_smooth.nii") yield assert_not_equal, smooth, shmooth # Clean up clean_directory(cwd, oldwd) @skipif(no_freesurfer) def test_surfxfm(): xfm = fs.SurfaceTransform() # Test underlying command yield assert_equal, xfm.cmd, "mri_surf2surf" # Test mandatory args exception yield assert_raises, ValueError, xfm.run # Create testing files surf, cwd, oldwd = create_surf_file() # Test input settings xfm.inputs.source_file = surf xfm.inputs.source_subject = "my_subject" xfm.inputs.target_subject = 
"fsaverage" xfm.inputs.hemi = "lh" # Test the command line yield assert_equal, xfm.cmdline, \ ("mri_surf2surf --hemi lh --tval %s/lh.a.fsaverage.nii --sval %s --srcsubject my_subject --trgsubject fsaverage"% (cwd, surf)) # Test identity xfmish = fs.SurfaceTransform( source_subject="fsaverage", target_subject="my_subject", source_file=surf, hemi="lh") yield assert_not_equal, xfm, xfmish # Clean up clean_directory(cwd, oldwd) @skipif(no_freesurfer) def test_applymask(): masker = fs.ApplyMask() filelist, testdir, origdir = create_files_in_directory() # Test underlying command yield assert_equal, masker.cmd, "mri_mask" # Test exception with mandatory args absent yield assert_raises, ValueError, masker.run for input in ["in_file", "mask_file"]: indict = {input:filelist[0]} willbreak = fs.ApplyMask(**indict) yield assert_raises, ValueError, willbreak.run # Now test a basic command line masker.inputs.in_file = filelist[0] masker.inputs.mask_file = filelist[1] outfile = os.path.join(testdir, "a_masked.nii") yield assert_equal, masker.cmdline, "mri_mask a.nii b.nii %s"%outfile # Now test that optional inputs get formatted properly masker.inputs.mask_thresh = 2 yield assert_equal, masker.cmdline, "mri_mask -T 2.0000 a.nii b.nii %s"%outfile masker.inputs.use_abs = True yield assert_equal, masker.cmdline, "mri_mask -T 2.0000 -abs a.nii b.nii %s"%outfile # Now clean up clean_directory(testdir, origdir) @skipif(no_freesurfer) def test_surfshots(): fotos = fs.SurfaceSnapshots() # Test underlying command yield assert_equal, fotos.cmd, "tksurfer" # Test mandatory args exception yield assert_raises, ValueError, fotos.run # Create testing files files, cwd, oldwd = create_files_in_directory() # Test input settins fotos.inputs.subject_id = "fsaverage" fotos.inputs.hemi = "lh" fotos.inputs.surface = "pial" # Test a basic command line yield assert_equal, fotos.cmdline, "tksurfer fsaverage lh pial -tcl snapshots.tcl" # Test identity schmotos = fs.SurfaceSnapshots(subject_id="mysubject",hemi="rh",surface="white") yield assert_not_equal, fotos, schmotos # Test that the tcl script gets written fotos._write_tcl_script() yield assert_equal, True, os.path.exists("snapshots.tcl") # Test that we can use a different tcl script foo = open("other.tcl", "w").close() fotos.inputs.tcl_script = "other.tcl" yield assert_equal, fotos.cmdline, "tksurfer fsaverage lh pial -tcl other.tcl" # Test that the interface crashes politely if graphics aren't enabled try: hold_display = os.environ["DISPLAY"] del os.environ["DISPLAY"] yield assert_raises, RuntimeError, fotos.run os.environ["DISPLAY"] = hold_display except KeyError: pass # Clean up our mess clean_directory(cwd, oldwd) nipype-0.9.2/nipype/interfaces/freesurfer/utils.py000066400000000000000000001403141227300005300223320ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Interfaces to assorted Freesurfer utility programs. 
Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ __docformat__ = 'restructuredtext' import os import re from nipype.utils.filemanip import fname_presuffix, split_filename from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec from nipype.interfaces.base import TraitedSpec, File, traits, OutputMultiPath, isdefined, CommandLine, CommandLineInputSpec filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc', afni='brik', brik='brik', bshort='bshort', spm='img', analyze='img', analyze4d='img', bfloat='bfloat', nifti1='img', nii='nii', niigz='nii.gz') filetypes = ['cor', 'mgh', 'mgz', 'minc', 'analyze', 'analyze4d', 'spm', 'afni', 'brik', 'bshort', 'bfloat', 'sdt', 'outline', 'otl', 'gdf', 'nifti1', 'nii', 'niigz'] class SampleToSurfaceInputSpec(FSTraitedSpec): source_file = File(exists=True, mandatory=True, argstr="--mov %s", desc="volume to sample values from") reference_file = File(exists=True, argstr="--ref %s", desc="reference volume (default is orig.mgz)") hemi = traits.Enum("lh", "rh", mandatory=True, argstr="--hemi %s", desc="target hemisphere") surface = traits.String(argstr="--surf %s", desc="target surface (default is white)") reg_xors = ["reg_file", "reg_header", "mni152reg"] reg_file = File(exists=True, argstr="--reg %s", mandatory=True, xor=reg_xors, desc="source-to-reference registration file") reg_header = traits.Bool(argstr="--regheader %s", requires=["subject_id"], mandatory=True, xor=reg_xors, desc="register based on header geometry") mni152reg = traits.Bool(argstr="--mni152reg", mandatory=True, xor=reg_xors, desc="source volume is in MNI152 space") apply_rot = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr="--rot %.3f %.3f %.3f", desc="rotation angles (in degrees) to apply to reg matrix") apply_trans = traits.Tuple(traits.Float, traits.Float, traits.Float, argstr="--trans %.3f %.3f %.3f", desc="translation (in mm) to apply to reg matrix") override_reg_subj = traits.Bool(argstr="--srcsubject %s", requires=["subject_id"], desc="override the subject in the reg file header") sampling_method = traits.Enum("point", "max", "average", mandatory=True, argstr="%s", xor=["projection_stem"], requires=["sampling_range", "sampling_units"], desc="how to sample -- at a point or at the max or average over a range") sampling_range = traits.Either(traits.Float, traits.Tuple(traits.Float, traits.Float, traits.Float), desc="sampling range - a point or a tuple of (min, max, step)") sampling_units = traits.Enum("mm", "frac", desc="sampling range type -- either 'mm' or 'frac'") projection_stem = traits.String(mandatory=True, xor=["sampling_method"], desc="stem for precomputed linear estimates and volume fractions") smooth_vol = traits.Float(argstr="--fwhm %.3f", desc="smooth input volume (mm fwhm)") smooth_surf = traits.Float(argstr="--surf-fwhm %.3f", desc="smooth output surface (mm fwhm)") interp_method = traits.Enum("nearest", "trilinear", argstr="--interp %s", desc="interpolation method") cortex_mask = traits.Bool(argstr="--cortex", xor=["mask_label"], desc="mask the target surface with hemi.cortex.label") mask_label = File(exists=True, argstr="--mask %s", xor=["cortex_mask"], desc="label file to mask output with") float2int_method = traits.Enum("round", "tkregister", argstr="--float2int %s", desc="method to convert reg matrix values (default is round)") fix_tk_reg = 
traits.Bool(argstr="--fixtkreg", desc="make reg matrix round-compatible") subject_id = traits.String(desc="subject id") target_subject = traits.String(argstr="--trgsubject %s", desc="sample to surface of different subject than source") surf_reg = traits.Bool(argstr="--surfreg", requires=["target_subject"], desc="use surface registration to target subject") ico_order = traits.Int(argstr="--icoorder %d", requires=["target_subject"], desc="icosahedron order when target_subject is 'ico'") reshape = traits.Bool(argstr="--reshape", xor=["no_reshape"], desc="reshape surface vector to fit in non-mgh format") no_reshape = traits.Bool(argstr="--noreshape", xor=["reshape"], desc="do not reshape surface vector (default)") reshape_slices = traits.Int(argstr="--rf %d", desc="number of 'slices' for reshaping") scale_input = traits.Float(argstr="--scale %.3f", desc="multiple all intensities by scale factor") frame = traits.Int(argstr="--frame %d", desc="save only one frame (0-based)") out_file = File(argstr="--o %s", genfile=True, desc="surface file to write") out_type = traits.Enum(filetypes, argstr="--out_type %s", desc="output file type") hits_file = traits.Either(traits.Bool, File(exists=True), argstr="--srchit %s", desc="save image with number of hits at each voxel") hits_type = traits.Enum(filetypes, argstr="--srchit_type", desc="hits file type") vox_file = traits.Either(traits.Bool, File, argstr="--nvox %s", desc="text file with the number of voxels intersecting the surface") class SampleToSurfaceOutputSpec(TraitedSpec): out_file = File(exists=True, desc="surface file") hits_file = File(exists=True, desc="image with number of hits at each voxel") vox_file = File(exists=True, desc="text file with the number of voxels intersecting the surface") class SampleToSurface(FSCommand): """Sample a volume to the cortical surface using Freesurfer's mri_vol2surf. You must supply a sampling method, range, and units. You can project either a given distance (in mm) or a given fraction of the cortical thickness at that vertex along the surface normal from the target surface, and then set the value of that vertex to be either the value at that point or the average or maximum value found along the projection vector. By default, the surface will be saved as a vector with a length equal to the number of vertices on the target surface. This is not a problem for Freesurfer programs, but if you intend to use the file with interfaces to another package, you must set the ``reshape`` input to True, which will factor the surface vector into a matrix with dimensions compatible with proper Nifti files. 
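
    As an illustrative sketch (not a tested doctest, and using placeholder
    file names), a reshaped Nifti output could be requested with the
    ``reshape`` and ``out_type`` inputs defined in the spec above, with
    ``nipype.interfaces.freesurfer`` imported as ``fs`` as in the example
    below::

        sampler = fs.SampleToSurface(hemi="lh",
                                     source_file="cope1.nii.gz",
                                     reg_file="register.dat",
                                     sampling_method="average",
                                     sampling_range=1,
                                     sampling_units="frac",
                                     reshape=True,
                                     out_type="niigz")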
Examples -------- >>> import nipype.interfaces.freesurfer as fs >>> sampler = fs.SampleToSurface(hemi="lh") >>> sampler.inputs.source_file = "cope1.nii.gz" >>> sampler.inputs.reg_file = "register.dat" >>> sampler.inputs.sampling_method = "average" >>> sampler.inputs.sampling_range = 1 >>> sampler.inputs.sampling_units = "frac" >>> res = sampler.run() # doctest: +SKIP """ _cmd = "mri_vol2surf" input_spec = SampleToSurfaceInputSpec output_spec = SampleToSurfaceOutputSpec filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc', afni='brik', brik='brik', bshort='bshort', spm='img', analyze='img', analyze4d='img', bfloat='bfloat', nifti1='img', nii='nii', niigz='nii.gz') def _format_arg(self, name, spec, value): if name == "sampling_method": range = self.inputs.sampling_range units = self.inputs.sampling_units if units == "mm": units = "dist" if isinstance(range, tuple): range = "%.3f %.3f %.3f" % range else: range = "%.3f" % range method = dict(point="", max="-max", average="-avg")[value] return "--proj%s%s %s" % (units, method, range) if name == "reg_header": return spec.argstr % self.inputs.subject_id if name == "override_reg_subj": return spec.argstr % self.inputs.subject_id if name in ["hits_file", "vox_file"]: return spec.argstr % self._get_outfilename(name) return super(SampleToSurface, self)._format_arg(name, spec, value) def _get_outfilename(self, opt="out_file"): outfile = getattr(self.inputs, opt) if not isdefined(outfile) or isinstance(outfile, bool): if isdefined(self.inputs.out_type): if opt == "hits_file": suffix = '_hits.' + self.filemap[self.inputs.out_type] else: suffix = '.' + self.filemap[self.inputs.out_type] elif opt == "hits_file": suffix = "_hits.mgz" else: suffix = '.mgz' outfile = fname_presuffix(self.inputs.source_file, newpath=os.getcwd(), prefix=self.inputs.hemi + ".", suffix=suffix, use_ext=False) return outfile def _list_outputs(self): outputs = self._outputs().get() outputs["out_file"] = os.path.abspath(self._get_outfilename()) hitsfile = self.inputs.hits_file if isdefined(hitsfile): outputs["hits_file"] = hitsfile if isinstance(hitsfile, bool): hitsfile = self._get_outfilename("hits_file") voxfile = self.inputs.vox_file if isdefined(voxfile): if isinstance(voxfile, bool): voxfile = fname_presuffix(self.inputs.source_file, newpath=os.getcwd(), prefix=self.inputs.hemi + ".", suffix="_vox.txt", use_ext=False) outputs["vox_file"] = voxfile return outputs def _gen_filename(self, name): if name == "out_file": return self._list_outputs()[name] return None class SurfaceSmoothInputSpec(FSTraitedSpec): in_file = File(mandatory=True, argstr="--sval %s", desc="source surface file") subject_id = traits.String(mandatory=True, argstr="--s %s", desc="subject id of surface file") hemi = traits.Enum("lh", "rh", argstr="--hemi %s", mandatory=True, desc="hemisphere to operate on") fwhm = traits.Float(argstr="--fwhm %.4f", xor=["smooth_iters"], desc="effective FWHM of the smoothing process") smooth_iters = traits.Int(argstr="--smooth %d", xor=["fwhm"], desc="iterations of the smoothing process") cortex = traits.Bool(True, argstr="--cortex", usedefault=True, desc="only smooth within $hemi.cortex.label") reshape = traits.Bool(argstr="--reshape", desc="reshape surface vector to fit in non-mgh format") out_file = File(argstr="--tval %s", genfile=True, desc="surface file to write") class SurfaceSmoothOutputSpec(TraitedSpec): out_file = File(exists=True, desc="smoothed surface file") class SurfaceSmooth(FSCommand): """Smooth a surface image with mri_surf2surf. 
The surface is smoothed by an interative process of averaging the value at each vertex with those of its adjacent neighbors. You may supply either the number of iterations to run or a desired effective FWHM of the smoothing process. If the latter, the underlying program will calculate the correct number of iterations internally. .. seealso:: SmoothTessellation() Interface For smoothing a tessellated surface (e.g. in gifti or .stl) Examples -------- >>> import nipype.interfaces.freesurfer as fs >>> smoother = fs.SurfaceSmooth() >>> smoother.inputs.in_file = "lh.cope1.mgz" >>> smoother.inputs.subject_id = "subj_1" >>> smoother.inputs.hemi = "lh" >>> smoother.inputs.fwhm = 5 >>> smoother.run() # doctest: +SKIP """ _cmd = "mri_surf2surf" input_spec = SurfaceSmoothInputSpec output_spec = SurfaceSmoothOutputSpec def _list_outputs(self): outputs = self._outputs().get() outputs["out_file"] = self.inputs.out_file if not isdefined(outputs["out_file"]): in_file = self.inputs.in_file if isdefined(self.inputs.fwhm): kernel = self.inputs.fwhm else: kernel = self.inputs.smooth_iters outputs["out_file"] = fname_presuffix(in_file, suffix="_smooth%d" % kernel, newpath=os.getcwd()) return outputs def _gen_filename(self, name): if name == "out_file": return self._list_outputs()[name] return None class SurfaceTransformInputSpec(FSTraitedSpec): source_file = File(exists=True, mandatory=True, argstr="--sval %s", xor=['source_annot_file'], desc="surface file with source values") source_annot_file = File(exists=True, mandatory=True, argstr="--sval-annot %s", xor=['source_file'], desc="surface annotation file") source_subject = traits.String(mandatory=True, argstr="--srcsubject %s", desc="subject id for source surface") hemi = traits.Enum("lh", "rh", argstr="--hemi %s", mandatory=True, desc="hemisphere to transform") target_subject = traits.String(mandatory=True, argstr="--trgsubject %s", desc="subject id of target surface") target_ico_order = traits.Enum(1, 2, 3, 4, 5, 6, 7, argstr="--trgicoorder %d", desc=("order of the icosahedron if " "target_subject is 'ico'")) source_type = traits.Enum(filetypes, argstr='--sfmt %s', requires=['source_file'], desc="source file format") target_type = traits.Enum(filetypes, argstr='--tfmt %s', desc="output format") reshape = traits.Bool(argstr="--reshape", desc="reshape output surface to conform with Nifti") reshape_factor = traits.Int(argstr="--reshape-factor", desc="number of slices in reshaped image") out_file = File(argstr="--tval %s", genfile=True, desc="surface file to write") class SurfaceTransformOutputSpec(TraitedSpec): out_file = File(exists=True, desc="transformed surface file") class SurfaceTransform(FSCommand): """Transform a surface file from one subject to another via a spherical registration. Both the source and target subject must reside in your Subjects Directory, and they must have been processed with recon-all, unless you are transforming to one of the icosahedron meshes. 
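
    As an illustrative sketch (not a tested doctest, with placeholder subject
    and file names), resampling onto a 5th-order icosahedron mesh would use
    the ``target_subject`` and ``target_ico_order`` inputs declared above::

        sxfm = SurfaceTransform(source_file="lh.cope1.nii.gz",
                                source_subject="my_subject",
                                target_subject="ico",
                                target_ico_order=5,
                                hemi="lh")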
Examples -------- >>> from nipype.interfaces.freesurfer import SurfaceTransform >>> sxfm = SurfaceTransform() >>> sxfm.inputs.source_file = "lh.cope1.nii.gz" >>> sxfm.inputs.source_subject = "my_subject" >>> sxfm.inputs.target_subject = "fsaverage" >>> sxfm.inputs.hemi = "lh" >>> sxfm.run() # doctest: +SKIP """ _cmd = "mri_surf2surf" input_spec = SurfaceTransformInputSpec output_spec = SurfaceTransformOutputSpec def _list_outputs(self): outputs = self._outputs().get() outputs["out_file"] = self.inputs.out_file if not isdefined(outputs["out_file"]): source = self.inputs.source_file # Some recon-all files don't have a proper extension (e.g. "lh.thickness") # so we have to account for that here bad_extensions = [".%s" % e for e in ["area", "mid", "pial", "avg_curv", "curv", "inflated", "jacobian_white", "orig", "nofix", "smoothwm", "crv", "sphere", "sulc", "thickness", "volume", "white"]] use_ext = True if split_filename(source)[2] in bad_extensions: source = source + ".stripme" use_ext = False ext = "" if isdefined(self.inputs.target_type): ext = "." + filemap[self.inputs.target_type] use_ext = False outputs["out_file"] = fname_presuffix(source, suffix=".%s%s" % (self.inputs.target_subject, ext), newpath=os.getcwd(), use_ext=use_ext) else: outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs def _gen_filename(self, name): if name == "out_file": return self._list_outputs()[name] return None class Surface2VolTransformInputSpec(FSTraitedSpec): source_file = File(exists=True, argstr='--surfval %s', copyfile=False, mandatory=True, desc='This is the source of the surface values') hemi = traits.Str(argstr='--hemi %s', mandatory=True, desc='hemisphere of data') transformed_file = File(name_template="%s_asVol.nii", desc='Output volume', argstr='--outvol %s', name_source=['source_file'], hash_files=False) reg_file = File(exists=True, argstr='--volreg %s', mandatory=True, desc='tkRAS-to-tkRAS matrix (tkregister2 format)', xor=['subject_id']) template_file = File(exists=True, argstr='--template %s', desc='Output template volume') mkmask = traits.Bool(desc='make a mask instead of loading surface values', argstr='--mkmask') vertexvol_file = File(name_template="%s_asVol_vertex.nii", desc=('Path name of the vertex output volume, which ' 'is the same as output volume except that the ' 'value of each voxel is the vertex-id that is ' 'mapped to that voxel.'), argstr='--vtxvol %s', name_source=['source_file'], hash_files=False) surf_name = traits.Str(argstr='--surf %s', desc='surfname (default is white)') projfrac = traits.Float(argstr='--projfrac %s', desc='thickness fraction') subjects_dir = traits.Str(argstr='--sd %s', desc=('freesurfer subjects directory defaults to ' '$SUBJECTS_DIR')) subject_id = traits.Str(argstr='--identity %s',desc='subject id', xor=['reg_file']) class Surface2VolTransformOutputSpec(TraitedSpec): transformed_file = File(exists=True, desc='Path to output file if used normally') vertexvol_file = File(desc='vertex map volume path id. Optional') class Surface2VolTransform(FSCommand): """Use FreeSurfer mri_surf2vol to apply a transform. Examples -------- >>> from nipype.interfaces.freesurfer import Surface2VolTransform >>> xfm2vol = Surface2VolTransform() >>> xfm2vol.inputs.source_file = 'lh.cope1.mgz' >>> xfm2vol.inputs.reg_file = 'register.mat' >>> xfm2vol.inputs.hemi = 'lh' >>> xfm2vol.inputs.template_file = 'cope1.nii.gz' >>> xfm2vol.inputs.subjects_dir = '.' >>> xfm2vol.cmdline 'mri_surf2vol --hemi lh --volreg register.mat --surfval lh.cope1.mgz --sd . 
--template cope1.nii.gz --outvol lh.cope1_asVol.nii --vtxvol lh.cope1_asVol_vertex.nii' >>> res = xfm2vol.run()# doctest: +SKIP """ _cmd = 'mri_surf2vol' input_spec = Surface2VolTransformInputSpec output_spec = Surface2VolTransformOutputSpec class ApplyMaskInputSpec(FSTraitedSpec): in_file = File(exists=True, mandatory=True, position=-3, argstr="%s", desc="input image (will be masked)") mask_file = File(exists=True, mandatory=True, position=-2, argstr="%s", desc="image defining mask space") out_file = File(genfile=True, position=-1, argstr="%s", desc="final image to write") xfm_file = File(exists=True, argstr="-xform %s", desc="LTA-format transformation matrix to align mask with input") invert_xfm = traits.Bool(argstr="-invert", desc="invert transformation") xfm_source = File(exists=True, argstr="-lta_src %s", desc="image defining transform source space") xfm_target = File(exists=True, argstr="-lta_dst %s", desc="image defining transform target space") use_abs = traits.Bool(argstr="-abs", desc="take absolute value of mask before applying") mask_thresh = traits.Float(argstr="-T %.4f", desc="threshold mask before applying") class ApplyMaskOutputSpec(TraitedSpec): out_file = File(exists=True, desc="masked image") class ApplyMask(FSCommand): """Use Freesurfer's mri_mask to apply a mask to an image. The mask file need not be binarized; it can be thresholded above a given value before application. It can also optionally be transformed into input space with an LTA matrix. """ _cmd = "mri_mask" input_spec = ApplyMaskInputSpec output_spec = ApplyMaskOutputSpec def _list_outputs(self): outputs = self._outputs().get() outputs["out_file"] = self.inputs.out_file if not isdefined(outputs["out_file"]): outputs["out_file"] = fname_presuffix(self.inputs.in_file, suffix="_masked", newpath=os.getcwd(), use_ext=True) else: outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): if name == "out_file": return self._list_outputs()[name] return None class SurfaceSnapshotsInputSpec(FSTraitedSpec): subject_id = traits.String(position=1, argstr="%s", mandatory=True, desc="subject to visualize") hemi = traits.Enum("lh", "rh", position=2, argstr="%s", mandatory=True, desc="hemisphere to visualize") surface = traits.String(position=3, argstr="%s", mandatory=True, desc="surface to visualize") show_curv = traits.Bool(argstr="-curv", desc="show curvature", xor=["show_gray_curv"]) show_gray_curv = traits.Bool(argstr="-gray", desc="show curvature in gray", xor=["show_curv"]) overlay = File(exists=True, argstr="-overlay %s", desc="load an overlay volume/surface", requires=["overlay_range"]) reg_xors = ["overlay_reg", "identity_reg", "mni152_reg"] overlay_reg = traits.File(exists=True, argstr="-overlay-reg %s", xor=reg_xors, desc="registration matrix file to register overlay to surface") identity_reg = traits.Bool(argstr="-overlay-reg-identity", xor=reg_xors, desc="use the identity matrix to register the overlay to the surface") mni152_reg = traits.Bool(argstr="-mni152reg", xor=reg_xors, desc="use to display a volume in MNI152 space on the average subject") overlay_range = traits.Either(traits.Float, traits.Tuple(traits.Float, traits.Float), traits.Tuple(traits.Float, traits.Float, traits.Float), desc="overlay range--either min, (min, max) or (min, mid, max)", argstr="%s") overlay_range_offset = traits.Float(argstr="-foffset %.3f", desc="overlay range will be symettric around offset value") truncate_overlay = traits.Bool(argstr="-truncphaseflag 1", desc="truncate the overlay 
display") reverse_overlay = traits.Bool(argstr="-revphaseflag 1", desc="reverse the overlay display") invert_overlay = traits.Bool(argstr="-invphaseflag 1", desc="invert the overlay display") demean_overlay = traits.Bool(argstr="-zm", desc="remove mean from overlay") annot_file = File(exists=True, argstr="-annotation %s", xor=["annot_name"], desc="path to annotation file to display") annot_name = traits.String(argstr="-annotation %s", xor=["annot_file"], desc="name of annotation to display (must be in $subject/label directory") label_file = File(exists=True, argstr="-label %s", xor=["label_name"], desc="path to label file to display") label_name = traits.String(argstr="-label %s", xor=["label_file"], desc="name of label to display (must be in $subject/label directory") colortable = File(exists=True, argstr="-colortable %s", desc="load colortable file") label_under = traits.Bool(argstr="-labels-under", desc="draw label/annotation under overlay") label_outline = traits.Bool(argstr="-label-outline", desc="draw label/annotation as outline") patch_file = File(exists=True, argstr="-patch %s", desc="load a patch") orig_suffix = traits.String(argstr="-orig %s", desc="set the orig surface suffix string") sphere_suffix = traits.String(argstr="-sphere %s", desc="set the sphere.reg suffix string") show_color_scale = traits.Bool(argstr="-colscalebarflag 1", desc="display the color scale bar") show_color_text = traits.Bool(argstr="-colscaletext 1", desc="display text in the color scale bar") six_images = traits.Bool(desc="also take anterior and posterior snapshots") screenshot_stem = traits.String(desc="stem to use for screenshot file names") stem_template_args = traits.List(traits.String, requires=["screenshot_stem"], desc="input names to use as arguments for a string-formated stem template") tcl_script = File(exists=True, argstr="%s", genfile=True, desc="override default screenshot script") class SurfaceSnapshotsOutputSpec(TraitedSpec): snapshots = OutputMultiPath(File(exists=True), desc="tiff images of the surface from different perspectives") class SurfaceSnapshots(FSCommand): """Use Tksurfer to save pictures of the cortical surface. By default, this takes snapshots of the lateral, medial, ventral, and dorsal surfaces. See the ``six_images`` option to add the anterior and posterior surfaces. You may also supply your own tcl script (see the Freesurfer wiki for information on scripting tksurfer). The screenshot stem is set as the environment variable "_SNAPSHOT_STEM", which you can use in your own scripts. Node that this interface will not run if you do not have graphics enabled on your system. 
Examples -------- >>> import nipype.interfaces.freesurfer as fs >>> shots = fs.SurfaceSnapshots(subject_id="fsaverage", hemi="lh", surface="pial") >>> shots.inputs.overlay = "zstat1.nii.gz" >>> shots.inputs.overlay_range = (2.3, 6) >>> shots.inputs.overlay_reg = "register.dat" >>> res = shots.run() # doctest: +SKIP """ _cmd = "tksurfer" input_spec = SurfaceSnapshotsInputSpec output_spec = SurfaceSnapshotsOutputSpec def _format_arg(self, name, spec, value): if name == "tcl_script": if not isdefined(value): return "-tcl snapshots.tcl" else: return "-tcl %s" % value elif name == "overlay_range": if isinstance(value, float): return "-fthresh %.3f" % value else: if len(value) == 2: return "-fminmax %.3f %.3f" % value else: return "-fminmax %.3f %.3f -fmid %.3f" % (value[0], value[2], value[1]) elif name == "annot_name" and isdefined(value): # Matching annot by name needs to strip the leading hemi and trailing # extension strings if value.endswith(".annot"): value = value[:-6] if re.match("%s[\.\-_]" % self.inputs.hemi, value[:3]): value = value[3:] return "-annotation %s" % value return super(SurfaceSnapshots, self)._format_arg(name, spec, value) def _run_interface(self, runtime): if not isdefined(self.inputs.screenshot_stem): stem = "%s_%s_%s" % ( self.inputs.subject_id, self.inputs.hemi, self.inputs.surface) else: stem = self.inputs.screenshot_stem stem_args = self.inputs.stem_template_args if isdefined(stem_args): args = tuple([getattr(self.inputs, arg) for arg in stem_args]) stem = stem % args # Check if the DISPLAY variable is set -- should avoid crashes (might not?) if not "DISPLAY" in os.environ: raise RuntimeError("Graphics are not enabled -- cannot run tksurfer") runtime.environ["_SNAPSHOT_STEM"] = stem self._write_tcl_script() runtime = super(SurfaceSnapshots, self)._run_interface(runtime) # If a display window can't be opened, this will crash on # aggregate_outputs. Let's try to parse stderr and raise a # better exception here if that happened. errors = ["surfer: failed, no suitable display found", "Fatal Error in tksurfer.bin: could not open display"] for err in errors: if err in runtime.stderr: self.raise_exception(runtime) # Tksurfer always (or at least always when you run a tcl script) # exits with a nonzero returncode. We have to force it to 0 here. 
runtime.returncode = 0 return runtime def _write_tcl_script(self): fid = open("snapshots.tcl", "w") script = ["save_tiff $env(_SNAPSHOT_STEM)-lat.tif", "make_lateral_view", "rotate_brain_y 180", "redraw", "save_tiff $env(_SNAPSHOT_STEM)-med.tif", "make_lateral_view", "rotate_brain_x 90", "redraw", "save_tiff $env(_SNAPSHOT_STEM)-ven.tif", "make_lateral_view", "rotate_brain_x -90", "redraw", "save_tiff $env(_SNAPSHOT_STEM)-dor.tif"] if isdefined(self.inputs.six_images) and self.inputs.six_images: script.extend(["make_lateral_view", "rotate_brain_y 90", "redraw", "save_tiff $env(_SNAPSHOT_STEM)-pos.tif", "make_lateral_view", "rotate_brain_y -90", "redraw", "save_tiff $env(_SNAPSHOT_STEM)-ant.tif"]) script.append("exit") fid.write("\n".join(script)) fid.close() def _list_outputs(self): outputs = self._outputs().get() if not isdefined(self.inputs.screenshot_stem): stem = "%s_%s_%s" % (self.inputs.subject_id, self.inputs.hemi, self.inputs.surface) else: stem = self.inputs.screenshot_stem stem_args = self.inputs.stem_template_args if isdefined(stem_args): args = tuple([getattr(self.inputs, arg) for arg in stem_args]) stem = stem % args snapshots = ["%s-lat.tif", "%s-med.tif", "%s-dor.tif", "%s-ven.tif"] if self.inputs.six_images: snapshots.extend(["%s-pos.tif", "%s-ant.tif"]) snapshots = [self._gen_fname(f % stem, suffix="") for f in snapshots] outputs["snapshots"] = snapshots return outputs def _gen_filename(self, name): if name == "tcl_script": return "snapshots.tcl" return None class ImageInfoInputSpec(FSTraitedSpec): in_file = File(exists=True, position=1, argstr="%s", desc="image to query") class ImageInfoOutputSpec(TraitedSpec): info = traits.Any(desc="output of mri_info") out_file = File(exists=True, desc="text file with image information") data_type = traits.String(desc="image data type") file_format = traits.String(desc="file format") TE = traits.String(desc="echo time (msec)") TR = traits.String(desc="repetition time(msec)") TI = traits.String(desc="inversion time (msec)") dimensions = traits.Tuple(desc="image dimensions (voxels)") vox_sizes = traits.Tuple(desc="voxel sizes (mm)") orientation = traits.String(desc="image orientation") ph_enc_dir = traits.String(desc="phase encode direction") class ImageInfo(FSCommand): _cmd = "mri_info" input_spec = ImageInfoInputSpec output_spec = ImageInfoOutputSpec def info_regexp(self, info, field, delim="\n"): m = re.search("%s\s*:\s+(.+?)%s" % (field, delim), info) if m: return m.group(1) else: return None def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() info = runtime.stdout outputs.info = info # Pulse sequence parameters for field in ["TE", "TR", "TI"]: fieldval = self.info_regexp(info, field, ", ") if fieldval.endswith(" msec"): fieldval = fieldval[:-5] setattr(outputs, field, fieldval) # Voxel info vox = self.info_regexp(info, "voxel sizes") vox = tuple(vox.split(", ")) outputs.vox_sizes = vox dim = self.info_regexp(info, "dimensions") dim = tuple([int(d) for d in dim.split(" x ")]) outputs.dimensions = dim outputs.orientation = self.info_regexp(info, "Orientation") outputs.ph_enc_dir = self.info_regexp(info, "PhEncDir") # File format and datatype are both keyed by "type" ftype, dtype = re.findall("%s\s*:\s+(.+?)\n" % "type", info) outputs.file_format = ftype outputs.data_type = dtype return outputs class MRIsConvertInputSpec(FSTraitedSpec): """ Uses Freesurfer's mris_convert to convert surface files to various formats """ annot_file = File(exists=True, argstr="--annot %s", desc="input is annotation or 
gifti label data") parcstats_file = File(exists=True, argstr="--parcstats %s", desc="infile is name of text file containing label/val pairs") label_file = File(exists=True, argstr="--label %s", desc="infile is .label file, label is name of this label") scalarcurv_file = File(exists=True, argstr="-c %s", desc="input is scalar curv overlay file (must still specify surface)") functional_file = File(exists=True, argstr="-f %s", desc="input is functional time-series or other multi-frame data (must specify surface)") labelstats_outfile = File(exists=False, argstr="--labelstats %s", desc="outfile is name of gifti file to which label stats will be written") patch = traits.Bool(argstr="-p", desc="input is a patch, not a full surface") rescale = traits.Bool(argstr="-r", desc="rescale vertex xyz so total area is same as group average") normal = traits.Bool(argstr="-n", desc="output is an ascii file where vertex data") xyz_ascii = traits.Bool(argstr="-a", desc="Print only surface xyz to ascii file") vertex = traits.Bool(argstr="-v", desc="Writes out neighbors of a vertex in each row") scale = traits.Float(argstr="-s %.3f", desc="scale vertex xyz by scale") dataarray_num = traits.Int(argstr="--da_num %d", desc="if input is gifti, 'num' specifies which data array to use") talairachxfm_subjid = traits.String(argstr="-t %s", desc="apply talairach xfm of subject to vertex xyz") origname = traits.String(argstr="-o %s", desc="read orig positions") in_file = File(exists=True, mandatory=True, position=-2, argstr='%s', desc='File to read/convert') out_file = File(argstr='./%s', position=-1, genfile=True, desc='output filename or True to generate one') #Not really sure why the ./ is necessary but the module fails without it out_datatype = traits.Enum("ico", "tri", "stl", "vtk", "gii", "mgh", "mgz", mandatory=True, desc="These file formats are supported: ASCII: .asc" \ "ICO: .ico, .tri GEO: .geo STL: .stl VTK: .vtk GIFTI: .gii MGH surface-encoded 'volume': .mgh, .mgz") class MRIsConvertOutputSpec(TraitedSpec): """ Uses Freesurfer's mris_convert to convert surface files to various formats """ converted = File(exists=True, desc='converted output surface') class MRIsConvert(FSCommand): """ Uses Freesurfer's mris_convert to convert surface files to various formats Example ------- >>> import nipype.interfaces.freesurfer as fs >>> mris = fs.MRIsConvert() >>> mris.inputs.in_file = 'lh.pial' >>> mris.inputs.out_datatype = 'gii' >>> mris.run() # doctest: +SKIP """ _cmd = 'mris_convert' input_spec = MRIsConvertInputSpec output_spec = MRIsConvertOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["converted"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name is 'out_file': return self._gen_outfilename() else: return None def _gen_outfilename(self): if isdefined(self.inputs.annot_file): _, name, ext = split_filename(self.inputs.annot_file) elif isdefined(self.inputs.parcstats_file): _, name, ext = split_filename(self.inputs.parcstats_file) elif isdefined(self.inputs.label_file): _, name, ext = split_filename(self.inputs.label_file) elif isdefined(self.inputs.scalarcurv_file): _, name, ext = split_filename(self.inputs.scalarcurv_file) elif isdefined(self.inputs.functional_file): _, name, ext = split_filename(self.inputs.functional_file) elif isdefined(self.inputs.in_file): _, name, ext = split_filename(self.inputs.in_file) return name + ext + "_converted." 
+ self.inputs.out_datatype class MRITessellateInputSpec(FSTraitedSpec): """ Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume """ in_file = File(exists=True, mandatory=True, position=-3, argstr='%s', desc='Input volume to tesselate voxels from.') label_value = traits.Int(position=-2, argstr='%d', mandatory=True, desc='Label value which to tesselate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)') out_file = File(argstr='./%s', position=-1, genfile=True, desc='output filename or True to generate one') tesselate_all_voxels = traits.Bool(argstr='-a', desc='Tessellate the surface of all voxels with different labels') use_real_RAS_coordinates = traits.Bool(argstr='-n', desc='Saves surface with real RAS coordinates where c_(r,a,s) != 0') class MRITessellateOutputSpec(TraitedSpec): """ Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume """ surface = File(exists=True, desc='binary surface of the tessellation ') class MRITessellate(FSCommand): """ Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume Example ------- >>> import nipype.interfaces.freesurfer as fs >>> tess = fs.MRITessellate() >>> tess.inputs.in_file = 'aseg.mgz' >>> tess.inputs.label_value = 17 >>> tess.inputs.out_file = 'lh.hippocampus' >>> tess.run() # doctest: +SKIP """ _cmd = 'mri_tessellate' input_spec = MRITessellateInputSpec output_spec = MRITessellateOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['surface'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name is 'out_file': return self._gen_outfilename() else: return None def _gen_outfilename(self): if isdefined(self.inputs.out_file): return self.inputs.out_file else: _, name, ext = split_filename(self.inputs.in_file) return name + ext + '_' + str(self.inputs.label_value) class MRIMarchingCubesInputSpec(FSTraitedSpec): """ Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume """ in_file = File(exists=True, mandatory=True, position=1, argstr='%s', desc='Input volume to tesselate voxels from.') label_value = traits.Int(position=2, argstr='%d', mandatory=True, desc='Label value which to tesselate from the input volume. 
(integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)') connectivity_value = traits.Int(1, position=-1, argstr='%d', usedefault=True, desc='Alter the marching cubes connectivity: 1=6+,2=18,3=6,4=26 (default=1)') out_file = File(argstr='./%s', position=-2, genfile=True, desc='output filename or True to generate one') class MRIMarchingCubesOutputSpec(TraitedSpec): """ Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume """ surface = File(exists=True, desc='binary surface of the tessellation ') class MRIMarchingCubes(FSCommand): """ Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume Example ------- >>> import nipype.interfaces.freesurfer as fs >>> mc = fs.MRIMarchingCubes() >>> mc.inputs.in_file = 'aseg.mgz' >>> mc.inputs.label_value = 17 >>> mc.inputs.out_file = 'lh.hippocampus' >>> mc.run() # doctest: +SKIP """ _cmd = 'mri_mc' input_spec = MRIMarchingCubesInputSpec output_spec = MRIMarchingCubesOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['surface'] = self._gen_outfilename() return outputs def _gen_filename(self, name): if name is 'out_file': return self._gen_outfilename() else: return None def _gen_outfilename(self): if isdefined(self.inputs.out_file): return os.path.abspath(self.inputs.out_file) else: _, name, ext = split_filename(self.inputs.in_file) return os.path.abspath(name + ext + '_' + str(self.inputs.label_value)) class SmoothTessellationInputSpec(FSTraitedSpec): """ This program smooths the tessellation of a surface using 'mris_smooth' """ in_file = File(exists=True, mandatory=True, argstr='%s', position=1, desc='Input volume to tesselate voxels from.') curvature_averaging_iterations = traits.Int(10, usedefault=True, argstr='-a %d', position=-1, desc='Number of curvature averaging iterations (default=10)') smoothing_iterations = traits.Int(10, usedefault=True, argstr='-n %d', position=-2, desc='Number of smoothing iterations (default=10)') snapshot_writing_iterations = traits.Int(argstr='-w %d', desc='Write snapshot every "n" iterations') use_gaussian_curvature_smoothing = traits.Bool(argstr='-g', position=3, desc='Use Gaussian curvature smoothing') gaussian_curvature_norm_steps = traits.Int(argstr='%d ', position=4, desc='Use Gaussian curvature smoothing') gaussian_curvature_smoothing_steps = traits.Int(argstr='%d', position=5, desc='Use Gaussian curvature smoothing') disable_estimates = traits.Bool(argstr='-nw', desc='Disables the writing of curvature and area estimates') normalize_area = traits.Bool(argstr='-area', desc='Normalizes the area after smoothing') use_momentum = traits.Bool(argstr='-m', desc='Uses momentum') out_file = File(argstr='%s', position=2, genfile=True, desc='output filename or True to generate one') out_curvature_file = File(argstr='-c %s', desc='Write curvature to ?h.curvname (default "curv")') out_area_file = File(argstr='-b %s', desc='Write area to ?h.areaname (default "area")') class SmoothTessellationOutputSpec(TraitedSpec): """ This program smooths the tessellation of a surface using 'mris_smooth' """ surface = File(exists=True, desc='Smoothed surface file ') class SmoothTessellation(FSCommand): """ This program smooths the tessellation of a surface using 'mris_smooth' .. 
seealso:: SurfaceSmooth() Interface For smoothing a scalar field along a surface manifold Example ------- >>> import nipype.interfaces.freesurfer as fs >>> smooth = fs.SmoothTessellation() >>> smooth.inputs.in_file = 'lh.hippocampus.stl' >>> smooth.run() # doctest: +SKIP """ _cmd = 'mris_smooth' input_spec = SmoothTessellationInputSpec output_spec = SmoothTessellationOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['surface'] = self._gen_outfilename() return outputs def _gen_filename(self, name): if name is 'out_file': return self._gen_outfilename() else: return None def _gen_outfilename(self): if isdefined(self.inputs.out_file): return os.path.abspath(self.inputs.out_file) else: _, name, ext = split_filename(self.inputs.in_file) return os.path.abspath(name + '_smoothed' + ext) def _run_interface(self, runtime): # The returncode is meaningless in BET. So check the output # in stderr and if it's set, then update the returncode # accordingly. runtime = super(SmoothTessellation, self)._run_interface(runtime) if "failed" in runtime.stderr: self.raise_exception(runtime) return runtime class MakeAverageSubjectInputSpec(FSTraitedSpec): subjects_ids = traits.List(traits.Str(), argstr='--subjects %s', desc='freesurfer subjects ids to average', mandatory=True, sep=' ') out_name = File('average', argstr='--out %s', desc='name for the average subject', usedefault=True) class MakeAverageSubjectOutputSpec(TraitedSpec): average_subject_name = traits.Str(desc='Output registration file') class MakeAverageSubject(FSCommand): """Make an average freesurfer subject Examples -------- >>> from nipype.interfaces.freesurfer import MakeAverageSubject >>> avg = MakeAverageSubject(subjects_ids=['s1', 's2']) >>> avg.cmdline 'make_average_subject --out average --subjects s1 s2' """ _cmd = 'make_average_subject' input_spec = MakeAverageSubjectInputSpec output_spec = MakeAverageSubjectOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['average_subject_name'] = self.inputs.out_name return outputs class ExtractMainComponentInputSpec(CommandLineInputSpec): in_file = File(exists=True, mandatory=True, argstr='%s', position=1, desc='input surface file') out_file = File(name_template='%s.maincmp', name_source='in_file', argstr='%s', position=2, desc='surface containing main component') class ExtractMainComponentOutputSpec(TraitedSpec): out_file = File(exists=True, desc='surface containing main component') class ExtractMainComponent(CommandLine): """Extract the main component of a tesselated surface Examples -------- >>> from nipype.interfaces.freesurfer import ExtractMainComponent >>> mcmp = ExtractMainComponent(in_file='lh.pial') >>> mcmp.cmdline 'mris_extract_main_component lh.pial lh.maincmp' """ _cmd='mris_extract_main_component' input_spec=ExtractMainComponentInputSpec output_spec=ExtractMainComponentOutputSpec nipype-0.9.2/nipype/interfaces/fsl/000077500000000000000000000000001227300005300172315ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/fsl/__init__.py000066400000000000000000000030171227300005300213430ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL `_ command line tools. Top-level namespace for fsl. 
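Example ------- A minimal usage sketch (assumes FSL is installed and configured; 'structural.nii' is a hypothetical input file, and the individual class docstrings remain the authoritative examples): >>> from nipype.interfaces import fsl >>> fsl.FSLCommand.set_default_output_type('NIFTI_GZ') >>> bet = fsl.BET(in_file='structural.nii', frac=0.5) # doctest: +SKIP >>> res = bet.run() # doctest: +SKIP 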
""" from .base import (FSLCommand, Info, check_fsl, no_fsl, no_fsl_course_data) from .preprocess import (FAST, FLIRT, ApplyXfm, BET, MCFLIRT, FNIRT, ApplyWarp, SliceTimer, SUSAN, PRELUDE, FUGUE, FIRST) from .model import (Level1Design, FEAT, FEATModel, FILMGLS, FEATRegister, FLAMEO, ContrastMgr, MultipleRegressDesign, L2Model, SMM, MELODIC, SmoothEstimate, Cluster, Randomise, GLM) from .utils import (Smooth, Merge, ExtractROI, Split, ImageMaths, ImageMeants, ImageStats, FilterRegressor, Overlay, Slicer, PlotTimeSeries, PlotMotionParams, ConvertXFM, SwapDimensions, PowerSpectrum, Reorient2Std, Complex, InvWarp) from .epi import (PrepareFieldmap, TOPUP, ApplyTOPUP, Eddy, EPIDeWarp, SigLoss, EddyCorrect) from .dti import (BEDPOSTX, DTIFit, ProbTrackX, VecReg, ProjThresh, FindTheBiggest, DistanceMap, TractSkeleton, XFibres, MakeDyadicVectors) from .maths import (ChangeDataType, Threshold, MeanImage, ApplyMask, IsotropicSmooth, TemporalFilter, DilateImage, ErodeImage, SpatialFilter, UnaryMaths, BinaryMaths, MultiImageMaths) nipype-0.9.2/nipype/interfaces/fsl/base.py000066400000000000000000000172051227300005300205220ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL `_ command line tools. This was written to work with FSL version 4.1.4. These are the base tools for working with FSL. Preprocessing tools are found in fsl/preprocess.py Model tools are found in fsl/model.py DTI tools are found in fsl/dti.py XXX Make this doc current! Currently these tools are supported: * BET v2.1: brain extraction * FAST v4.1: segmentation and bias correction * FLIRT v5.5: linear registration * MCFLIRT: motion correction * FNIRT v1.0: non-linear warp Examples -------- See the docstrings of the individual classes for examples. """ from glob import glob import os import warnings from ...utils.filemanip import fname_presuffix from ..base import (CommandLine, traits, CommandLineInputSpec, isdefined) warn = warnings.warn warnings.filterwarnings('always', category=UserWarning) class Info(object): """Handle fsl output type and version information. version refers to the version of fsl on the system output type refers to the type of file fsl defaults to writing eg, NIFTI, NIFTI_GZ """ ftypes = {'NIFTI': '.nii', 'NIFTI_PAIR': '.img', 'NIFTI_GZ': '.nii.gz', 'NIFTI_PAIR_GZ': '.img.gz'} @staticmethod def version(): """Check for fsl version on system Parameters ---------- None Returns ------- version : str Version number as string or None if FSL not found """ # find which fsl being used....and get version from # /path/to/fsl/etc/fslversion try: basedir = os.environ['FSLDIR'] except KeyError: return None clout = CommandLine(command='cat', args='%s/etc/fslversion' % (basedir), terminal_output='allatonce').run() out = clout.runtime.stdout return out.strip('\n') @classmethod def output_type_to_ext(cls, output_type): """Get the file extension for the given output type. Parameters ---------- output_type : {'NIFTI', 'NIFTI_GZ', 'NIFTI_PAIR', 'NIFTI_PAIR_GZ'} String specifying the output type. Returns ------- extension : str The file extension for the output type. """ try: return cls.ftypes[output_type] except KeyError: msg = 'Invalid FSLOUTPUTTYPE: ', output_type raise KeyError(msg) @classmethod def output_type(cls): """Get the global FSL output file type FSLOUTPUTTYPE. This returns the value of the environment variable FSLOUTPUTTYPE. An exception is raised if it is not defined. 
Returns ------- fsl_ftype : string Represents the current environment setting of FSLOUTPUTTYPE """ try: return os.environ['FSLOUTPUTTYPE'] except KeyError: warnings.warn(('FSL environment variables not set. setting output ' 'type to NIFTI')) return 'NIFTI' @staticmethod def standard_image(img_name=None): '''Grab an image from the standard location. Returns a list of standard images if called without arguments. Could be made more fancy to allow for more relocatability''' try: fsldir = os.environ['FSLDIR'] except KeyError: raise Exception('FSL environment variables not set') stdpath = os.path.join(fsldir, 'data', 'standard') if img_name is None: return [filename.replace(stdpath + '/', '') for filename in glob(os.path.join(stdpath, '*nii*'))] return os.path.join(stdpath, img_name) class FSLCommandInputSpec(CommandLineInputSpec): """ Base Input Specification for all FSL Commands All commands support specifying FSLOUTPUTTYPE dynamically via output_type. Example ------- fsl.ExtractROI(tmin=42, tsize=1, output_type='NIFTI') """ output_type = traits.Enum('NIFTI', Info.ftypes.keys(), desc='FSL output type') class FSLCommand(CommandLine): """Base support for FSL commands. """ input_spec = FSLCommandInputSpec _output_type = None def __init__(self, **inputs): super(FSLCommand, self).__init__(**inputs) self.inputs.on_trait_change(self._output_update, 'output_type') if self._output_type is None: self._output_type = Info.output_type() if not isdefined(self.inputs.output_type): self.inputs.output_type = self._output_type else: self._output_update() def _output_update(self): self._output_type = self.inputs.output_type self.inputs.environ.update({'FSLOUTPUTTYPE': self.inputs.output_type}) @classmethod def set_default_output_type(cls, output_type): """Set the default output type for FSL classes. This method is used to set the default output type for all FSL subclasses. However, setting this will not update the output type for any existing instances. For these, assign the .inputs.output_type. """ if output_type in Info.ftypes: cls._output_type = output_type else: raise AttributeError('Invalid FSL output_type: %s' % output_type) @property def version(self): return Info.version() def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None): """Generate a filename based on the given parameters. The filename will take the form: cwd/basename. If change_ext is True, it will use the extensions specified in inputs.output_type. Parameters ---------- basename : str Filename to base the new filename on. cwd : str Path to prefix to the new filename. (default is os.getcwd()) suffix : str Suffix to add to the `basename`. (default is '') change_ext : bool Flag to change the filename extension to the FSL output type. (default True) Returns ------- fname : str New filename based on given parameters. """ if basename == '': msg = 'Unable to generate filename for command %s. ' % self.cmd msg += 'basename is not set!' 
raise ValueError(msg) if cwd is None: cwd = os.getcwd() if ext is None: ext = Info.output_type_to_ext(self.inputs.output_type) if change_ext: if suffix: suffix = ''.join((suffix, ext)) else: suffix = ext if suffix is None: suffix = '' fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd) return fname def _overload_extension(self, value): return value + Info.output_type_to_ext(self.inputs.output_type) def check_fsl(): ver = Info.version() if ver: return 0 else: return 1 def no_fsl(): """Checks if FSL is NOT installed used with skipif to skip tests that will fail if FSL is not installed""" if Info.version() is None: return True else: return False def no_fsl_course_data(): """check if fsl_course data is present""" return not (os.path.isdir(os.path.abspath('fsl_course_data'))) nipype-0.9.2/nipype/interfaces/fsl/dti.py000066400000000000000000001257501227300005300203750ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL `_ command line tools. This was written to work with FSL version 4.1.4. Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os import shutil import warnings from nipype.interfaces.fsl.base import FSLCommand, FSLCommandInputSpec, Info from nipype.interfaces.base import (TraitedSpec, isdefined, File, Directory, InputMultiPath, OutputMultiPath, traits) from nipype.utils.filemanip import fname_presuffix, split_filename, copyfile warn = warnings.warn warnings.filterwarnings('always', category=UserWarning) class DTIFitInputSpec(FSLCommandInputSpec): dwi = File(exists=True, desc='diffusion weighted image data file', argstr='-k %s', position=0, mandatory=True) base_name = traits.Str("dtifit_", desc='base_name that all output files will start with', argstr='-o %s', position=1, usedefault=True) mask = File(exists=True, desc='bet binary mask file', argstr='-m %s', position=2, mandatory=True) bvecs = File(exists=True, desc='b vectors file', argstr='-r %s', position=3, mandatory=True) bvals = File(exists=True, desc='b values file', argstr='-b %s', position=4, mandatory=True) min_z = traits.Int(argstr='-z %d', desc='min z') max_z = traits.Int(argstr='-Z %d', desc='max z') min_y = traits.Int(argstr='-y %d', desc='min y') max_y = traits.Int(argstr='-Y %d', desc='max y') min_x = traits.Int(argstr='-x %d', desc='min x') max_x = traits.Int(argstr='-X %d', desc='max x') save_tensor = traits.Bool(desc='save the elements of the tensor', argstr='--save_tensor') sse = traits.Bool(desc='output sum of squared errors', argstr='--sse') cni = File(exists=True, desc='input counfound regressors', argstr='-cni %s') little_bit = traits.Bool(desc='only process small area of brain', argstr='--littlebit') class DTIFitOutputSpec(TraitedSpec): V1 = File(exists=True, desc='path/name of file with the 1st eigenvector') V2 = File(exists=True, desc='path/name of file with the 2nd eigenvector') V3 = File(exists=True, desc='path/name of file with the 3rd eigenvector') L1 = File(exists=True, desc='path/name of file with the 1st eigenvalue') L2 = File(exists=True, desc='path/name of file with the 2nd eigenvalue') L3 = File(exists=True, desc='path/name of file with the 3rd eigenvalue') MD = File(exists=True, desc='path/name of file with the mean diffusivity') FA = 
File(exists=True, desc='path/name of file with the fractional anisotropy') MO = File(exists=True, desc='path/name of file with the mode of anisotropy') S0 = File(exists=True, desc='path/name of file with the raw T2 signal with no ' + 'diffusion weighting') tensor = File(exists=True, desc='path/name of file with the 4D tensor volume') class DTIFit(FSLCommand): """ Use FSL dtifit command for fitting a diffusion tensor model at each voxel Example ------- >>> from nipype.interfaces import fsl >>> dti = fsl.DTIFit() >>> dti.inputs.dwi = 'diffusion.nii' >>> dti.inputs.bvecs = 'bvecs' >>> dti.inputs.bvals = 'bvals' >>> dti.inputs.base_name = 'TP' >>> dti.inputs.mask = 'mask.nii' >>> dti.cmdline 'dtifit -k diffusion.nii -o TP -m mask.nii -r bvecs -b bvals' """ _cmd = 'dtifit' input_spec = DTIFitInputSpec output_spec = DTIFitOutputSpec def _list_outputs(self): outputs = self.output_spec().get() for k in outputs.keys(): if k not in ('outputtype', 'environ', 'args'): if k != 'tensor' or (isdefined(self.inputs.save_tensor) and self.inputs.save_tensor): outputs[k] = self._gen_fname(self.inputs.base_name, suffix='_' + k) return outputs class BEDPOSTXInputSpec(FSLCommandInputSpec): dwi = File(exists=True, desc='diffusion weighted image data file', mandatory=True) mask = File(exists=True, desc='bet binary mask file', mandatory=True) bvecs = File(exists=True, desc='b vectors file', mandatory=True) bvals = File(exists=True, desc='b values file', mandatory=True) bpx_directory = Directory('bedpostx', argstr='%s', usedefault=True, desc="the name for this subject's bedpostx folder") fibres = traits.Int(1, argstr='-n %d', desc='number of fibres per voxel') weight = traits.Float(1.00, argstr='-w %.2f', desc='ARD weight, more weight means less' + ' secondary fibres per voxel') burn_period = traits.Int(1000, argstr='-b %d', desc='burnin period') jumps = traits.Int(1250, argstr='-j %d', desc='number of jumps') sampling = traits.Int(25, argstr='-s %d', desc='sample every') class BEDPOSTXOutputSpec(TraitedSpec): bpx_out_directory = Directory(exists=True, desc='path/name of directory with all ' + 'bedpostx output files for this subject') xfms_directory = Directory(exists=True, desc='path/name of directory with the ' + 'transformation matrices') merged_thsamples = traits.List(File(exists=True), desc='a list of path/name of 4D volume ' + 'with samples from the distribution ' + 'on theta') merged_phsamples = traits.List(File(exists=True), desc='a list of path/name of file with ' 'samples from the distribution on phi') merged_fsamples = traits.List(File(exists=True), desc='a list of path/name of 4D volume ' + 'with samples from the distribution ' + 'on anisotropic volume fraction') mean_thsamples = traits.List(File(exists=True), desc='a list of path/name of 3D volume with mean of distribution on theta') mean_phsamples = traits.List(File(exists=True), desc='a list of path/name of 3D volume with mean of distribution on phi') mean_fsamples = traits.List(File(exists=True), desc='a list of path/name of 3D volume with mean of distribution on f anisotropy') dyads = traits.List(File(exists=True), desc='a list of path/name of mean of PDD distribution in vector form') class BEDPOSTX(FSLCommand): """ Deprecated! 
Please use create_bedpostx_pipeline instead Example ------- >>> from nipype.interfaces import fsl >>> bedp = fsl.BEDPOSTX(bpx_directory='subjdir', bvecs='bvecs', bvals='bvals', dwi='diffusion.nii', \ mask='mask.nii', fibres=1) >>> bedp.cmdline 'bedpostx subjdir -n 1' """ _cmd = 'bedpostx' input_spec = BEDPOSTXInputSpec output_spec = BEDPOSTXOutputSpec _can_resume = True def __init__(self, **inputs): warnings.warn("Deprecated: Please use create_bedpostx_pipeline instead", DeprecationWarning) return super(BEDPOSTX, self).__init__(**inputs) def _run_interface(self, runtime): #create the subject specific bpx_directory bpx_directory = os.path.join(os.getcwd(), self.inputs.bpx_directory) self.inputs.bpx_directory = bpx_directory if not os.path.exists(bpx_directory): os.makedirs(bpx_directory) _, _, ext = split_filename(self.inputs.mask) shutil.copyfile(self.inputs.mask, os.path.join(self.inputs.bpx_directory, 'nodif_brain_mask' + ext)) _, _, ext = split_filename(self.inputs.dwi) shutil.copyfile(self.inputs.dwi, os.path.join(self.inputs.bpx_directory, 'data' + ext)) shutil.copyfile(self.inputs.bvals, os.path.join(self.inputs.bpx_directory, 'bvals')) shutil.copyfile(self.inputs.bvecs, os.path.join(self.inputs.bpx_directory, 'bvecs')) runtime = super(BEDPOSTX, self)._run_interface(runtime) if runtime.stderr: self.raise_exception(runtime) return runtime def _list_outputs(self): outputs = self.output_spec().get() outputs['bpx_out_directory'] = os.path.join(os.getcwd(), self.inputs.bpx_directory + '.bedpostX') outputs['xfms_directory'] = os.path.join(os.getcwd(), self.inputs.bpx_directory + '.bedpostX', 'xfms') for k in outputs.keys(): if k not in ('outputtype', 'environ', 'args', 'bpx_out_directory', 'xfms_directory'): outputs[k] = [] for n in range(self.inputs.fibres): outputs['merged_thsamples'].append(self._gen_fname('merged_th' + repr(n + 1) + 'samples', suffix='', cwd=outputs['bpx_out_directory'])) outputs['merged_phsamples'].append(self._gen_fname('merged_ph' + repr(n + 1) + 'samples', suffix='', cwd=outputs['bpx_out_directory'])) outputs['merged_fsamples'].append(self._gen_fname('merged_f' + repr(n + 1) + 'samples', suffix='', cwd=outputs['bpx_out_directory'])) outputs['mean_thsamples'].append(self._gen_fname('mean_th' + repr(n + 1) + 'samples', suffix='', cwd=outputs['bpx_out_directory'])) outputs['mean_phsamples'].append(self._gen_fname('mean_ph' + repr(n + 1) + 'samples', suffix='', cwd=outputs['bpx_out_directory'])) outputs['mean_fsamples'].append(self._gen_fname('mean_f' + repr(n + 1) + 'samples', suffix='', cwd=outputs['bpx_out_directory'])) outputs['dyads'].append(self._gen_fname('dyads' + repr(n + 1), suffix='', cwd=outputs['bpx_out_directory'])) return outputs class ProbTrackXInputSpec(FSLCommandInputSpec): thsamples = InputMultiPath(File(exists=True), mandatory=True) phsamples = InputMultiPath(File(exists=True), mandatory=True) fsamples = InputMultiPath(File(exists=True), mandatory=True) samples_base_name = traits.Str("merged", desc='the rootname/base_name for samples files', argstr='--samples=%s', usedefault=True) mask = File(exists=True, desc='bet binary mask file in diffusion space', argstr='-m %s', mandatory=True) seed = traits.Either(File(exists=True), traits.List(File(exists=True)), traits.List(traits.List(traits.Int(), minlen=3, maxlen=3)), desc='seed volume(s), or voxel(s)' + 'or freesurfer label file', argstr='--seed=%s', mandatory=True) mode = traits.Enum("simple", "two_mask_symm", "seedmask", desc='options: simple (single seed voxel), seedmask (mask of seed voxels), ' + 
'twomask_symm (two bet binary masks) ', argstr='--mode=%s', genfile=True) target_masks = InputMultiPath(File(exits=True), desc='list of target masks - ' + 'required for seeds_to_targets classification', argstr='--targetmasks=%s') mask2 = File(exists=True, desc='second bet binary mask (in diffusion space) in twomask_symm mode', argstr='--mask2=%s') waypoints = File(exists=True, desc='waypoint mask or ascii list of waypoint masks - ' + 'only keep paths going through ALL the masks', argstr='--waypoints=%s') network = traits.Bool(desc='activate network mode - only keep paths going through ' + 'at least one seed mask (required if multiple seed masks)', argstr='--network') mesh = File(exists=True, desc='Freesurfer-type surface descriptor (in ascii format)', argstr='--mesh=%s') seed_ref = File(exists=True, desc='reference vol to define seed space in ' + 'simple mode - diffusion space assumed if absent', argstr='--seedref=%s') out_dir = Directory(exists=True, argstr='--dir=%s', desc='directory to put the final volumes in', genfile=True) force_dir = traits.Bool(True, desc='use the actual directory name given - i.e. ' + 'do not add + to make a new directory', argstr='--forcedir', usedefault=True) opd = traits.Bool(True, desc='outputs path distributions', argstr='--opd', usedefault=True) correct_path_distribution = traits.Bool(desc='correct path distribution for the length of the pathways', argstr='--pd') os2t = traits.Bool(desc='Outputs seeds to targets', argstr='--os2t') #paths_file = File('nipype_fdtpaths', usedefault=True, argstr='--out=%s', # desc='produces an output file (default is fdt_paths)') avoid_mp = File(exists=True, desc='reject pathways passing through locations given by this mask', argstr='--avoid=%s') stop_mask = File(exists=True, argstr='--stop=%s', desc='stop tracking at locations given by this mask file') xfm = File(exists=True, argstr='--xfm=%s', desc='transformation matrix taking seed space to DTI space ' + '(either FLIRT matrix or FNIRT warp_field) - default is identity') inv_xfm = File(argstr='--invxfm=%s', desc='transformation matrix taking DTI space to seed' + ' space (compulsory when using a warp_field for seeds_to_dti)') n_samples = traits.Int(5000, argstr='--nsamples=%d', desc='number of samples - default=5000', usedefault=True) n_steps = traits.Int(argstr='--nsteps=%d', desc='number of steps per sample - default=2000') dist_thresh = traits.Float(argstr='--distthresh=%.3f', desc='discards samples shorter than ' + 'this threshold (in mm - default=0)') c_thresh = traits.Float(argstr='--cthr=%.3f', desc='curvature threshold - default=0.2') sample_random_points = traits.Bool(argstr='--sampvox', desc='sample random points within seed voxels') step_length = traits.Float(argstr='--steplength=%.3f', desc='step_length in mm - default=0.5') loop_check = traits.Bool(argstr='--loopcheck', desc='perform loop_checks on paths -' + ' slower, but allows lower curvature threshold') use_anisotropy = traits.Bool(argstr='--usef', desc='use anisotropy to constrain tracking') rand_fib = traits.Enum(0, 1, 2, 3, argstr='--randfib=%d', desc='options: 0 - default, 1 - to randomly sample' + ' initial fibres (with f > fibthresh), 2 - to sample in ' + 'proportion fibres (with f>fibthresh) to f, 3 - to sample ALL ' + 'populations at random (even if f>> from nipype.interfaces import fsl >>> pbx = fsl.ProbTrackX(samples_base_name='merged', mask='mask.nii', \ seed='MASK_average_thal_right.nii', mode='seedmask', \ xfm='trans.mat', n_samples=3, n_steps=10, force_dir=True, opd=True, os2t=True, \ target_masks 
= ['targets_MASK1.nii', 'targets_MASK2.nii'], \ thsamples='merged_thsamples.nii', fsamples='merged_fsamples.nii', phsamples='merged_phsamples.nii', \ out_dir='.') >>> pbx.cmdline 'probtrackx --forcedir -m mask.nii --mode=seedmask --nsamples=3 --nsteps=10 --opd --os2t --dir=. --samples=merged --seed=MASK_average_thal_right.nii --targetmasks=targets.txt --xfm=trans.mat' """ _cmd = 'probtrackx' input_spec = ProbTrackXInputSpec output_spec = ProbTrackXOutputSpec def __init__(self, **inputs): warnings.warn("Deprecated: Please use create_bedpostx_pipeline instead", DeprecationWarning) return super(ProbTrackX, self).__init__(**inputs) def _run_interface(self, runtime): for i in range(1, len(self.inputs.thsamples) + 1): _, _, ext = split_filename(self.inputs.thsamples[i - 1]) copyfile(self.inputs.thsamples[i - 1], self.inputs.samples_base_name + "_th%dsamples" % i + ext, copy=False) _, _, ext = split_filename(self.inputs.thsamples[i - 1]) copyfile(self.inputs.phsamples[i - 1], self.inputs.samples_base_name + "_ph%dsamples" % i + ext, copy=False) _, _, ext = split_filename(self.inputs.thsamples[i - 1]) copyfile(self.inputs.fsamples[i - 1], self.inputs.samples_base_name + "_f%dsamples" % i + ext, copy=False) if isdefined(self.inputs.target_masks): f = open("targets.txt", "w") for target in self.inputs.target_masks: f.write("%s\n" % target) f.close() if isinstance(self.inputs.seed, list): f = open("seeds.txt", "w") for seed in self.inputs.seed: if isinstance(seed, list): f.write("%s\n" % (" ".join([str(s) for s in seed]))) else: f.write("%s\n" % seed) f.close() runtime = super(ProbTrackX, self)._run_interface(runtime) if runtime.stderr: self.raise_exception(runtime) return runtime def _format_arg(self, name, spec, value): if name == 'target_masks' and isdefined(value): fname = "targets.txt" return super(ProbTrackX, self)._format_arg(name, spec, [fname]) elif name == 'seed' and isinstance(value, list): fname = "seeds.txt" return super(ProbTrackX, self)._format_arg(name, spec, fname) else: return super(ProbTrackX, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.out_dir): out_dir = self._gen_filename("out_dir") else: out_dir = self.inputs.out_dir outputs['log'] = os.path.abspath(os.path.join(out_dir, 'probtrackx.log')) #utputs['way_total'] = os.path.abspath(os.path.join(out_dir, 'waytotal')) if isdefined(self.inputs.opd == True): if isinstance(self.inputs.seed, list) and isinstance(self.inputs.seed[0], list): outputs['fdt_paths'] = [] for seed in self.inputs.seed: outputs['fdt_paths'].append( os.path.abspath( self._gen_fname("fdt_paths_%s" % ("_".join([str(s) for s in seed])), cwd=out_dir, suffix=''))) else: outputs['fdt_paths'] = os.path.abspath(self._gen_fname("fdt_paths", cwd=out_dir, suffix='')) # handle seeds-to-target output files if isdefined(self.inputs.target_masks): outputs['targets'] = [] for target in self.inputs.target_masks: outputs['targets'].append(os.path.abspath( self._gen_fname('seeds_to_' + os.path.split(target)[1], cwd=out_dir, suffix=''))) if isdefined(self.inputs.verbose) and self.inputs.verbose == 2: outputs['particle_files'] = [os.path.abspath( os.path.join(out_dir, 'particle%d' % i)) for i in range(self.inputs.n_samples)] return outputs def _gen_filename(self, name): if name == "out_dir": return os.getcwd() elif name == "mode": if isinstance(self.inputs.seed, list) and isinstance(self.inputs.seed[0], list): return "simple" else: return "seedmask" class VecRegInputSpec(FSLCommandInputSpec): in_file = 
File(exists=True, argstr='-i %s', desc='filename for input vector or tensor field', mandatory=True) out_file = File(argstr='-o %s', desc='filename for output registered vector or tensor field', genfile=True, hash_files=False) ref_vol = File(exists=True, argstr='-r %s', desc='filename for reference (target) volume', mandatory=True) affine_mat = File(exists=True, argstr='-t %s', desc='filename for affine transformation matrix') warp_field = File(exists=True, argstr='-w %s', desc='filename for 4D warp field for nonlinear registration') rotation_mat = File(exists=True, argstr='--rotmat=%s', desc='filename for secondary affine matrix' + 'if set, this will be used for the rotation of the vector/tensor field') rotation_warp = File(exists=True, argstr='--rotwarp=%s', desc='filename for secondary warp field' + 'if set, this will be used for the rotation of the vector/tensor field') interpolation = traits.Enum("nearestneighbour", "trilinear", "sinc", "spline", argstr='--interp=%s', desc='interpolation method : ' + 'nearestneighbour, trilinear (default), sinc or spline') mask = File(exists=True, argstr='-m %s', desc='brain mask in input space') ref_mask = File(exists=True, argstr='--refmask=%s', desc='brain mask in output space ' + '(useful for speed up of nonlinear reg)') class VecRegOutputSpec(TraitedSpec): out_file = File(exists=True, desc='path/name of filename for the registered vector or tensor field') class VecReg(FSLCommand): """Use FSL vecreg for registering vector data For complete details, see the FDT Documentation Example ------- >>> from nipype.interfaces import fsl >>> vreg = fsl.VecReg(in_file='diffusion.nii', \ affine_mat='trans.mat', \ ref_vol='mni.nii', \ out_file='diffusion_vreg.nii') >>> vreg.cmdline 'vecreg -t trans.mat -i diffusion.nii -o diffusion_vreg.nii -r mni.nii' """ _cmd = 'vecreg' input_spec = VecRegInputSpec output_spec = VecRegOutputSpec def _run_interface(self, runtime): if not isdefined(self.inputs.out_file): pth, base_name = os.path.split(self.inputs.in_file) self.inputs.out_file = self._gen_fname(base_name, cwd=os.path.abspath(pth), suffix='_vreg') return super(VecReg, self)._run_interface(runtime) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_file if not isdefined(outputs['out_file']) and isdefined(self.inputs.in_file): pth, base_name = os.path.split(self.inputs.in_file) outputs['out_file'] = self._gen_fname(base_name, cwd=os.path.abspath(pth), suffix='_vreg') outputs['out_file'] = os.path.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name is 'out_file': return self._list_outputs()[name] else: return None class ProjThreshInputSpec(FSLCommandInputSpec): in_files = traits.List(File(exists=True), argstr='%s', desc='a list of input volumes', mandatory=True, position=0) threshold = traits.Int(argstr='%d', desc='threshold indicating minimum ' + 'number of seed voxels entering this mask region', mandatory=True, position=1) class ProjThreshOuputSpec(TraitedSpec): out_files = traits.List(File(exists=True), desc='path/name of output volume after thresholding') class ProjThresh(FSLCommand): """Use FSL proj_thresh for thresholding some outputs of probtrack For complete details, see the FDT Documentation Example ------- >>> from nipype.interfaces import fsl >>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii'] >>> pThresh = fsl.ProjThresh(in_files=ldir, threshold=3) >>> pThresh.cmdline 'proj_thresh seeds_to_M1.nii seeds_to_M2.nii 3' """ _cmd = 'proj_thresh' input_spec = ProjThreshInputSpec 
output_spec = ProjThreshOuputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_files'] = [] for name in self.inputs.in_files: cwd, base_name = os.path.split(name) outputs['out_files'].append(self._gen_fname(base_name, cwd=cwd, suffix='_proj_seg_thr_' + repr(self.inputs.threshold))) return outputs class FindTheBiggestInputSpec(FSLCommandInputSpec): in_files = traits.List(File(exists=True), argstr='%s', desc='a list of input volumes or a singleMatrixFile', position=0, mandatory=True) out_file = File(argstr='%s', desc='file with the resulting segmentation', position=2, genfile=True, hash_files=False) class FindTheBiggestOutputSpec(TraitedSpec): out_file = File(exists=True, argstr='%s', desc='output file indexed in order of input files') class FindTheBiggest(FSLCommand): """ Use FSL find_the_biggest for performing hard segmentation on the outputs of connectivity-based thresholding in probtrack. For complete details, see the `FDT Documentation. `_ Example ------- >>> from nipype.interfaces import fsl >>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii'] >>> fBig = fsl.FindTheBiggest(in_files=ldir, out_file='biggestSegmentation') >>> fBig.cmdline 'find_the_biggest seeds_to_M1.nii seeds_to_M2.nii biggestSegmentation' """ _cmd = 'find_the_biggest' input_spec = FindTheBiggestInputSpec output_spec = FindTheBiggestOutputSpec def _run_interface(self, runtime): if not isdefined(self.inputs.out_file): self.inputs.out_file = self._gen_fname('biggestSegmentation', suffix='') return super(FindTheBiggest, self)._run_interface(runtime) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_file if not isdefined(outputs['out_file']): outputs['out_file'] = self._gen_fname('biggestSegmentation', suffix='') outputs['out_file'] = os.path.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name is 'out_file': return self._list_outputs()[name] else: return None class TractSkeletonInputSpec(FSLCommandInputSpec): in_file = File(exists=True, mandatory=True, argstr="-i %s", desc="input image (typcially mean FA volume)") _proj_inputs = ["threshold", "distance_map", "data_file"] project_data = traits.Bool(argstr="-p %.3f %s %s %s %s", requires=_proj_inputs, desc="project data onto skeleton") threshold = traits.Float(desc="skeleton threshold value") distance_map = File(exists=True, desc="distance map image") search_mask_file = File(exists=True, xor=["use_cingulum_mask"], desc="mask in which to use alternate search rule") use_cingulum_mask = traits.Bool(True, usedefault=True, xor=["search_mask_file"], desc="perform alternate search using built-in cingulum mask") data_file = File(exists=True, desc="4D data to project onto skeleton (usually FA)") alt_data_file = File(exists=True, argstr="-a %s", desc="4D non-FA data to project onto skeleton") alt_skeleton = File(exists=True, argstr="-s %s", desc="alternate skeleton to use") projected_data = File(desc="input data projected onto skeleton") skeleton_file = traits.Either(traits.Bool, File, argstr="-o %s", desc="write out skeleton image") class TractSkeletonOutputSpec(TraitedSpec): projected_data = File(desc="input data projected onto skeleton") skeleton_file = File(desc="tract skeleton image") class TractSkeleton(FSLCommand): """Use FSL's tbss_skeleton to skeletonise an FA image or project arbitrary values onto a skeleton. There are two ways to use this interface. 
To create a skeleton from an FA image, just supply the ``in_file`` and set ``skeleton_file`` to True (or specify a skeleton filename). To project values onto a skeleton, you must set ``project_data`` to True, and then also supply values for ``threshold``, ``distance_map``, and ``data_file``. The ``search_mask_file`` and ``use_cingulum_mask`` inputs are also used in data projection, but ``use_cingulum_mask`` is set to True by default. This mask controls where the projection algorithm searches within a circular space around a tract, rather than in a single perpendicular direction. Example ------- >>> import nipype.interfaces.fsl as fsl >>> skeletor = fsl.TractSkeleton() >>> skeletor.inputs.in_file = "all_FA.nii.gz" >>> skeletor.inputs.skeleton_file = True >>> skeletor.run() # doctest: +SKIP """ _cmd = "tbss_skeleton" input_spec = TractSkeletonInputSpec output_spec = TractSkeletonOutputSpec def _format_arg(self, name, spec, value): if name == "project_data": if isdefined(value) and value: _si = self.inputs if isdefined(_si.use_cingulum_mask) and _si.use_cingulum_mask: mask_file = Info.standard_image("LowerCingulum_1mm.nii.gz") else: mask_file = _si.search_mask_file if not isdefined(_si.projected_data): proj_file = self._list_outputs()["projected_data"] else: proj_file = _si.projected_data return spec.argstr % (_si.threshold, _si.distance_map, mask_file, _si.data_file, proj_file) elif name == "skeleton_file": if isinstance(value, bool): return spec.argstr % self._list_outputs()["skeleton_file"] else: return spec.argstr % value return super(TractSkeleton, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() _si = self.inputs if isdefined(_si.project_data) and _si.project_data: proj_data = _si.projected_data outputs["projected_data"] = proj_data if not isdefined(proj_data): stem = _si.data_file if isdefined(_si.alt_data_file): stem = _si.alt_data_file outputs["projected_data"] = fname_presuffix(stem, suffix="_skeletonised", newpath=os.getcwd(), use_ext=True) if isdefined(_si.skeleton_file) and _si.skeleton_file: outputs["skeleton_file"] = _si.skeleton_file if isinstance(_si.skeleton_file, bool): outputs["skeleton_file"] = fname_presuffix(_si.in_file, suffix="_skeleton", newpath=os.getcwd(), use_ext=True) return outputs class DistanceMapInputSpec(FSLCommandInputSpec): in_file = File(exists=True, mandatory=True, argstr="--in=%s", desc="image to calculate distance values for") mask_file = File(exists=True, argstr="--mask=%s", desc="binary mask to constrain calculations") invert_input = traits.Bool(argstr="--invert", desc="invert input image") local_max_file = traits.Either(traits.Bool, File, argstr="--localmax=%s", desc="write an image of the local maxima", hash_files=False) distance_map = File(genfile=True, argstr="--out=%s", desc="distance map to write", hash_files=False) class DistanceMapOutputSpec(TraitedSpec): distance_map = File(exists=True, desc="value is distance to nearest nonzero voxels") local_max_file = File(desc="image of local maxima") class DistanceMap(FSLCommand): """Use FSL's distancemap to generate a map of the distance to the nearest nonzero voxel. 
Example ------- >>> import nipype.interfaces.fsl as fsl >>> mapper = fsl.DistanceMap() >>> mapper.inputs.in_file = "skeleton_mask.nii.gz" >>> mapper.run() # doctest: +SKIP """ _cmd = "distancemap" input_spec = DistanceMapInputSpec output_spec = DistanceMapOutputSpec def _format_arg(self, name, spec, value): if name == "local_max_file": if isinstance(value, bool): return spec.argstr % self._list_outputs()["local_max_file"] return super(DistanceMap, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() _si = self.inputs outputs["distance_map"] = _si.distance_map if not isdefined(_si.distance_map): outputs["distance_map"] = fname_presuffix(_si.in_file, suffix="_dstmap", use_ext=True, newpath=os.getcwd()) outputs["distance_map"] = os.path.abspath(outputs["distance_map"]) if isdefined(_si.local_max_file): outputs["local_max_file"] = _si.local_max_file if isinstance(_si.local_max_file, bool): outputs["local_max_file"] = fname_presuffix(_si.in_file, suffix="_lclmax", use_ext=True, newpath=os.getcwd()) outputs["local_max_file"] = os.path.abspath(outputs["local_max_file"]) return outputs def _gen_filename(self, name): if name == "distance_map": return self._list_outputs()["distance_map"] return None class XFibresInputSpec(FSLCommandInputSpec): dwi = File(exists=True, argstr="--data=%s", mandatory=True) mask = File(exists=True, argstr="--mask=%s", mandatory=True) gradnonlin = File(exists=True, argstr="--gradnonlin=%s") bvecs = File(exists=True, argstr="--bvecs=%s", mandatory=True) bvals = File(exists=True, argstr="--bvals=%s", mandatory=True) logdir = Directory("logdir", argstr="--logdir=%s", usedefault=True) n_fibres = traits.Range(low=1, argstr="--nfibres=%d", desc="Maximum nukmber of fibres to fit in each voxel") fudge = traits.Int(argstr="--fudge=%d", desc="ARD fudge factor") n_jumps = traits.Range(low=1, argstr="--njumps=%d", desc="Num of jumps to be made by MCMC") burn_in = traits.Range(low=0, argstr="--burnin=%d", desc="Total num of jumps at start of MCMC to be discarded") burn_in_no_ard = traits.Range(low=0, argstr="--burninnoard=%d", desc="num of burnin jumps before the ard is imposed") sample_every = traits.Range(low=0, argstr="--sampleevery=%d", desc="Num of jumps for each sample (MCMC)") update_proposal_every = traits.Range(low=1, argstr="--updateproposalevery=%d", desc="Num of jumps for each update to the proposal density std (MCMC)") seed = traits.Int(argstr="--seed=%d", desc="seed for pseudo random number generator") model = traits.Int(argstr="--model=%d", desc="Which model to use. \ 1=mono-exponential (default and required for single shell). 2=continous \ exponential (for multi-shell experiments)") _xor_inputs1 = ('no_ard', 'all_ard') no_ard = traits.Bool(argstr="--noard", desc="Turn ARD off on all fibres", xor=_xor_inputs1) all_ard = traits.Bool(argstr="--allard", desc="Turn ARD on on all fibres", xor=_xor_inputs1) _xor_inputs2 = ('no_spat', 'non_linear') no_spat = traits.Bool(argstr="--nospat", desc="Initialise with tensor, not spatially", xor=_xor_inputs2) non_linear = traits.Bool(argstr="--nonlinear", desc="Initialise with nonlinear fitting", xor=_xor_inputs2) force_dir = traits.Bool(True, desc='use the actual directory name given - i.e. 
' + 'do not add + to make a new directory', argstr='--forcedir', usedefault=True) class XFibresOutputSpec(TraitedSpec): dyads = OutputMultiPath(File(exists=True), desc="Mean of PDD distribution in vector form.") fsamples = OutputMultiPath(File(exists=True), desc="Samples from the distribution on anisotropic volume fraction") mean_dsamples = File(exists=True, desc="Mean of distribution on diffusivity d") mean_fsamples = OutputMultiPath(File(exists=True), desc="Mean of distribution on f anisotropy") mean_S0samples = File(exists=True, desc="Samples from S0 distribution") phsamples = OutputMultiPath(File(exists=True), desc="Samples from the distribution on phi") thsamples = OutputMultiPath(File(exists=True), desc="Samples from the distribution on theta") class XFibres(FSLCommand): """Perform model parameters estimation for local (voxelwise) diffusion parameters """ _cmd = "xfibres" input_spec = XFibresInputSpec output_spec = XFibresOutputSpec def _run_interface(self, runtime): runtime = super(XFibres, self)._run_interface(runtime) if runtime.stderr: self.raise_exception(runtime) return runtime def _list_outputs(self): outputs = self.output_spec().get() outputs["mean_dsamples"] = self._gen_fname("mean_dsamples", cwd=self.inputs.logdir) outputs["mean_S0samples"] = self._gen_fname("mean_S0samples", cwd=self.inputs.logdir) outputs["dyads"] = [] outputs["fsamples"] = [] outputs["mean_fsamples"] = [] outputs["phsamples"] = [] outputs["thsamples"] = [] for i in range(1, self.inputs.n_fibres + 1): outputs["dyads"].append(self._gen_fname("dyads%d" % i, cwd=self.inputs.logdir)) outputs["fsamples"].append(self._gen_fname("f%dsamples" % i, cwd=self.inputs.logdir)) outputs["mean_fsamples"].append(self._gen_fname("mean_f%dsamples" % i, cwd=self.inputs.logdir)) outputs["phsamples"].append(self._gen_fname("ph%dsamples" % i, cwd=self.inputs.logdir)) outputs["thsamples"].append(self._gen_fname("th%dsamples" % i, cwd=self.inputs.logdir)) return outputs class MakeDyadicVectorsInputSpec(FSLCommandInputSpec): theta_vol = File(exists=True, mandatory=True, position=0, argstr="%s") phi_vol = File(exists=True, mandatory=True, position=1, argstr="%s") mask = File(exists=True, position=2, argstr="%s") output = File("dyads", position=3, usedefault=True, argstr="%s", hash_files=False) perc = traits.Float(desc="the {perc}% angle of the output cone of \ uncertainty (output will be in degrees)", position=4, argstr="%f") class MakeDyadicVectorsOutputSpec(TraitedSpec): dyads = File(exists=True) dispersion = File(exists=True) class MakeDyadicVectors(FSLCommand): """Create vector volume representing mean principal diffusion direction and its uncertainty (dispersion)""" _cmd = "make_dyadic_vectors" input_spec = MakeDyadicVectorsInputSpec output_spec = MakeDyadicVectorsOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["dyads"] = self._gen_fname(self.inputs.output) outputs["dispersion"] = self._gen_fname(self.inputs.output, suffix="_dispersion") return outputs nipype-0.9.2/nipype/interfaces/fsl/epi.py000066400000000000000000000625111227300005300203650ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL `_ command line tools. This was written to work with FSL version 5.0.4. 
Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os from glob import glob import warnings import numpy as np import nibabel as nib from nipype.interfaces.fsl.base import FSLCommand, FSLCommandInputSpec, Info from nipype.interfaces.base import (traits, TraitedSpec, InputMultiPath, File, isdefined, Undefined ) from nipype.utils.filemanip import load_json, save_json, split_filename, fname_presuffix warn = warnings.warn warnings.filterwarnings('always', category=UserWarning) class PrepareFieldmapInputSpec(FSLCommandInputSpec): scanner = traits.String('SIEMENS', argstr='%s', position=1, desc='must be SIEMENS', usedefault=True) in_phase = File( exists=True, argstr='%s', position=2, mandatory=True, desc='Phase difference map, in SIEMENS format range from 0-4096 or 0-8192 )' ) in_magnitude = File(exists=True, argstr='%s', position=3, mandatory=True, desc='Magnitude difference map, brain extracted') delta_TE = traits.Float(2.46, usedefault=True, mandatory=True, argstr='%f', position=-2, desc='echo time difference of the fielmap sequence in ms. (usually 2.46ms in Siemens)') nocheck = traits.Bool(False, position=-1, argstr='--nocheck',usedefault=True, desc='do not perform sanity checks for image size/range/dimensions') out_fieldmap = File( argstr='%s', position=5, desc='output name for prepared fieldmap' ) class PrepareFieldmapOutputSpec( TraitedSpec ): out_fieldmap = File( exists=True, desc='output name for prepared fieldmap' ) class PrepareFieldmap(FSLCommand): """ Interface for the fsl_prepare_fieldmap script (FSL 5.0) Prepares a fieldmap suitable for FEAT from SIEMENS data - saves output in rad/s format e.g. 
fsl_prepare_fieldmap SIEMENS images_3_gre_field_mapping images_4_gre_field_mapping fmap_rads 2.65 Examples -------- >>> from nipype.interfaces.fsl import PrepareFieldmap >>> prepare = PrepareFieldmap() >>> prepare.inputs.in_phase = "phase.nii" >>> prepare.inputs.in_magnitude = "magnitude.nii" >>> prepare.inputs.output_type = "NIFTI_GZ" >>> prepare.cmdline #doctest: +ELLIPSIS 'fsl_prepare_fieldmap SIEMENS phase.nii magnitude.nii .../phase_fslprepared.nii.gz 2.460000' >>> res = prepare.run() # doctest: +SKIP """ _cmd = 'fsl_prepare_fieldmap' input_spec = PrepareFieldmapInputSpec output_spec = PrepareFieldmapOutputSpec def _parse_inputs( self, skip=None ): if skip is None: skip = [] if not isdefined(self.inputs.out_fieldmap ): self.inputs.out_fieldmap = self._gen_fname( self.inputs.in_phase, suffix='_fslprepared' ) if not isdefined(self.inputs.nocheck ) or not self.inputs.nocheck: skip += ['nocheck'] return super(PrepareFieldmap, self)._parse_inputs(skip=skip) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_fieldmap'] = self.inputs.out_fieldmap return outputs def _run_interface( self, runtime ): runtime = super( PrepareFieldmap, self )._run_interface(runtime) if runtime.returncode == 0: out_file = self.inputs.out_fieldmap im = nib.load( out_file ) dumb_img = nib.Nifti1Image(np.zeros( im.get_shape()), im.get_affine(), im.get_header()) out_nii = nib.funcs.concat_images((im, dumb_img)) nib.save( out_nii, out_file ) return runtime class TOPUPInputSpec( FSLCommandInputSpec ): in_file = File( exists=True, mandatory=True, desc='name of 4D file with images', argstr='--imain=%s' ) encoding_file = File( exists=True, desc='name of text file with PE directions/times', argstr='--datain=%s' ) encoding_direction = traits.Enum( 'y','x','z','x-','y-','z-', desc='encoding direction for automatic generation of encoding_file' ) readout_times = traits.List(traits.Float, desc='readout times (dwell times by # phase-encode steps minus 1)' ) out_base = File( desc='base-name of output files (spline coefficients (Hz) and movement parameters)', argstr='--out=%s' ) out_field = File( argstr='--fout=%s', desc='name of image file with field (Hz)' ) out_corrected = File( argstr='--iout=%s', desc='name of 4D image file with unwarped images' ) out_logfile = File( argstr='--logout=%s', desc='name of log-file' ) warp_res = traits.Float( 10.0, argstr='--warpres=%f', desc='(approximate) resolution (in mm) of warp basis for the different sub-sampling levels' ) subsamp = traits.Int( 1, argstr='--subsamp=%d', desc='sub-sampling scheme, default 1' ) fwhm = traits.Float( 8.0, argstr='--fwhm=%f', desc='FWHM (in mm) of gaussian smoothing kernel' ) config = traits.String('b02b0.cnf', desc='Name of config file specifying command line arguments', argstr='--config=%s', usedefault=True ) max_iter = traits.Int( 5, argstr='--miter=%d', desc='max # of non-linear iterations') # @oesteban: I don't know how to implement these 3 parameters, AFAIK there's no documentation. #lambda Weight of regularisation, default depending on --ssqlambda and --regmod switches. See user documetation. 
#ssqlambda If set (=1), lambda is weighted by current ssq, default 1 #regmod Model for regularisation of warp-field [membrane_energy bending_energy], default bending_energy estmov = traits.Enum( 1, 0, desc='estimate movements if set', argstr='--estmov=%d' ) minmet = traits.Enum( 0, 1, desc='Minimisation method 0=Levenberg-Marquardt, 1=Scaled Conjugate Gradient', argstr='--minmet=%d' ) splineorder = traits.Int( 3, argstr='--splineorder=%d', desc='order of spline, 2->Qadratic spline, 3->Cubic spline' ) numprec = traits.Enum( 'double', 'float', argstr='--numprec=%s', desc='Precision for representing Hessian, double or float.' ) interp = traits.Enum( 'spline', 'linear' , argstr='--interp=%s', desc='Image interpolation model, linear or spline.' ) scale = traits.Enum( 0, 1, argstr='--scale=%d', desc='If set (=1), the images are individually scaled to a common mean' ) regrid = traits.Enum( 1, 0, argstr='--regrid=%d', desc='If set (=1), the calculations are done in a different grid' ) class TOPUPOutputSpec( TraitedSpec ): out_fieldcoef = File( exists=True, desc='file containing the field coefficients' ) out_movpar = File( exists=True, desc='movpar.txt output file' ) out_enc_file= File( desc='encoding directions file output for applytopup' ) out_topup = File( desc='basename for the _fieldcoef.nii.gz and _movpar.txt files' ) out_field = File( desc='name of image file with field (Hz)' ) out_corrected = File( desc='name of 4D image file with unwarped images' ) out_logfile = File( desc='name of log-file' ) class TOPUP( FSLCommand ): """ Interface for FSL topup, a tool for estimating and correcting susceptibility induced distortions Reference: http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/TOPUP Example: http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/ExampleTopupFollowedByApplytopup topup --imain= --datain= --config= --coutname=my_field Examples -------- >>> from nipype.interfaces.fsl import TOPUP >>> topup = TOPUP() >>> topup.inputs.in_file = "b0_b0rev.nii" >>> topup.inputs.encoding_file = "topup_encoding.txt" >>> topup.cmdline #doctest: +ELLIPSIS 'topup --config=b02b0.cnf --datain=topup_encoding.txt --imain=b0_b0rev.nii --out=.../nipypetu' >>> res = topup.run() # doctest: +SKIP """ _cmd = 'topup' input_spec = TOPUPInputSpec output_spec = TOPUPOutputSpec def _parse_inputs( self, skip=None ): if skip is None: skip = [] if not isdefined(self.inputs.out_base ): self.inputs.out_base = './nipypetu' self.inputs.out_base = os.path.abspath(self.inputs.out_base) if isdefined( self.inputs.encoding_file ): skip.append( 'encoding_direction' ) skip.append( 'readout_times' ) else: encdir = 'y' enctimes = None if isdefined( self.inputs.encoding_direction ): encdir = self.inputs.encoding_direction if isdefined( self.inputs.readout_times ): enctimes = self.inputs.readout_times self.inputs.encoding_file = self._generate_encfile( encdir, enctimes ) return super(TOPUP, self)._parse_inputs(skip=skip) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_topup'] = self.inputs.out_base outputs['out_fieldcoef'] = '%s_%s.nii.gz' % (self.inputs.out_base, 'fieldcoef' ) outputs['out_movpar'] = '%s_%s.txt' % (self.inputs.out_base, 'movpar' ) outputs['out_enc_file'] = self.inputs.encoding_file if isdefined( self.inputs.out_field ): outputs['out_field'] = self.inputs.out_field else: outputs['out_field'] = Undefined if isdefined( self.inputs.out_corrected ): outputs['out_corrected'] = self.inputs.out_corrected else: outputs['out_corrected'] = Undefined if isdefined( self.inputs.out_logfile ): outputs['out_logfile'] = 
self.inputs.out_logfile else: outputs['out_logfile'] = Undefined return outputs def _generate_encfile( self, encdir, enctime=None ): out_file = '%s_encfile.txt' % self.inputs.out_base direction = 1.0 if len(encdir)==2 and encdir[1]=='-': direction = -1.0 if enctime is None: enctime=[ 1.0, 1.0 ] file1 = [ float(val[0]==encdir[0]) * direction for val in [ 'x', 'y', 'z' ] ] file2 = [ float(val[0]==encdir[0]) * direction * -1.0 for val in [ 'x', 'y', 'z' ] ] file1.append( enctime[0] ) file2.append( enctime[1] ) np.savetxt( out_file, np.array( [ file1, file2 ] ), fmt='%.2f' ) return out_file class ApplyTOPUPInputSpec( FSLCommandInputSpec ): in_files = InputMultiPath(File(exists=True), mandatory=True, desc='name of 4D file with images', argstr='%s' ) encoding_file = File( exists=True, mandatory=True, desc='name of text file with PE directions/times', argstr='--datain=%s' ) in_index = traits.List( argstr='%s', mandatory=True, desc='comma separated list of indicies into --datain of the input image (to be corrected)' ) in_topup = File( mandatory=True, desc='basename of field/movements (from topup)', argstr='--topup=%s' ) out_base = File( desc='basename for output (warped) image', argstr='--out=%s' ) method = traits.Enum( ('jac','lsr'), argstr='--method=%s', desc='use jacobian modulation (jac) or least-squares resampling (lsr)' ) interp = traits.Enum( ('trilinear','spline'), argstr='--interp=%s', desc='interpolation method' ) datatype = traits.Enum( ('char', 'short', 'int', 'float', 'double' ), argstr='-d=%s', desc='force output data type' ) class ApplyTOPUPOutputSpec( TraitedSpec ): out_corrected = File( exists=True, desc='name of 4D image file with unwarped images' ) class ApplyTOPUP( FSLCommand ): """ Interface for FSL topup, a tool for estimating and correcting susceptibility induced distortions. `General reference `_ and `use example `_. 
Examples -------- >>> from nipype.interfaces.fsl import ApplyTOPUP >>> applytopup = ApplyTOPUP() >>> applytopup.inputs.in_files = [ "epi.nii", "epi_rev.nii" ] >>> applytopup.inputs.encoding_file = "topup_encoding.txt" >>> applytopup.inputs.in_index = [ 1,2 ] >>> applytopup.inputs.in_topup = "my_topup_results" >>> applytopup.cmdline #doctest: +ELLIPSIS 'applytopup --datain=topup_encoding.txt --imain=epi.nii,epi_rev.nii --inindex=1,2 --topup=my_topup_results --out=.../nipypeatu' >>> res = applytopup.run() # doctest: +SKIP """ _cmd = 'applytopup' input_spec = ApplyTOPUPInputSpec output_spec = ApplyTOPUPOutputSpec def _format_arg(self, name, spec, value): # first do what should be done in general formated = super(ApplyTOPUP, self)._format_arg(name, spec, value) if name == 'in_files' or name == 'in_index': if name == 'in_files': formated = '--imain=' else: formated = '--inindex=' formated = formated + "%s" % value[0] for fname in value[1:]: formated = formated + ",%s" % fname return formated def _parse_inputs( self, skip=None ): if skip is None: skip = [] if not isdefined(self.inputs.out_base ): self.inputs.out_base = './nipypeatu' self.inputs.out_base = os.path.abspath(self.inputs.out_base) return super(ApplyTOPUP, self)._parse_inputs(skip=skip) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_corrected'] = '%s.nii.gz' % self.inputs.out_base return outputs class EddyInputSpec( FSLCommandInputSpec ): in_file = File(exists=True, mandatory=True, desc='File containing all the images to estimate distortions for', argstr='--imain=%s' ) in_mask = File(exists=True, mandatory=True, desc='Mask to indicate brain', argstr='--mask=%s' ) in_index = File(exists=True, mandatory=True, desc='File containing indices for all volumes in --imain into --acqp and --topup', argstr='--index=%s' ) in_acqp = File(exists=True, mandatory=True, desc='File containing acquisition parameters', argstr='--acqp=%s' ) in_bvec = File(exists=True, mandatory=True, desc='File containing the b-vectors for all volumes in --imain', argstr='--bvecs=%s' ) in_bval = File(exists=True, mandatory=True, desc='File containing the b-values for all volumes in --imain', argstr='--bvals=%s' ) out_base = File( desc='basename for output (warped) image', argstr='--out=%s' ) session = File(exists=True, desc='File containing session indices for all volumes in --imain', argstr='--session=%s' ) in_topup = File(exists=True, desc='Base name for output files from topup', argstr='--topup=%s' ) flm = traits.Enum( ('linear','quadratic','cubic'), desc='First level EC model', argstr='--flm=%s' ) fwhm = traits.Float( desc='FWHM for conditioning filter when estimating the parameters', argstr='--fwhm=%s' ) niter = traits.Int( 5, desc='Number of iterations', argstr='--niter=%s' ) method = traits.Enum( ('jac','lsr'), argstr='--resamp=%s', desc='Final resampling method (jacobian/least squeares)' ) repol = traits.Bool( False, desc='Detect and replace outlier slices', argstr='--repol' ) class EddyOutputSpec( TraitedSpec ): out_corrected = File( exists=True, desc='4D image file containing all the corrected volumes' ) out_parameter = File( exists=True, desc='text file with parameters definining the field and movement for each scan') class Eddy( FSLCommand ): """ Interface for FSL eddy, a tool for estimating and correcting eddy currents induced distortions. `User guide `_ and `more info regarding acqp file `_. 
Examples -------- >>> from nipype.interfaces.fsl import Eddy >>> eddy = Eddy() >>> eddy.inputs.in_file = 'epi.nii' >>> eddy.inputs.in_mask = 'epi_mask.nii' >>> eddy.inputs.in_index = 'epi_index.txt' >>> eddy.inputs.in_acqp = 'epi_acqp.txt' >>> eddy.inputs.in_bvec = 'bvecs.scheme' >>> eddy.inputs.in_bval = 'bvals.scheme' >>> eddy.cmdline #doctest: +ELLIPSIS 'eddy --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --out=.../eddy_corrected' >>> res = eddy.run() # doctest: +SKIP """ _cmd = 'eddy' input_spec = EddyInputSpec output_spec = EddyOutputSpec def _parse_inputs( self, skip=None ): if skip is None: skip = [] if not isdefined(self.inputs.out_base ): self.inputs.out_base = os.path.abspath( './eddy_corrected' ) return super(Eddy, self)._parse_inputs(skip=skip) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_corrected'] = '%s.nii.gz' % self.inputs.out_base outputs['out_parameter'] = '%s..eddy_parameters' % self.inputs.out_base return outputs class EPIDeWarpInputSpec(FSLCommandInputSpec): mag_file = File(exists=True, desc='Magnitude file', argstr='--mag %s', position=0, mandatory=True) dph_file = File(exists=True, desc='Phase file assumed to be scaled from 0 to 4095', argstr='--dph %s', mandatory=True) exf_file = File(exists=True, desc='example func volume (or use epi)', argstr='--exf %s') epi_file = File(exists=True, desc='EPI volume to unwarp', argstr='--epi %s') tediff = traits.Float(2.46, usedefault=True, desc='difference in B0 field map TEs', argstr='--tediff %s') esp = traits.Float(0.58, desc='EPI echo spacing', argstr='--esp %s', usedefault=True) sigma = traits.Int(2, usedefault=True, argstr='--sigma %s', desc="2D spatial gaussing smoothing \ stdev (default = 2mm)") vsm = traits.String(genfile=True, desc='voxel shift map', argstr='--vsm %s') exfdw = traits.String(desc='dewarped example func volume', genfile=True, argstr='--exfdw %s') epidw = traits.String(desc='dewarped epi volume', genfile=False, argstr='--epidw %s') tmpdir = traits.String(genfile=True, desc='tmpdir', argstr='--tmpdir %s') nocleanup = traits.Bool(True, usedefault=True, desc='no cleanup', argstr='--nocleanup') cleanup = traits.Bool(desc='cleanup', argstr='--cleanup') class EPIDeWarpOutputSpec(TraitedSpec): unwarped_file = File(desc="unwarped epi file") vsm_file = File(desc="voxel shift map") exfdw = File(desc="dewarped functional volume example") exf_mask = File(desc="Mask from example functional volume") class EPIDeWarp(FSLCommand): """Wraps fieldmap unwarping script from Freesurfer's epidewarp.fsl_ Examples -------- >>> from nipype.interfaces.fsl import EPIDeWarp >>> dewarp = EPIDeWarp() >>> dewarp.inputs.epi_file = "functional.nii" >>> dewarp.inputs.mag_file = "magnitude.nii" >>> dewarp.inputs.dph_file = "phase.nii" >>> dewarp.inputs.output_type = "NIFTI_GZ" >>> dewarp.cmdline #doctest: +ELLIPSIS 'epidewarp.fsl --mag magnitude.nii --dph phase.nii --epi functional.nii --esp 0.58 --exfdw .../exfdw.nii.gz --nocleanup --sigma 2 --tediff 2.46 --tmpdir .../temp --vsm .../vsm.nii.gz' >>> res = dewarp.run() # doctest: +SKIP References ---------- _epidewarp.fsl: http://surfer.nmr.mgh.harvard.edu/fswiki/epidewarp.fsl """ _cmd = 'epidewarp.fsl' input_spec = EPIDeWarpInputSpec output_spec = EPIDeWarpOutputSpec def _gen_filename(self, name): if name == 'exfdw': if isdefined(self.inputs.exf_file): return self._gen_fname(self.inputs.exf_file, suffix="_exfdw") else: return self._gen_fname("exfdw") if name == 'epidw': if 
isdefined(self.inputs.epi_file): return self._gen_fname(self.inputs.epi_file, suffix="_epidw") if name == 'vsm': return self._gen_fname('vsm') if name == 'tmpdir': return os.path.join(os.getcwd(), 'temp') return None def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.exfdw): outputs['exfdw'] = self._gen_filename('exfdw') else: outputs['exfdw'] = self.inputs.exfdw if isdefined(self.inputs.epi_file): if isdefined(self.inputs.epidw): outputs['unwarped_file'] = self.inputs.epidw else: outputs['unwarped_file'] = self._gen_filename('epidw') if not isdefined(self.inputs.vsm): outputs['vsm_file'] = self._gen_filename('vsm') else: outputs['vsm_file'] = self._gen_fname(self.inputs.vsm) if not isdefined(self.inputs.tmpdir): outputs[ 'exf_mask'] = self._gen_fname(cwd=self._gen_filename('tmpdir'), basename='maskexf') else: outputs['exf_mask'] = self._gen_fname(cwd=self.inputs.tmpdir, basename='maskexf') return outputs class SigLossInputSpec(FSLCommandInputSpec): in_file = File(mandatory=True, exists=True, argstr='-i %s', desc='b0 fieldmap file') out_file = File(argstr='-s %s', desc='output signal loss estimate file', genfile=True) mask_file = File(exists=True, argstr='-m %s', desc='brain mask file') echo_time = traits.Float(argstr='--te=%f', desc='echo time in seconds') slice_direction = traits.Enum('x','y','z', argstr='-d %s', desc='slicing direction') class SigLossOuputSpec(TraitedSpec): out_file = File(exists=True, desc='signal loss estimate file') class SigLoss(FSLCommand): """Estimates signal loss from a field map (in rad/s) Examples -------- >>> from nipype.interfaces.fsl import SigLoss >>> sigloss = SigLoss() >>> sigloss.inputs.in_file = "phase.nii" >>> sigloss.inputs.echo_time = 0.03 >>> sigloss.inputs.output_type = "NIFTI_GZ" >>> sigloss.cmdline #doctest: +ELLIPSIS 'sigloss --te=0.030000 -i phase.nii -s .../phase_sigloss.nii.gz' >>> res = sigloss.run() # doctest: +SKIP """ input_spec = SigLossInputSpec output_spec = SigLossOuputSpec _cmd = 'sigloss' def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_file if not isdefined(outputs['out_file']) and isdefined(self.inputs.in_file): outputs['out_file']=self._gen_fname(self.inputs.in_file, suffix='_sigloss') return outputs def _gen_filename(self, name): if name=='out_file': return self._list_outputs()['out_file'] return None class EddyCorrectInputSpec(FSLCommandInputSpec): in_file = File(exists=True, desc='4D input file', argstr='%s', position=0, mandatory=True) out_file = File(desc='4D output file', argstr='%s', position=1, genfile=True, hash_files=False) ref_num = traits.Int(argstr='%d', position=2, desc='reference number', mandatory=True) class EddyCorrectOutputSpec(TraitedSpec): eddy_corrected = File(exists=True, desc='path/name of 4D eddy corrected output file') class EddyCorrect(FSLCommand): """ Deprecated! 
Please use create_eddy_correct_pipeline instead Example ------- >>> from nipype.interfaces.fsl import EddyCorrect >>> eddyc = EddyCorrect(in_file='diffusion.nii', out_file="diffusion_edc.nii", ref_num=0) >>> eddyc.cmdline 'eddy_correct diffusion.nii diffusion_edc.nii 0' """ _cmd = 'eddy_correct' input_spec = EddyCorrectInputSpec output_spec = EddyCorrectOutputSpec def __init__(self, **inputs): warnings.warn("Deprecated: Please use create_eddy_correct_pipeline instead", DeprecationWarning) return super(EddyCorrect, self).__init__(**inputs) def _run_interface(self, runtime): if not isdefined(self.inputs.out_file): self.inputs.out_file = self._gen_fname(self.inputs.in_file, suffix='_edc') runtime = super(EddyCorrect, self)._run_interface(runtime) if runtime.stderr: self.raise_exception(runtime) return runtime def _list_outputs(self): outputs = self.output_spec().get() outputs['eddy_corrected'] = self.inputs.out_file if not isdefined(outputs['eddy_corrected']): outputs['eddy_corrected'] = self._gen_fname(self.inputs.in_file, suffix='_edc') outputs['eddy_corrected'] = os.path.abspath(outputs['eddy_corrected']) return outputs def _gen_filename(self, name): if name is 'out_file': return self._list_outputs()['eddy_corrected'] else: return None nipype-0.9.2/nipype/interfaces/fsl/maths.py000066400000000000000000000254401227300005300207240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The maths module provides higher-level interfaces to some of the operations that can be performed with the fslmaths command-line program. """ import os import numpy as np from nipype.interfaces.fsl.base import FSLCommand, FSLCommandInputSpec from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath, isdefined) class MathsInput(FSLCommandInputSpec): in_file = File(position=2, argstr="%s", exists=True, mandatory=True, desc="image to operate on") out_file = File(genfile=True, position=-2, argstr="%s", desc="image to write", hash_files=False) _dtypes = ["float", "char", "int", "short", "double", "input"] internal_datatype = traits.Enum(*_dtypes, position=1, argstr="-dt %s", desc="datatype to use for calculations (default is float)") output_datatype = traits.Enum(*_dtypes, position=-1, argstr="-odt %s", desc="datatype to use for output (default uses input type)") nan2zeros = traits.Bool(position=3, argstr='-nan', desc='change NaNs to zeros before doing anything') class MathsOutput(TraitedSpec): out_file = File(exists=True, desc="image written after calculations") class MathsCommand(FSLCommand): _cmd = "fslmaths" input_spec = MathsInput output_spec = MathsOutput _suffix = "_maths" def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = self.inputs.out_file if not isdefined(self.inputs.out_file): outputs["out_file"] = self._gen_fname(self.inputs.in_file, suffix=self._suffix) outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): if name == "out_file": return self._list_outputs()["out_file"] return None class ChangeDataTypeInput(MathsInput): _dtypes = ["float", "char", "int", "short", "double", "input"] output_datatype = traits.Enum(*_dtypes, position=-1, argstr="-odt %s", mandatory=True, desc="output data type") class ChangeDataType(MathsCommand): """Use fslmaths to change the datatype of an image. 
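    Example
    -------
    A minimal usage sketch (added for illustration, not an original doctest;
    ``functional.nii`` is a placeholder input image and the call is skipped
    by doctest):

    >>> from nipype.interfaces.fsl.maths import ChangeDataType
    >>> chdt = ChangeDataType(in_file="functional.nii", output_datatype="char")  # doctest: +SKIP
    >>> res = chdt.run()  # doctest: +SKIP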
""" input_spec = ChangeDataTypeInput _suffix = "_chdt" class ThresholdInputSpec(MathsInput): thresh = traits.Float(mandatory=True, position=4, argstr="%s", desc="threshold value") direction = traits.Enum("below", "above", usedefault=True, desc="zero-out either below or above thresh value") use_robust_range = traits.Bool(desc="inteperet thresh as percentage (0-100) of robust range") use_nonzero_voxels = traits.Bool(desc="use nonzero voxels to caluclate robust range", requires=["use_robust_range"]) class Threshold(MathsCommand): """Use fslmaths to apply a threshold to an image in a variety of ways. """ input_spec = ThresholdInputSpec _suffix = "_thresh" def _format_arg(self, name, spec, value): if name == "thresh": arg = "-" _si = self.inputs if self.inputs.direction == "above": arg += "u" arg += "thr" if isdefined(_si.use_robust_range) and _si.use_robust_range: if isdefined(_si.use_nonzero_voxels) and _si.use_nonzero_voxels: arg += "P" else: arg += "p" arg += " %.10f" % value return arg return super(Threshold, self)._format_arg(name, spec, value) class MeanImageInput(MathsInput): dimension = traits.Enum("T", "X", "Y", "Z", usedefault=True, argstr="-%smean", position=4, desc="dimension to mean across") class MeanImage(MathsCommand): """Use fslmaths to generate a mean image across a given dimension. """ input_spec = MeanImageInput _suffix = "_mean" class MaxImageInput(MathsInput): dimension = traits.Enum("T", "X", "Y", "Z", usedefault=True, argstr="-%smax", position=4, desc="dimension to max across") class MaxImage(MathsCommand): """Use fslmaths to generate a max image across a given dimension. Examples -------- from nipype.interfaces.fsl.maths import MaxImage maxer = MaxImage() maxer.inputs.in_file = "functional.nii" maxer.dimension = "T" maths.cmdline fslmaths functional.nii -Tmax functional_max.nii """ input_spec = MaxImageInput _suffix = "_max" class IsotropicSmoothInput(MathsInput): fwhm = traits.Float(mandatory=True, xor=["sigma"], position=4, argstr="-s %.5f", desc="fwhm of smoothing kernel [mm]") sigma = traits.Float(mandatory=True, xor=["fwhm"], position=4, argstr="-s %.5f", desc="sigma of smoothing kernel [mm]") class IsotropicSmooth(MathsCommand): """Use fslmaths to spatially smooth an image with a gaussian kernel. """ input_spec = IsotropicSmoothInput _suffix = "_smooth" def _format_arg(self, name, spec, value): if name == "fwhm": sigma = float(value) / np.sqrt(8 * np.log(2)) return spec.argstr % sigma return super(IsotropicSmooth, self)._format_arg(name, spec, value) class ApplyMaskInput(MathsInput): mask_file = File(exists=True, mandatory=True, argstr="-mas %s", position=4, desc="binary image defining mask space") class ApplyMask(MathsCommand): """Use fslmaths to apply a binary mask to another image. """ input_spec = ApplyMaskInput _suffix = "_masked" class KernelInput(MathsInput): kernel_shape = traits.Enum("3D", "2D", "box", "boxv", "gauss", "sphere", "file", argstr="-kernel %s", position=4, desc="kernel shape to use") kernel_size = traits.Float(argstr="%.4f", position=5, xor=["kernel_file"], desc="kernel size - voxels for box/boxv, mm for sphere, mm sigma for gauss") kernel_file = File(exists=True, argstr="%s", position=5, xor=["kernel_size"], desc="use external file for kernel") class DilateInput(KernelInput): operation = traits.Enum("mean", "modal", "max", argstr="-dil%s", position=6, mandatory=True, desc="filtering operation to perfoem in dilation") class DilateImage(MathsCommand): """Use fslmaths to perform a spatial dilation of an image. 
""" input_spec = DilateInput _suffix = "_dil" def _format_arg(self, name, spec, value): if name == "operation": return spec.argstr % dict(mean="M", modal="D", max="F")[value] return super(DilateImage, self)._format_arg(name, spec, value) class ErodeInput(KernelInput): minimum_filter = traits.Bool(argstr="%s", position=6, usedefault=True, default_value=False, desc="if true, minimum filter rather than erosion by zeroing-out") class ErodeImage(MathsCommand): """Use fslmaths to perform a spatial erosion of an image. """ input_spec = ErodeInput _suffix = "_ero" def _format_arg(self, name, spec, value): if name == "minimum_filter": if value: return "-eroF" return "-ero" return super(ErodeImage, self)._format_arg(name, spec, value) class SpatialFilterInput(KernelInput): operation = traits.Enum("mean", "median", "meanu", argstr="-f%s", position=6, mandatory=True, desc="operation to filter with") class SpatialFilter(MathsCommand): """Use fslmaths to spatially filter an image. """ input_spec = SpatialFilterInput _suffix = "_filt" class UnaryMathsInput(MathsInput): operation = traits.Enum("exp", "log", "sin", "cos", "sqr", "sqrt", "recip", "abs", "bin", "index", argstr="-%s", position=4, mandatory=True, desc="operation to perform") class UnaryMaths(MathsCommand): """Use fslmaths to perorm a variety of mathematical operations on an image. """ input_spec = UnaryMathsInput def _list_outputs(self): self._suffix = "_" + self.inputs.operation return super(UnaryMaths, self)._list_outputs() class BinaryMathsInput(MathsInput): operation = traits.Enum("add", "sub", "mul", "div", "rem", "max", "min", mandatory=True, argstr="-%s", position=4, desc="operation to perform") operand_file = File(exists=True, argstr="%s", mandatory=True, position=5, xor=["operand_value"], desc="second image to perform operation with") operand_value = traits.Float(argstr="%.8f", mandatory=True, position=5, xor=["operand_file"], desc="value to perform operation with") class BinaryMaths(MathsCommand): """Use fslmaths to perform mathematical operations using a second image or a numeric value. """ input_spec = BinaryMathsInput class MultiImageMathsInput(MathsInput): op_string = traits.String(position=4, argstr="%s", mandatory=True, desc="python formatted string of operations to perform") operand_files = InputMultiPath(File(exists=True), mandatory=True, desc="list of file names to plug into op string") class MultiImageMaths(MathsCommand): """Use fslmaths to perform a sequence of mathematical operations. Examples -------- from nipype.interfaces.fsl import MultiImageMaths maths = MultiImageMaths() maths.inputs.in_file = "functional.nii" maths.inputs.op_string = "-add %s -mul -1 -div %s" maths.inputs.operand_files = ["functional2.nii", "functional3.nii"] maths.inputs.out_file = functional4.nii maths.cmdline fslmaths functional1.nii -add functional2.nii -mul -1 -div functional3.nii functional4.nii """ input_spec = MultiImageMathsInput def _format_arg(self, name, spec, value): if name == "op_string": return value % tuple(self.inputs.operand_files) return super(MultiImageMaths, self)._format_arg(name, spec, value) class TemporalFilterInput(MathsInput): lowpass_sigma = traits.Float(-1, argstr="%.6f", position=5, usedefault=True, desc="lowpass filter sigma (in volumes)") highpass_sigma = traits.Float(-1, argstr="-bptf %.6f", position=4, usedefault=True, desc="highpass filter sigma (in volumes)") class TemporalFilter(MathsCommand): """Use fslmaths to apply a low, high, or bandpass temporal filter to a timeseries. 
""" input_spec = TemporalFilterInput _suffix = "_filt" nipype-0.9.2/nipype/interfaces/fsl/model.py000066400000000000000000002417751227300005300207230ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL `_ command line tools. This was written to work with FSL version 4.1.4. Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os from glob import glob import warnings from shutil import rmtree import numpy as np from nibabel import load from ... import LooseVersion from .base import (FSLCommand, FSLCommandInputSpec, Info) from ..base import (load_template, File, traits, isdefined, TraitedSpec, BaseInterface, Directory, InputMultiPath, OutputMultiPath, BaseInterfaceInputSpec) from ...utils.filemanip import (list_to_filename, filename_to_list) from ...utils.misc import human_order_sorted warn = warnings.warn warnings.filterwarnings('always', category=UserWarning) class Level1DesignInputSpec(BaseInterfaceInputSpec): interscan_interval = traits.Float(mandatory=True, desc='Interscan interval (in secs)') session_info = traits.Any(mandatory=True, desc='Session specific information generated by ``modelgen.SpecifyModel``') bases = traits.Either( traits.Dict(traits.Enum( 'dgamma'), traits.Dict(traits.Enum('derivs'), traits.Bool)), traits.Dict(traits.Enum('gamma'), traits.Dict( traits.Enum('derivs'), traits.Bool)), traits.Dict(traits.Enum('none'), traits.Enum(None)), mandatory=True, desc="name of basis function and options e.g., {'dgamma': {'derivs': True}}") model_serial_correlations = traits.Bool( desc="Option to model serial correlations using an \ autoregressive estimator (order 1). Setting this option is only \ useful in the context of the fsf file. If you set this to False, you need to repeat \ this option for FILMGLS by setting autocorr_noestimate to True", mandatory=True) contrasts = traits.List( traits.Either(traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('F'), traits.List( traits.Either(traits.Tuple(traits.Str, traits.Enum( 'T'), traits.List( traits.Str), traits.List( traits.Float)), traits.Tuple( traits.Str, traits.Enum( 'T'), traits.List( traits.Str), traits.List( traits.Float), traits.List( traits.Float)))))), desc="List of contrasts with each contrast being a list of the form - \ [('name', 'stat', [condition list], [weight list], [session list])]. if \ session list is None or not provided, all sessions are used. 
For F \ contrasts, the condition list should contain previously defined \ T-contrasts.") class Level1DesignOutputSpec(TraitedSpec): fsf_files = OutputMultiPath(File(exists=True), desc='FSL feat specification files') ev_files = OutputMultiPath(traits.List(File(exists=True)), desc='condition information files') class Level1Design(BaseInterface): """Generate FEAT specific files Examples -------- >>> level1design = Level1Design() >>> level1design.inputs.interscan_interval = 2.5 >>> level1design.inputs.bases = {'dgamma':{'derivs': False}} >>> level1design.inputs.session_info = 'session_info.npz' >>> level1design.run() # doctest: +SKIP """ input_spec = Level1DesignInputSpec output_spec = Level1DesignOutputSpec def _create_ev_file(self, evfname, evinfo): f = open(evfname, 'wt') for i in evinfo: if len(i) == 3: f.write('%f %f %f\n' % (i[0], i[1], i[2])) else: f.write('%f\n' % i[0]) f.close() def _create_ev_files( self, cwd, runinfo, runidx, usetd, contrasts, no_bases, do_tempfilter): """Creates EV files from condition and regressor information. Parameters: ----------- runinfo : dict Generated by `SpecifyModel` and contains information about events and other regressors. runidx : int Index to run number usetd : int Whether or not to use temporal derivatives for conditions contrasts : list of lists Information on contrasts to be evaluated """ conds = {} evname = [] ev_hrf = load_template('feat_ev_hrf.tcl') ev_none = load_template('feat_ev_none.tcl') ev_ortho = load_template('feat_ev_ortho.tcl') ev_txt = '' # generate sections for conditions and other nuisance # regressors num_evs = [0, 0] for field in ['cond', 'regress']: for i, cond in enumerate(runinfo[field]): name = cond['name'] evname.append(name) evfname = os.path.join(cwd, 'ev_%s_%d_%d.txt' % (name, runidx, len(evname))) evinfo = [] num_evs[0] += 1 num_evs[1] += 1 if field == 'cond': for j, onset in enumerate(cond['onset']): try: amplitudes = cond['amplitudes'] if len(amplitudes) > 1: amp = amplitudes[j] else: amp = amplitudes[0] except KeyError: amp = 1 if len(cond['duration']) > 1: evinfo.insert(j, [onset, cond['duration'][j], amp]) else: evinfo.insert(j, [onset, cond['duration'][0], amp]) if no_bases: ev_txt += ev_none.substitute(ev_num=num_evs[0], ev_name=name, tempfilt_yn=do_tempfilter, cond_file=evfname) else: ev_txt += ev_hrf.substitute(ev_num=num_evs[0], ev_name=name, tempfilt_yn=do_tempfilter, temporalderiv=usetd, cond_file=evfname) if usetd: evname.append(name + 'TD') num_evs[1] += 1 elif field == 'regress': evinfo = [[j] for j in cond['val']] ev_txt += ev_none.substitute(ev_num=num_evs[0], ev_name=name, tempfilt_yn=do_tempfilter, cond_file=evfname) ev_txt += "\n" conds[name] = evfname self._create_ev_file(evfname, evinfo) # add ev orthogonalization for i in range(1, num_evs[0] + 1): for j in range(0, num_evs[0] + 1): ev_txt += ev_ortho.substitute(c0=i, c1=j) ev_txt += "\n" # add contrast info to fsf file if isdefined(contrasts): contrast_header = load_template('feat_contrast_header.tcl') contrast_prolog = load_template('feat_contrast_prolog.tcl') contrast_element = load_template('feat_contrast_element.tcl') contrast_ftest_element = load_template( 'feat_contrast_ftest_element.tcl') contrastmask_header = load_template('feat_contrastmask_header.tcl') contrastmask_footer = load_template('feat_contrastmask_footer.tcl') contrastmask_element = load_template( 'feat_contrastmask_element.tcl') # add t/f contrast info ev_txt += contrast_header.substitute() con_names = [] for j, con in enumerate(contrasts): con_names.append(con[0]) con_map = {} 
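            # The loop below partitions the contrast list: ttest_idx and
            # ftest_idx collect the indices of T- and F-contrasts, while
            # con_map records, for each T-contrast name, the indices of the
            # F-contrasts that reference it (used further down when writing
            # the ftest elements of the fsf file).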
ftest_idx = [] ttest_idx = [] for j, con in enumerate(contrasts): if con[1] == 'F': ftest_idx.append(j) for c in con[2]: if c[0] not in con_map.keys(): con_map[c[0]] = [] con_map[c[0]].append(j) else: ttest_idx.append(j) for ctype in ['real', 'orig']: for j, con in enumerate(contrasts): if con[1] == 'F': continue tidx = ttest_idx.index(j) + 1 ev_txt += contrast_prolog.substitute(cnum=tidx, ctype=ctype, cname=con[0]) count = 0 for c in range(1, len(evname) + 1): if evname[c - 1].endswith('TD') and ctype == 'orig': continue count = count + 1 if evname[c - 1] in con[2]: val = con[3][con[2].index(evname[c - 1])] else: val = 0.0 ev_txt += contrast_element.substitute(cnum=tidx, element=count, ctype=ctype, val=val) ev_txt += "\n" if con[0] in con_map.keys(): for fconidx in con_map[con[0]]: ev_txt += contrast_ftest_element.substitute( cnum=ftest_idx.index(fconidx) + 1, element=tidx, ctype=ctype, val=1) ev_txt += "\n" # add contrast mask info ev_txt += contrastmask_header.substitute() for j, _ in enumerate(contrasts): for k, _ in enumerate(contrasts): if j != k: ev_txt += contrastmask_element.substitute(c1=j + 1, c2=k + 1) ev_txt += contrastmask_footer.substitute() return num_evs, ev_txt def _format_session_info(self, session_info): if isinstance(session_info, dict): session_info = [session_info] return session_info def _get_func_files(self, session_info): """Returns functional files in the order of runs """ func_files = [] for i, info in enumerate(session_info): func_files.insert(i, info['scans']) return func_files def _run_interface(self, runtime): cwd = os.getcwd() fsf_header = load_template('feat_header_l1.tcl') fsf_postscript = load_template('feat_nongui.tcl') prewhiten = 0 if isdefined(self.inputs.model_serial_correlations): prewhiten = int(self.inputs.model_serial_correlations) usetd = 0 no_bases = False basis_key = self.inputs.bases.keys()[0] if basis_key in ['dgamma', 'gamma']: usetd = int(self.inputs.bases[basis_key]['derivs']) if basis_key == 'none': no_bases = True session_info = self._format_session_info(self.inputs.session_info) func_files = self._get_func_files(session_info) n_tcon = 0 n_fcon = 0 if isdefined(self.inputs.contrasts): for i, c in enumerate(self.inputs.contrasts): if c[1] == 'T': n_tcon += 1 elif c[1] == 'F': n_fcon += 1 for i, info in enumerate(session_info): do_tempfilter = 1 if info['hpf'] == np.inf: do_tempfilter = 0 num_evs, cond_txt = self._create_ev_files(cwd, info, i, usetd, self.inputs.contrasts, no_bases, do_tempfilter) nim = load(func_files[i]) (_, _, _, timepoints) = nim.get_shape() fsf_txt = fsf_header.substitute(run_num=i, interscan_interval=self.inputs.interscan_interval, num_vols=timepoints, prewhiten=prewhiten, num_evs=num_evs[0], num_evs_real=num_evs[1], num_tcon=n_tcon, num_fcon=n_fcon, high_pass_filter_cutoff=info[ 'hpf'], temphp_yn=do_tempfilter, func_file=func_files[i]) fsf_txt += cond_txt fsf_txt += fsf_postscript.substitute(overwrite=1) f = open(os.path.join(cwd, 'run%d.fsf' % i), 'w') f.write(fsf_txt) f.close() return runtime def _list_outputs(self): outputs = self.output_spec().get() cwd = os.getcwd() outputs['fsf_files'] = [] outputs['ev_files'] = [] usetd = 0 basis_key = self.inputs.bases.keys()[0] if basis_key in ['dgamma', 'gamma']: usetd = int(self.inputs.bases[basis_key]['derivs']) for runno, runinfo in enumerate(self._format_session_info(self.inputs.session_info)): outputs['fsf_files'].append(os.path.join(cwd, 'run%d.fsf' % runno)) outputs['ev_files'].insert(runno, []) evname = [] for field in ['cond', 'regress']: for i, cond in 
enumerate(runinfo[field]): name = cond['name'] evname.append(name) evfname = os.path.join( cwd, 'ev_%s_%d_%d.txt' % (name, runno, len(evname))) if field == 'cond': if usetd: evname.append(name + 'TD') outputs['ev_files'][runno].append( os.path.join(cwd, evfname)) return outputs class FEATInputSpec(FSLCommandInputSpec): fsf_file = File(exists=True, mandatory=True, argstr="%s", position=0, desc="File specifying the feat design spec file") class FEATOutputSpec(TraitedSpec): feat_dir = Directory(exists=True) class FEAT(FSLCommand): """Uses FSL feat to calculate first level stats """ _cmd = 'feat' input_spec = FEATInputSpec output_spec = FEATOutputSpec def _list_outputs(self): outputs = self._outputs().get() is_ica = False outputs['feat_dir']=None with open(self.inputs.fsf_file, 'rt') as fp: text = fp.read() if "set fmri(inmelodic) 1" in text: is_ica = True for line in text.split('\n'): if line.find("set fmri(outputdir)")>-1: try: outputdir_spec=line.split('"')[-2] if os.path.exists(outputdir_spec): outputs['feat_dir']=outputdir_spec except: pass if not outputs['feat_dir']: if is_ica: outputs['feat_dir'] = glob(os.path.join(os.getcwd(), '*ica'))[0] else: outputs['feat_dir'] = glob(os.path.join(os.getcwd(), '*feat'))[0] print 'Outputs from FEATmodel:',outputs return outputs class FEATModelInputSpec(FSLCommandInputSpec): fsf_file = File(exists=True, mandatory=True, argstr="%s", position=0, desc="File specifying the feat design spec file", copyfile=False) ev_files = traits.List(File(exists=True), mandatory=True, argstr="%s", desc="Event spec files generated by level1design", position=1, copyfile=False) class FEATModelOutpuSpec(TraitedSpec): design_file = File( exists=True, desc='Mat file containing ascii matrix for design') design_image = File( exists=True, desc='Graphical representation of design matrix') design_cov = File( exists=True, desc='Graphical representation of design covariance') con_file = File( exists=True, desc='Contrast file containing contrast vectors') fcon_file = File(desc='Contrast file containing contrast vectors') class FEATModel(FSLCommand): """Uses FSL feat_model to generate design.mat files """ _cmd = 'feat_model' input_spec = FEATModelInputSpec output_spec = FEATModelOutpuSpec def _format_arg(self, name, trait_spec, value): if name == 'fsf_file': return super(FEATModel, self)._format_arg(name, trait_spec, self._get_design_root(value)) elif name == 'ev_files': return '' else: return super(FEATModel, self)._format_arg(name, trait_spec, value) def _get_design_root(self, infile): _, fname = os.path.split(infile) return fname.split('.')[0] def _list_outputs(self): # TODO: figure out file names and get rid off the globs outputs = self._outputs().get() root = self._get_design_root(list_to_filename(self.inputs.fsf_file)) design_file = glob(os.path.join(os.getcwd(), '%s*.mat' % root)) assert len(design_file) == 1, 'No mat file generated by FEAT Model' outputs['design_file'] = design_file[0] design_image = glob(os.path.join(os.getcwd(), '%s.png' % root)) assert len( design_image) == 1, 'No design image generated by FEAT Model' outputs['design_image'] = design_image[0] design_cov = glob(os.path.join(os.getcwd(), '%s_cov.png' % root)) assert len( design_cov) == 1, 'No covariance image generated by FEAT Model' outputs['design_cov'] = design_cov[0] con_file = glob(os.path.join(os.getcwd(), '%s*.con' % root)) assert len(con_file) == 1, 'No con file generated by FEAT Model' outputs['con_file'] = con_file[0] fcon_file = glob(os.path.join(os.getcwd(), '%s*.fts' % root)) if fcon_file: assert 
len(fcon_file) == 1, 'No fts file generated by FEAT Model' outputs['fcon_file'] = fcon_file[0] return outputs class FILMGLSInputSpec(FSLCommandInputSpec): in_file = File(exists=True, mandatory=True, position=-3, argstr='%s', desc='input data file') design_file = File(exists=True, position=-2, argstr='%s', desc='design matrix file') threshold = traits.Range(default=1000., low=0.0, argstr='%f', position=-1, usedefault=True, desc='threshold') smooth_autocorr = traits.Bool(argstr='-sa', desc='Smooth auto corr estimates') mask_size = traits.Int(argstr='-ms %d', desc="susan mask size") brightness_threshold = traits.Range(low=0, argstr='-epith %d', desc='susan brightness threshold, otherwise it is estimated') full_data = traits.Bool(argstr='-v', desc='output full data') _estimate_xor = ['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'] autocorr_estimate_only = traits.Bool(argstr='-ac', xor=_estimate_xor, desc='perform autocorrelation estimatation only') fit_armodel = traits.Bool(argstr='-ar', xor=_estimate_xor, desc='fits autoregressive model - default is to use tukey with M=sqrt(numvols)') tukey_window = traits.Int(argstr='-tukey %d', xor=_estimate_xor, desc='tukey window size to estimate autocorr') multitaper_product = traits.Int(argstr='-mt %d', xor=_estimate_xor, desc='multitapering with slepian tapers and num is the time-bandwidth product') use_pava = traits.Bool( argstr='-pava', desc='estimates autocorr using PAVA') autocorr_noestimate = traits.Bool(argstr='-noest', xor=_estimate_xor, desc='do not estimate autocorrs') output_pwdata = traits.Bool(argstr='-output_pwdata', desc='output prewhitened data and average design matrix') results_dir = Directory('results', argstr='-rn %s', usedefault=True, desc='directory to store results in') class FILMGLSInputSpec505(FSLCommandInputSpec): in_file = File(exists=True, mandatory=True, position=-3, argstr='--in=%s', desc='input data file') design_file = File(exists=True, position=-2, argstr='--pd=%s', desc='design matrix file') threshold = traits.Range(default=1000., low=0.0, argstr='--thr=%f', position=-1, usedefault=True, desc='threshold') smooth_autocorr = traits.Bool(argstr='--sa', desc='Smooth auto corr estimates') mask_size = traits.Int(argstr='--ms=%d', desc="susan mask size") brightness_threshold = traits.Range(low=0, argstr='--epith=%d', desc=('susan brightness threshold, ' 'otherwise it is estimated')) full_data = traits.Bool(argstr='-v', desc='output full data') _estimate_xor = ['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'] autocorr_estimate_only = traits.Bool(argstr='--ac', xor=_estimate_xor, desc=('perform autocorrelation ' 'estimation only')) fit_armodel = traits.Bool(argstr='--ar', xor=_estimate_xor, desc=('fits autoregressive model - default is to ' 'use tukey with M=sqrt(numvols)')) tukey_window = traits.Int(argstr='--tukey=%d', xor=_estimate_xor, desc='tukey window size to estimate autocorr') multitaper_product = traits.Int(argstr='--mt=%d', xor=_estimate_xor, desc=('multitapering with slepian tapers ' 'and num is the time-bandwidth ' 'product')) use_pava = traits.Bool(argstr='--pava', desc='estimates autocorr using PAVA') autocorr_noestimate = traits.Bool(argstr='--noest', xor=_estimate_xor, desc='do not estimate autocorrs') output_pwdata = traits.Bool(argstr='--outputPWdata', desc=('output prewhitened data and average ' 'design matrix')) results_dir = Directory('results', argstr='--rn=%s', 
usedefault=True, desc='directory to store results in') class FILMGLSOutputSpec(TraitedSpec): param_estimates = OutputMultiPath(File(exists=True), desc='Parameter estimates for each column of the design matrix') residual4d = File(exists=True, desc='Model fit residual mean-squared error for each time point') dof_file = File(exists=True, desc='degrees of freedom') sigmasquareds = File( exists=True, desc='summary of residuals, See Woolrich, et. al., 2001') results_dir = Directory(exists=True, desc='directory storing model estimation output') corrections = File(exists=True, desc='statistical corrections used within FILM modelling') logfile = File(exists=True, desc='FILM run logfile') class FILMGLS(FSLCommand): """Use FSL film_gls command to fit a design matrix to voxel timeseries Examples -------- Initialize with no options, assigning them when calling run: >>> from nipype.interfaces import fsl >>> fgls = fsl.FILMGLS() >>> res = fgls.run('in_file', 'design_file', 'thresh', rn='stats') #doctest: +SKIP Assign options through the ``inputs`` attribute: >>> fgls = fsl.FILMGLS() >>> fgls.inputs.in_file = 'functional.nii' >>> fgls.inputs.design_file = 'design.mat' >>> fgls.inputs.threshold = 10 >>> fgls.inputs.results_dir = 'stats' >>> res = fgls.run() #doctest: +SKIP Specify options when creating an instance: >>> fgls = fsl.FILMGLS(in_file='functional.nii', \ design_file='design.mat', \ threshold=10, results_dir='stats') >>> res = fgls.run() #doctest: +SKIP """ _cmd = 'film_gls' if Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.4'): input_spec = FILMGLSInputSpec505 else: input_spec = FILMGLSInputSpec output_spec = FILMGLSOutputSpec def _get_pe_files(self, cwd): files = None if isdefined(self.inputs.design_file): fp = open(self.inputs.design_file, 'rt') for line in fp.readlines(): if line.startswith('/NumWaves'): numpes = int(line.split()[-1]) files = [] for i in range(numpes): files.append(self._gen_fname('pe%d.nii' % (i + 1), cwd=cwd)) break fp.close() return files def _list_outputs(self): outputs = self._outputs().get() cwd = os.getcwd() results_dir = os.path.join(cwd, self.inputs.results_dir) outputs['results_dir'] = results_dir pe_files = self._get_pe_files(results_dir) if pe_files: outputs['param_estimates'] = pe_files outputs['residual4d'] = self._gen_fname('res4d.nii', cwd=results_dir) outputs['dof_file'] = os.path.join(results_dir, 'dof') outputs['sigmasquareds'] = self._gen_fname('sigmasquareds.nii', cwd=results_dir) outputs['corrections'] = self._gen_fname('corrections.nii', cwd=results_dir) outputs['logfile'] = self._gen_fname('logfile', change_ext=False, cwd=results_dir) return outputs class FEATRegisterInputSpec(BaseInterfaceInputSpec): feat_dirs = InputMultiPath( Directory(exists=True), desc="Lower level feat dirs", mandatory=True) reg_image = File( exists=True, desc="image to register to (will be treated as standard)", mandatory=True) reg_dof = traits.Int( 12, desc="registration degrees of freedom", usedefault=True) class FEATRegisterOutputSpec(TraitedSpec): fsf_file = File(exists=True, desc="FSL feat specification file") class FEATRegister(BaseInterface): """Register feat directories to a specific standard """ input_spec = FEATRegisterInputSpec output_spec = FEATRegisterOutputSpec def _run_interface(self, runtime): fsf_header = load_template('featreg_header.tcl') fsf_footer = load_template('feat_nongui.tcl') fsf_dirs = load_template('feat_fe_featdirs.tcl') num_runs = len(self.inputs.feat_dirs) fsf_txt = fsf_header.substitute(num_runs=num_runs, 
regimage=self.inputs.reg_image, regdof=self.inputs.reg_dof) for i, rundir in enumerate(filename_to_list(self.inputs.feat_dirs)): fsf_txt += fsf_dirs.substitute(runno=i + 1, rundir=os.path.abspath(rundir)) fsf_txt += fsf_footer.substitute() f = open(os.path.join(os.getcwd(), 'register.fsf'), 'wt') f.write(fsf_txt) f.close() return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['fsf_file'] = os.path.abspath( os.path.join(os.getcwd(), 'register.fsf')) return outputs class FLAMEOInputSpec(FSLCommandInputSpec): cope_file = File(exists=True, argstr='--copefile=%s', mandatory=True, desc='cope regressor data file') var_cope_file = File(exists=True, argstr='--varcopefile=%s', desc='varcope weightings data file') dof_var_cope_file = File(exists=True, argstr='--dofvarcopefile=%s', desc='dof data file for varcope data') mask_file = File(exists=True, argstr='--maskfile=%s', mandatory=True, desc='mask file') design_file = File(exists=True, argstr='--designfile=%s', mandatory=True, desc='design matrix file') t_con_file = File( exists=True, argstr='--tcontrastsfile=%s', mandatory=True, desc='ascii matrix specifying t-contrasts') f_con_file = File(exists=True, argstr='--fcontrastsfile=%s', desc='ascii matrix specifying f-contrasts') cov_split_file = File( exists=True, argstr='--covsplitfile=%s', mandatory=True, desc='ascii matrix specifying the groups the covariance is split into') run_mode = traits.Enum( 'fe', 'ols', 'flame1', 'flame12', argstr='--runmode=%s', mandatory=True, desc='inference to perform') n_jumps = traits.Int( argstr='--njumps=%d', desc='number of jumps made by mcmc') burnin = traits.Int(argstr='--burnin=%d', desc='number of jumps at start of mcmc to be discarded') sample_every = traits.Int(argstr='--sampleevery=%d', desc='number of jumps for each sample') fix_mean = traits.Bool(argstr='--fixmean', desc='fix mean for tfit') infer_outliers = traits.Bool(argstr='--inferoutliers', desc='infer outliers - not for fe') no_pe_outputs = traits.Bool(argstr='--nopeoutput', desc='do not output pe files') sigma_dofs = traits.Int(argstr='--sigma_dofs=%d', desc='sigma (in mm) to use for Gaussian smoothing the DOFs in FLAME 2. Default is 1mm, -1 indicates no smoothing') outlier_iter = traits.Int(argstr='--ioni=%d', desc='Number of max iterations to use when inferring outliers. 
Default is 12.') log_dir = Directory("stats", argstr='--ld=%s', usedefault=True) # ohinds # no support for ven, vef class FLAMEOOutputSpec(TraitedSpec): pes = OutputMultiPath(File(exists=True), desc=("Parameter estimates for each column of the " "design matrix for each voxel")) res4d = OutputMultiPath(File(exists=True), desc=("Model fit residual mean-squared error for " "each time point")) copes = OutputMultiPath(File(exists=True), desc="Contrast estimates for each contrast") var_copes = OutputMultiPath(File(exists=True), desc="Variance estimates for each contrast") zstats = OutputMultiPath(File(exists=True), desc="z-stat file for each contrast") tstats = OutputMultiPath(File(exists=True), desc="t-stat file for each contrast") zfstats = OutputMultiPath(File(exists=True), desc="z stat file for each f contrast") fstats = OutputMultiPath(File(exists=True), desc="f-stat file for each contrast") mrefvars = OutputMultiPath(File(exists=True), desc=("mean random effect variances for each " "contrast")) tdof = OutputMultiPath(File(exists=True), desc="temporal dof file for each contrast") weights = OutputMultiPath(File(exists=True), desc="weights file for each contrast") stats_dir = Directory(File(exists=True), desc="directory storing model estimation output") class FLAMEO(FSLCommand): """Use FSL flameo command to perform higher level model fits Examples -------- Initialize FLAMEO with no options, assigning them when calling run: >>> from nipype.interfaces import fsl >>> import os >>> flameo = fsl.FLAMEO(cope_file='cope.nii.gz', \ var_cope_file='varcope.nii.gz', \ cov_split_file='cov_split.mat', \ design_file='design.mat', \ t_con_file='design.con', \ mask_file='mask.nii', \ run_mode='fe') >>> flameo.cmdline 'flameo --copefile=cope.nii.gz --covsplitfile=cov_split.mat --designfile=design.mat --ld=stats --maskfile=mask.nii --runmode=fe --tcontrastsfile=design.con --varcopefile=varcope.nii.gz' """ _cmd = 'flameo' input_spec = FLAMEOInputSpec output_spec = FLAMEOOutputSpec # ohinds: 2010-04-06 def _run_interface(self, runtime): log_dir = self.inputs.log_dir cwd = os.getcwd() if os.access(os.path.join(cwd, log_dir), os.F_OK): rmtree(os.path.join(cwd, log_dir)) return super(FLAMEO, self)._run_interface(runtime) # ohinds: 2010-04-06 # made these compatible with flameo def _list_outputs(self): outputs = self._outputs().get() pth = os.path.join(os.getcwd(), self.inputs.log_dir) pes = human_order_sorted(glob(os.path.join(pth, 'pe[0-9]*.*'))) assert len(pes) >= 1, 'No pe volumes generated by FSL Estimate' outputs['pes'] = pes res4d = human_order_sorted(glob(os.path.join(pth, 'res4d.*'))) assert len(res4d) == 1, 'No residual volume generated by FSL Estimate' outputs['res4d'] = res4d[0] copes = human_order_sorted(glob(os.path.join(pth, 'cope[0-9]*.*'))) assert len(copes) >= 1, 'No cope volumes generated by FSL CEstimate' outputs['copes'] = copes var_copes = human_order_sorted( glob(os.path.join(pth, 'varcope[0-9]*.*'))) assert len( var_copes) >= 1, 'No varcope volumes generated by FSL CEstimate' outputs['var_copes'] = var_copes zstats = human_order_sorted(glob(os.path.join(pth, 'zstat[0-9]*.*'))) assert len(zstats) >= 1, 'No zstat volumes generated by FSL CEstimate' outputs['zstats'] = zstats if isdefined(self.inputs.f_con_file): zfstats = human_order_sorted( glob(os.path.join(pth, 'zfstat[0-9]*.*'))) assert len( zfstats) >= 1, 'No zfstat volumes generated by FSL CEstimate' outputs['zfstats'] = zfstats fstats = human_order_sorted( glob(os.path.join(pth, 'fstat[0-9]*.*'))) assert len( fstats) >= 1, 'No fstat 
volumes generated by FSL CEstimate' outputs['fstats'] = fstats tstats = human_order_sorted(glob(os.path.join(pth, 'tstat[0-9]*.*'))) assert len(tstats) >= 1, 'No tstat volumes generated by FSL CEstimate' outputs['tstats'] = tstats mrefs = human_order_sorted( glob(os.path.join(pth, 'mean_random_effects_var[0-9]*.*'))) assert len( mrefs) >= 1, 'No mean random effects volumes generated by FLAMEO' outputs['mrefvars'] = mrefs tdof = human_order_sorted(glob(os.path.join(pth, 'tdof_t[0-9]*.*'))) assert len(tdof) >= 1, 'No T dof volumes generated by FLAMEO' outputs['tdof'] = tdof weights = human_order_sorted( glob(os.path.join(pth, 'weights[0-9]*.*'))) assert len(weights) >= 1, 'No weight volumes generated by FLAMEO' outputs['weights'] = weights outputs['stats_dir'] = pth return outputs class ContrastMgrInputSpec(FSLCommandInputSpec): tcon_file = File(exists=True, mandatory=True, argstr='%s', position=-1, desc='contrast file containing T-contrasts') fcon_file = File(exists=True, argstr='-f %s', desc='contrast file containing F-contrasts') param_estimates = InputMultiPath(File(exists=True), argstr='', copyfile=False, mandatory=True, desc='Parameter estimates for each column of the design matrix') corrections = File(exists=True, copyfile=False, mandatory=True, desc='statistical corrections used within FILM modelling') dof_file = File(exists=True, argstr='', copyfile=False, mandatory=True, desc='degrees of freedom') sigmasquareds = File(exists=True, argstr='', position=-2, copyfile=False, mandatory=True, desc='summary of residuals, See Woolrich, et. al., 2001') contrast_num = traits.Range(low=1, argstr='-cope', desc='contrast number to start labeling copes from') suffix = traits.Str(argstr='-suffix %s', desc='suffix to put on the end of the cope filename before the contrast number, default is nothing') class ContrastMgrOutputSpec(TraitedSpec): copes = OutputMultiPath(File(exists=True), desc='Contrast estimates for each contrast') varcopes = OutputMultiPath(File(exists=True), desc='Variance estimates for each contrast') zstats = OutputMultiPath(File(exists=True), desc='z-stat file for each contrast') tstats = OutputMultiPath(File(exists=True), desc='t-stat file for each contrast') fstats = OutputMultiPath(File(exists=True), desc='f-stat file for each contrast') zfstats = OutputMultiPath(File(exists=True), desc='z-stat file for each F contrast') neffs = OutputMultiPath(File(exists=True), desc='neff file ?? for each contrast') class ContrastMgr(FSLCommand): """Use FSL contrast_mgr command to evaluate contrasts In interface mode this file assumes that all the required inputs are in the same location. """ _cmd = 'contrast_mgr' input_spec = ContrastMgrInputSpec output_spec = ContrastMgrOutputSpec def _run_interface(self, runtime): # The returncode is meaningless in ContrastMgr. So check the output # in stderr and if it's set, then update the returncode # accordingly. 
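# A minimal usage sketch for ContrastMgr (illustrative only; the file names
# below are assumptions and would normally point into the results directory
# produced by a preceding FILMGLS run, since contrast_mgr expects all of its
# required inputs in one location):
#
#     >>> from nipype.interfaces import fsl
#     >>> con = fsl.ContrastMgr()
#     >>> con.inputs.tcon_file = 'design.con'
#     >>> con.inputs.param_estimates = ['pe1.nii', 'pe2.nii']
#     >>> con.inputs.sigmasquareds = 'sigmasquareds.nii'
#     >>> con.inputs.corrections = 'corrections.nii'
#     >>> con.inputs.dof_file = 'dof'
#     >>> res = con.run()  # doctest: +SKIP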
runtime = super(ContrastMgr, self)._run_interface(runtime) if runtime.stderr: self.raise_exception(runtime) return runtime def _format_arg(self, name, trait_spec, value): if name in ['param_estimates', 'corrections', 'dof_file']: return '' elif name in ['sigmasquareds']: path, _ = os.path.split(value) return path else: return super(ContrastMgr, self)._format_arg(name, trait_spec, value) def _get_design_root(self, infile): _, fname = os.path.split(infile) return fname.split('.')[0] def _get_numcons(self): numtcons = 0 numfcons = 0 if isdefined(self.inputs.tcon_file): fp = open(self.inputs.tcon_file, 'rt') for line in fp.readlines(): if line.startswith('/NumContrasts'): numtcons = int(line.split()[-1]) break fp.close() if isdefined(self.inputs.fcon_file): fp = open(self.inputs.fcon_file, 'rt') for line in fp.readlines(): if line.startswith('/NumContrasts'): numfcons = int(line.split()[-1]) break fp.close() return numtcons, numfcons def _list_outputs(self): outputs = self._outputs().get() pth, _ = os.path.split(self.inputs.sigmasquareds) numtcons, numfcons = self._get_numcons() base_contrast = 1 if isdefined(self.inputs.contrast_num): base_contrast = self.inputs.contrast_num copes = [] varcopes = [] zstats = [] tstats = [] neffs = [] for i in range(numtcons): copes.append(self._gen_fname('cope%d.nii' % (base_contrast + i), cwd=pth)) varcopes.append( self._gen_fname('varcope%d.nii' % (base_contrast + i), cwd=pth)) zstats.append(self._gen_fname('zstat%d.nii' % (base_contrast + i), cwd=pth)) tstats.append(self._gen_fname('tstat%d.nii' % (base_contrast + i), cwd=pth)) neffs.append(self._gen_fname('neff%d.nii' % (base_contrast + i), cwd=pth)) if copes: outputs['copes'] = copes outputs['varcopes'] = varcopes outputs['zstats'] = zstats outputs['tstats'] = tstats outputs['neffs'] = neffs fstats = [] zfstats = [] for i in range(numfcons): fstats.append(self._gen_fname('fstat%d.nii' % (base_contrast + i), cwd=pth)) zfstats.append( self._gen_fname('zfstat%d.nii' % (base_contrast + i), cwd=pth)) if fstats: outputs['fstats'] = fstats outputs['zfstats'] = zfstats return outputs class L2ModelInputSpec(BaseInterfaceInputSpec): num_copes = traits.Range(low=1, mandatory=True, desc='number of copes to be combined') class L2ModelOutputSpec(TraitedSpec): design_mat = File(exists=True, desc='design matrix file') design_con = File(exists=True, desc='design contrast file') design_grp = File(exists=True, desc='design group file') class L2Model(BaseInterface): """Generate subject specific second level model Examples -------- >>> from nipype.interfaces.fsl import L2Model >>> model = L2Model(num_copes=3) # 3 sessions """ input_spec = L2ModelInputSpec output_spec = L2ModelOutputSpec def _run_interface(self, runtime): cwd = os.getcwd() mat_txt = ['/NumWaves 1', '/NumPoints %d' % self.inputs.num_copes, '/PPheights %e' % 1, '', '/Matrix'] for i in range(self.inputs.num_copes): mat_txt += ['%e' % 1] mat_txt = '\n'.join(mat_txt) con_txt = ['/ContrastName1 group mean', '/NumWaves 1', '/NumContrasts 1', '/PPheights %e' % 1, '/RequiredEffect 100.0', # XX where does this # number come from '', '/Matrix', '%e' % 1] con_txt = '\n'.join(con_txt) grp_txt = ['/NumWaves 1', '/NumPoints %d' % self.inputs.num_copes, '', '/Matrix'] for i in range(self.inputs.num_copes): grp_txt += ['1'] grp_txt = '\n'.join(grp_txt) txt = {'design.mat': mat_txt, 'design.con': con_txt, 'design.grp': grp_txt} # write design files for i, name in enumerate(['design.mat', 'design.con', 'design.grp']): f = open(os.path.join(cwd, name), 'wt') f.write(txt[name]) 
f.close() return runtime def _list_outputs(self): outputs = self._outputs().get() for field in outputs.keys(): outputs[field] = os.path.join(os.getcwd(), field.replace('_', '.')) return outputs class MultipleRegressDesignInputSpec(BaseInterfaceInputSpec): contrasts = traits.List( traits.Either(traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('F'), traits.List(traits.Tuple(traits.Str, traits.Enum('T'), traits.List( traits.Str), traits.List( traits.Float)), ))), mandatory=True, desc="List of contrasts with each contrast being a list of the form - \ [('name', 'stat', [condition list], [weight list])]. if \ session list is None or not provided, all sessions are used. For F \ contrasts, the condition list should contain previously defined \ T-contrasts without any weight list.") regressors = traits.Dict(traits.Str, traits.List(traits.Float), mandatory=True, desc='dictionary containing named lists of regressors') groups = traits.List(traits.Int, desc='list of group identifiers (defaults to single group)') class MultipleRegressDesignOutputSpec(TraitedSpec): design_mat = File(exists=True, desc='design matrix file') design_con = File(exists=True, desc='design t-contrast file') design_fts = File(exists=True, desc='design f-contrast file') design_grp = File(exists=True, desc='design group file') class MultipleRegressDesign(BaseInterface): """Generate multiple regression design .. note:: FSL does not demean columns for higher level analysis. Please see `FSL documentation `_ for more details on model specification for higher level analysis. Examples -------- >>> from nipype.interfaces.fsl import MultipleRegressDesign >>> model = MultipleRegressDesign() >>> model.inputs.contrasts = [['group mean', 'T',['reg1'],[1]]] >>> model.inputs.regressors = dict(reg1=[1, 1, 1], reg2=[2.,-4, 3]) >>> model.run() # doctest: +SKIP """ input_spec = MultipleRegressDesignInputSpec output_spec = MultipleRegressDesignOutputSpec def _run_interface(self, runtime): cwd = os.getcwd() regs = sorted(self.inputs.regressors.keys()) nwaves = len(regs) npoints = len(self.inputs.regressors[regs[0]]) ntcons = sum([1 for con in self.inputs.contrasts if con[1] == 'T']) nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F']) # write mat file mat_txt = ['/NumWaves %d' % nwaves, '/NumPoints %d' % npoints] ppheights = [] for reg in regs: maxreg = np.max(self.inputs.regressors[reg]) minreg = np.min(self.inputs.regressors[reg]) if np.sign(maxreg) == np.sign(minreg): regheight = max([abs(minreg), abs(maxreg)]) else: regheight = abs(maxreg - minreg) ppheights.append('%e' % regheight) mat_txt += ['/PPheights ' + ' '.join(ppheights)] mat_txt += ['', '/Matrix'] for cidx in range(npoints): mat_txt.append(' '.join( ['%e' % self.inputs.regressors[key][cidx] for key in regs])) mat_txt = '\n'.join(mat_txt) + '\n' # write t-con file con_txt = [] counter = 0 tconmap = {} for conidx, con in enumerate(self.inputs.contrasts): if con[1] == 'T': tconmap[conidx] = counter counter += 1 con_txt += ['/ContrastName%d %s' % (counter, con[0])] con_txt += ['/NumWaves %d' % nwaves, '/NumContrasts %d' % ntcons, '/PPheights %s' % ' '.join( ['%e' % 1 for i in range(counter)]), '/RequiredEffect %s' % ' '.join( ['%.3f' % 100 for i in range(counter)]), '', '/Matrix'] for idx in sorted(tconmap.keys()): convals = np.zeros((nwaves, 1)) for regidx, reg in enumerate(self.inputs.contrasts[idx][2]): convals[regs.index(reg) ] = self.inputs.contrasts[idx][3][regidx] con_txt.append(' 
'.join(['%e' % val for val in convals])) con_txt = '\n'.join(con_txt) + '\n' # write f-con file fcon_txt = '' if nfcons: fcon_txt = ['/NumWaves %d' % ntcons, '/NumContrasts %d' % nfcons, '', '/Matrix'] for conidx, con in enumerate(self.inputs.contrasts): if con[1] == 'F': convals = np.zeros((ntcons, 1)) for tcon in con[2]: convals[tconmap[self.inputs.contrasts.index(tcon)]] = 1 fcon_txt.append(' '.join(['%d' % val for val in convals])) fcon_txt = '\n'.join(fcon_txt) fcon_txt += '\n' # write group file grp_txt = ['/NumWaves 1', '/NumPoints %d' % npoints, '', '/Matrix'] for i in range(npoints): if isdefined(self.inputs.groups): grp_txt += ['%d' % self.inputs.groups[i]] else: grp_txt += ['1'] grp_txt = '\n'.join(grp_txt) + '\n' txt = {'design.mat': mat_txt, 'design.con': con_txt, 'design.fts': fcon_txt, 'design.grp': grp_txt} # write design files for key, val in txt.items(): if ('fts' in key) and (nfcons == 0): continue filename = key.replace('_', '.') f = open(os.path.join(cwd, filename), 'wt') f.write(val) f.close() return runtime def _list_outputs(self): outputs = self._outputs().get() nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F']) for field in outputs.keys(): if ('fts' in field) and (nfcons == 0): continue outputs[field] = os.path.join(os.getcwd(), field.replace('_', '.')) return outputs class SMMInputSpec(FSLCommandInputSpec): spatial_data_file = File( exists=True, position=0, argstr='--sdf="%s"', mandatory=True, desc="statistics spatial map", copyfile=False) mask = File(exists=True, position=1, argstr='--mask="%s"', mandatory=True, desc="mask file", copyfile=False) no_deactivation_class = traits.Bool(position=2, argstr="--zfstatmode", desc="enforces no deactivation class") class SMMOutputSpec(TraitedSpec): null_p_map = File(exists=True) activation_p_map = File(exists=True) deactivation_p_map = File(exists=True) class SMM(FSLCommand): ''' Spatial Mixture Modelling. For more detail on the spatial mixture modelling see Mixture Models with Adaptive Spatial Regularisation for Segmentation with an Application to FMRI Data; Woolrich, M., Behrens, T., Beckmann, C., and Smith, S.; IEEE Trans. Medical Imaging, 24(1):1-11, 2005. 
''' _cmd = 'mm --ld=logdir' input_spec = SMMInputSpec output_spec = SMMOutputSpec def _list_outputs(self): outputs = self._outputs().get() # TODO get the true logdir from the stdout outputs['null_p_map'] = self._gen_fname(basename="w1_mean", cwd="logdir") outputs['activation_p_map'] = self._gen_fname( basename="w2_mean", cwd="logdir") if not isdefined(self.inputs.no_deactivation_class) or not self.inputs.no_deactivation_class: outputs['deactivation_p_map'] = self._gen_fname( basename="w3_mean", cwd="logdir") return outputs class MELODICInputSpec(FSLCommandInputSpec): in_files = InputMultiPath( File(exists=True), argstr="-i %s", mandatory=True, position=0, desc="input file names (either single file name or a list)") out_dir = Directory( argstr="-o %s", desc="output directory name", genfile=True) mask = File(exists=True, argstr="-m %s", desc="file name of mask for thresholding") no_mask = traits.Bool(argstr="--nomask", desc="switch off masking") update_mask = traits.Bool( argstr="--update_mask", desc="switch off mask updating") no_bet = traits.Bool(argstr="--nobet", desc="switch off BET") bg_threshold = traits.Float( argstr="--bgthreshold=%f", desc="brain/non-brain threshold used to mask non-brain voxels, as a percentage (only if --nobet selected)") dim = traits.Int(argstr="-d %d", desc="dimensionality reduction into #num dimensions" "(default: automatic estimation)") dim_est = traits.Str(argstr="--dimest=%s", desc="use specific dim. estimation technique:" " lap, bic, mdl, aic, mean (default: lap)") sep_whiten = traits.Bool( argstr="--sep_whiten", desc="switch on separate whitening") sep_vn = traits.Bool( argstr="--sep_vn", desc="switch off joined variance normalization") num_ICs = traits.Int( argstr="-n %d", desc="number of IC's to extract (for deflation approach)") approach = traits.Str(argstr="-a %s", desc="approach for decomposition, 2D: defl, symm (default), " " 3D: tica (default), concat") non_linearity = traits.Str( argstr="--nl=%s", desc="nonlinearity: gauss, tanh, pow3, pow4") var_norm = traits.Bool( argstr="--vn", desc="switch off variance normalization") pbsc = traits.Bool( argstr="--pbsc", desc="switch off conversion to percent BOLD signal change") cov_weight = traits.Float(argstr="--covarweight=%f", desc="voxel-wise weights for the covariance " "matrix (e.g. 
segmentation information)") epsilon = traits.Float(argstr="--eps=%f", desc="minimum error change") epsilonS = traits.Float( argstr="--epsS=%f", desc="minimum error change for rank-1 approximation in TICA") maxit = traits.Int(argstr="--maxit=%d", desc="maximum number of iterations before restart") max_restart = traits.Int( argstr="--maxrestart=%d", desc="maximum number of restarts") mm_thresh = traits.Float( argstr="--mmthresh=%f", desc="threshold for Mixture Model based inference") no_mm = traits.Bool( argstr="--no_mm", desc="switch off mixture modelling on IC maps") ICs = File(exists=True, argstr="--ICs=%s", desc="filename of the IC components file for mixture modelling") mix = File(exists=True, argstr="--mix=%s", desc="mixing matrix for mixture modelling / filtering") smode = File(exists=True, argstr="--smode=%s", desc="matrix of session modes for report generation") rem_cmp = traits.List( traits.Int, argstr="-f %d", desc="component numbers to remove") report = traits.Bool(argstr="--report", desc="generate Melodic web report") bg_image = File(exists=True, argstr="--bgimage=%s", desc="specify background image for report" " (default: mean image)") tr_sec = traits.Float(argstr="--tr=%f", desc="TR in seconds") log_power = traits.Bool( argstr="--logPower", desc="calculate log of power for frequency spectrum") t_des = File(exists=True, argstr="--Tdes=%s", desc="design matrix across time-domain") t_con = File(exists=True, argstr="--Tcon=%s", desc="t-contrast matrix across time-domain") s_des = File(exists=True, argstr="--Sdes=%s", desc="design matrix across subject-domain") s_con = File(exists=True, argstr="--Scon=%s", desc="t-contrast matrix across subject-domain") out_all = traits.Bool(argstr="--Oall", desc="output everything") out_unmix = traits.Bool(argstr="--Ounmix", desc="output unmixing matrix") out_stats = traits.Bool( argstr="--Ostats", desc="output thresholded maps and probability maps") out_pca = traits.Bool(argstr="--Opca", desc="output PCA results") out_white = traits.Bool( argstr="--Owhite", desc="output whitening/dewhitening matrices") out_orig = traits.Bool(argstr="--Oorig", desc="output the original ICs") out_mean = traits.Bool(argstr="--Omean", desc="output mean volume") report_maps = traits.Str(argstr="--report_maps=%s", desc="control string for spatial map images (see slicer)") remove_deriv = traits.Bool(argstr="--remove_deriv", desc="removes every second entry in paradigm" " file (EV derivatives)") class MELODICOutputSpec(TraitedSpec): out_dir = Directory(exists=True) report_dir = Directory(exists=True) class MELODIC(FSLCommand): """Multivariate Exploratory Linear Optimised Decomposition into Independent Components Examples -------- >>> melodic_setup = MELODIC() >>> melodic_setup.inputs.approach = 'tica' >>> melodic_setup.inputs.in_files = ['functional.nii', 'functional2.nii', 'functional3.nii'] >>> melodic_setup.inputs.no_bet = True >>> melodic_setup.inputs.bg_threshold = 10 >>> melodic_setup.inputs.tr_sec = 1.5 >>> melodic_setup.inputs.mm_thresh = 0.5 >>> melodic_setup.inputs.out_stats = True >>> melodic_setup.inputs.t_des = 'timeDesign.mat' >>> melodic_setup.inputs.t_con = 'timeDesign.con' >>> melodic_setup.inputs.s_des = 'subjectDesign.mat' >>> melodic_setup.inputs.s_con = 'subjectDesign.con' >>> melodic_setup.inputs.out_dir = 'groupICA.out' >>> melodic_setup.run() # doctest: +SKIP """ input_spec = MELODICInputSpec output_spec = MELODICOutputSpec _cmd = 'melodic' def _list_outputs(self): outputs = self.output_spec().get() outputs['out_dir'] = self.inputs.out_dir if not 
isdefined(outputs['out_dir']): outputs['out_dir'] = self._gen_filename("out_dir") if isdefined(self.inputs.report) and self.inputs.report: outputs['report_dir'] = os.path.join( self._gen_filename("out_dir"), "report") return outputs def _gen_filename(self, name): if name == "out_dir": return os.getcwd() class SmoothEstimateInputSpec(FSLCommandInputSpec): dof = traits.Int(argstr='--dof=%d', mandatory=True, xor=['zstat_file'], desc='number of degrees of freedom') mask_file = File(argstr='--mask=%s', exists=True, mandatory=True, desc='brain mask volume') residual_fit_file = File(argstr='--res=%s', exists=True, requires=['dof'], desc='residual-fit image file') zstat_file = File(argstr='--zstat=%s', exists=True, xor=['dof'], desc='zstat image file') class SmoothEstimateOutputSpec(TraitedSpec): dlh = traits.Float(desc='smoothness estimate sqrt(det(Lambda))') volume = traits.Int(desc='number of voxels in mask') resels = traits.Float(desc='number of resels') class SmoothEstimate(FSLCommand): """ Estimates the smoothness of an image Examples -------- >>> est = SmoothEstimate() >>> est.inputs.zstat_file = 'zstat1.nii.gz' >>> est.inputs.mask_file = 'mask.nii' >>> est.cmdline 'smoothest --mask=mask.nii --zstat=zstat1.nii.gz' """ input_spec = SmoothEstimateInputSpec output_spec = SmoothEstimateOutputSpec _cmd = 'smoothest' def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() stdout = runtime.stdout.split('\n') outputs.dlh = float(stdout[0].split()[1]) outputs.volume = int(stdout[1].split()[1]) outputs.resels = float(stdout[2].split()[1]) return outputs class ClusterInputSpec(FSLCommandInputSpec): in_file = File(argstr='--in=%s', mandatory=True, exists=True, desc='input volume') threshold = traits.Float(argstr='--thresh=%.10f', mandatory=True, desc='threshold for input volume') out_index_file = traits.Either(traits.Bool, File, argstr='--oindex=%s', desc='output of cluster index (in size order)', hash_files=False) out_threshold_file = traits.Either(traits.Bool, File, argstr='--othresh=%s', desc='thresholded image', hash_files=False) out_localmax_txt_file = traits.Either(traits.Bool, File, argstr='--olmax=%s', desc='local maxima text file', hash_files=False) out_localmax_vol_file = traits.Either(traits.Bool, File, argstr='--olmaxim=%s', desc='output of local maxima volume', hash_files=False) out_size_file = traits.Either(traits.Bool, File, argstr='--osize=%s', desc='filename for output of size image', hash_files=False) out_max_file = traits.Either(traits.Bool, File, argstr='--omax=%s', desc='filename for output of max image', hash_files=False) out_mean_file = traits.Either(traits.Bool, File, argstr='--omean=%s', desc='filename for output of mean image', hash_files=False) out_pval_file = traits.Either(traits.Bool, File, argstr='--opvals=%s', desc='filename for image output of log pvals', hash_files=False) pthreshold = traits.Float(argstr='--pthresh=%.10f', requires=['dlh', 'volume'], desc='p-threshold for clusters') peak_distance = traits.Float(argstr='--peakdist=%.10f', desc='minimum distance between local maxima/minima, in mm (default 0)') cope_file = traits.File(argstr='--cope=%s', desc='cope volume') volume = traits.Int(argstr='--volume=%d', desc='number of voxels in the mask') dlh = traits.Float(argstr='--dlh=%.10f', desc='smoothness estimate = sqrt(det(Lambda))') fractional = traits.Bool('--fractional', desc='interprets the threshold as a fraction of the robust range') connectivity = traits.Int(argstr='--connectivity=%d', desc='the connectivity of voxels (default 
26)') use_mm = traits.Bool('--mm', desc='use mm, not voxel, coordinates') find_min = traits.Bool('--min', desc='find minima instead of maxima') no_table = traits.Bool( '--no_table', desc='suppresses printing of the table info') minclustersize = traits.Bool(argstr='--minclustersize', desc='prints out minimum significant cluster size') xfm_file = File(argstr='--xfm=%s', desc='filename for Linear: input->standard-space transform. Non-linear: input->highres transform') std_space_file = File(argstr='--stdvol=%s', desc='filename for standard-space volume') num_maxima = traits.Int(argstr='--num=%d', desc='no of local maxima to report') warpfield_file = File(argstr='--warpvol=%s', desc='file contining warpfield') class ClusterOutputSpec(TraitedSpec): index_file = File(desc='output of cluster index (in size order)') threshold_file = File(desc='thresholded image') localmax_txt_file = File(desc='local maxima text file') localmax_vol_file = File(desc='output of local maxima volume') size_file = File(desc='filename for output of size image') max_file = File(desc='filename for output of max image') mean_file = File(desc='filename for output of mean image') pval_file = File(desc='filename for image output of log pvals') class Cluster(FSLCommand): """ Uses FSL cluster to perform clustering on statistical output Examples -------- >>> cl = Cluster() >>> cl.inputs.threshold = 2.3 >>> cl.inputs.in_file = 'zstat1.nii.gz' >>> cl.inputs.out_localmax_txt_file = 'stats.txt' >>> cl.cmdline 'cluster --in=zstat1.nii.gz --olmax=stats.txt --thresh=2.3000000000' """ input_spec = ClusterInputSpec output_spec = ClusterOutputSpec _cmd = 'cluster' filemap = {'out_index_file': 'index', 'out_threshold_file': 'threshold', 'out_localmax_txt_file': 'localmax.txt', 'out_localmax_vol_file': 'localmax', 'out_size_file': 'size', 'out_max_file': 'max', 'out_mean_file': 'mean', 'out_pval_file': 'pval'} def _list_outputs(self): outputs = self.output_spec().get() for key, suffix in self.filemap.items(): outkey = key[4:] inval = getattr(self.inputs, key) if isdefined(inval): if isinstance(inval, bool): if inval: change_ext = True if suffix.endswith('.txt'): change_ext = False outputs[outkey] = self._gen_fname(self.inputs.in_file, suffix='_' + suffix, change_ext=change_ext) else: outputs[outkey] = os.path.abspath(inval) return outputs def _format_arg(self, name, spec, value): if name in self.filemap.keys(): if isinstance(value, bool): fname = self._list_outputs()[name[4:]] else: fname = value return spec.argstr % fname return super(Cluster, self)._format_arg(name, spec, value) class RandomiseInputSpec(FSLCommandInputSpec): in_file = File(exists=True, desc='4D input file', argstr='-i %s', position=0, mandatory=True) base_name = traits.Str( 'tbss_', desc='the rootname that all generated files will have', argstr='-o "%s"', position=1, usedefault=True) design_mat = File( exists=True, desc='design matrix file', argstr='-d %s', position=2) tcon = File( exists=True, desc='t contrasts file', argstr='-t %s', position=3) fcon = File(exists=True, desc='f contrasts file', argstr='-f %s') mask = File(exists=True, desc='mask image', argstr='-m %s') x_block_labels = File( exists=True, desc='exchangeability block labels file', argstr='-e %s') demean = traits.Bool( desc='demean data temporally before model fitting', argstr='-D') one_sample_group_mean = traits.Bool( desc='perform 1-sample group-mean test instead of generic permutation test', argstr='-1') show_total_perms = traits.Bool( desc='print out how many unique permutations would be generated and 
exit', argstr='-q') show_info_parallel_mode = traits.Bool( desc='print out information required for parallel mode and exit', argstr='-Q') vox_p_values = traits.Bool( desc='output voxelwise (corrected and uncorrected) p-value images', argstr='-x') tfce = traits.Bool( desc='carry out Threshold-Free Cluster Enhancement', argstr='-T') tfce2D = traits.Bool( desc='carry out Threshold-Free Cluster Enhancement with 2D optimisation', argstr='--T2') f_only = traits.Bool(desc='calculate f-statistics only', argstr='--f_only') raw_stats_imgs = traits.Bool( desc='output raw ( unpermuted ) statistic images', argstr='-R') p_vec_n_dist_files = traits.Bool( desc='output permutation vector and null distribution text files', argstr='-P') num_perm = traits.Int( argstr='-n %d', desc='number of permutations (default 5000, set to 0 for exhaustive)') seed = traits.Int( argstr='--seed=%d', desc='specific integer seed for random number generator') var_smooth = traits.Int( argstr='-v %d', desc='use variance smoothing (std is in mm)') c_thresh = traits.Float( argstr='-c %.2f', desc='carry out cluster-based thresholding') cm_thresh = traits.Float( argstr='-C %.2f', desc='carry out cluster-mass-based thresholding') f_c_thresh = traits.Float( argstr='-F %.2f', desc='carry out f cluster thresholding') f_cm_thresh = traits.Float( argstr='-S %.2f', desc='carry out f cluster-mass thresholding') tfce_H = traits.Float( argstr='--tfce_H=%.2f', desc='TFCE height parameter (default=2)') tfce_E = traits.Float( argstr='--tfce_E=%.2f', desc='TFCE extent parameter (default=0.5)') tfce_C = traits.Float( argstr='--tfce_C=%.2f', desc='TFCE connectivity (6 or 26; default=6)') class RandomiseOutputSpec(TraitedSpec): tstat_files = traits.List( File(exists=True), desc='t contrast raw statistic') fstat_files = traits.List( File(exists=True), desc='f contrast raw statistic') t_p_files = traits.List( File(exists=True), desc='f contrast uncorrected p values files') f_p_files = traits.List( File(exists=True), desc='f contrast uncorrected p values files') t_corrected_p_files = traits.List( File(exists=True), desc='t contrast FWE (Family-wise error) corrected p values files') f_corrected_p_files = traits.List( File(exists=True), desc='f contrast FWE (Family-wise error) corrected p values files') class Randomise(FSLCommand): """XXX UNSTABLE DO NOT USE FSL Randomise: feeds the 4D projected FA data into GLM modelling and thresholding in order to find voxels which correlate with your model Example ------- >>> import nipype.interfaces.fsl as fsl >>> rand = fsl.Randomise(in_file='allFA.nii', mask = 'mask.nii', tcon='design.con', design_mat='design.mat') >>> rand.cmdline 'randomise -i allFA.nii -o "tbss_" -d design.mat -t design.con -m mask.nii' """ _cmd = 'randomise' input_spec = RandomiseInputSpec output_spec = RandomiseOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['tstat_files'] = glob(self._gen_fname( '%s_tstat*.nii' % self.inputs.base_name)) outputs['fstat_files'] = glob(self._gen_fname( '%s_fstat*.nii' % self.inputs.base_name)) prefix = False if self.inputs.tfce or self.inputs.tfce2D: prefix = 'tfce' elif self.inputs.vox_p_values: prefix = 'vox' elif self.inputs.c_thresh or self.inputs.f_c_thresh: prefix = 'clustere' elif self.inputs.cm_thresh or self.inputs.f_cm_thresh: prefix = 'clusterm' if prefix: outputs['t_p_files'] = glob(self._gen_fname( '%s_%s_p_tstat*' % (self.inputs.base_name, prefix))) outputs['t_corrected_p_files'] = glob(self._gen_fname( '%s_%s_corrp_tstat*.nii' % (self.inputs.base_name, prefix))) 
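# randomise names its p-value images '<base_name>_<prefix>_p_<stat>*' and
# '<base_name>_<prefix>_corrp_<stat>*'; the f-statistic p-value images are
# collected below using the same prefix as the t-statistic images above.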
outputs['f_p_files'] = glob(self._gen_fname( '%s_%s_p_fstat*.nii' % (self.inputs.base_name, prefix))) outputs['f_corrected_p_files'] = glob(self._gen_fname( '%s_%s_corrp_fstat*.nii' % (self.inputs.base_name, prefix))) return outputs class GLMInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='-i %s', mandatory=True, position=1, desc='input file name (text matrix or 3D/4D image file)') out_file = File(name_template="%s_glm", argstr='-o %s', position=3, desc=('filename for GLM parameter estimates' + ' (GLM betas)'), name_source="in_file", keep_extension=True) design = File(exists=True, argstr='-d %s', mandatory=True, position=2, desc=('file name of the GLM design matrix (text time' + ' courses for temporal regression or an image' + ' file for spatial regression)')) contrasts = File(exists=True, argstr='-c %s', desc=('matrix of t-statics' + ' contrasts')) mask = File(exists=True, argstr='-m %s', desc=('mask image file name if' + ' input is image')) dof = traits.Int(argstr='--dof=%d', desc=('set degrees of freedom' + ' explicitly')) des_norm = traits.Bool(argstr='--des_norm', desc=('switch on normalization' + ' of the design matrix' + ' columns to unit std' + ' deviation')) dat_norm = traits.Bool(argstr='--dat_norm', desc=('switch on normalization' + ' of the data time' + ' series to unit std' + ' deviation')) var_norm = traits.Bool(argstr='--vn', desc=('perform MELODIC variance-' + 'normalisation on data')) demean = traits.Bool(argstr='--demean', desc=('switch on demeaining of ' + ' design and data')) out_cope = File(argstr='--out_cope=%s', desc='output file name for COPE (either as txt or image') out_z_name = File(argstr='--out_z=%s', desc='output file name for Z-stats (either as txt or image') out_t_name = File(argstr='--out_t=%s', desc='output file name for t-stats (either as txt or image') out_p_name = File(argstr='--out_p=%s', desc=('output file name for p-values of Z-stats (either as' + ' text file or image)')) out_f_name = File(argstr='--out_f=%s', desc='output file name for F-value of full model fit') out_pf_name = File(argstr='--out_pf=%s', desc='output file name for p-value for full model fit') out_res_name = File(argstr='--out_res=%s', desc='output file name for residuals') out_varcb_name = File(argstr='--out_varcb=%s', desc='output file name for variance of COPEs') out_sigsq_name = File(argstr='--out_sigsq=%s', desc=('output file name for residual noise variance' + ' sigma-square')) out_data_name = File(argstr='--out_data=%s', desc='output file name for pre-processed data') out_vnscales_name = File(argstr='--out_vnscales=%s', desc=('output file name for scaling factors for variance' + ' normalisation')) class GLMOutputSpec(TraitedSpec): out_file = File(exists=True, desc=('file name of GLM parameters' ' (if generated)')) out_cope = OutputMultiPath(File(exists=True), desc=('output file name for COPEs (either as ' 'text file or image)')) out_z = OutputMultiPath(File(exists=True), desc=('output file name for COPEs (either as text ' 'file or image)')) out_t = OutputMultiPath(File(exists=True), desc=('output file name for t-stats (either as ' 'text file or image)')) out_p = OutputMultiPath(File(exists=True), desc=('output file name for p-values of Z-stats ' '(either as text file or image)')) out_f = OutputMultiPath(File(exists=True), desc=('output file name for F-value of full model ' 'fit')) out_pf = OutputMultiPath(File(exists=True), desc=('output file name for p-value for full ' 'model fit')) out_res = OutputMultiPath(File(exists=True), desc='output file name for 
residuals') out_varcb = OutputMultiPath(File(exists=True), desc='output file name for variance of COPEs') out_sigsq = OutputMultiPath(File(exists=True), desc=('output file name for residual noise ' 'variance sigma-square')) out_data = OutputMultiPath(File(exists=True), desc='output file for preprocessed data') out_vnscales = OutputMultiPath(File(exists=True), desc=('output file name for scaling factors ' 'for variance normalisation')) class GLM(FSLCommand): """ FSL GLM: Example ------- >>> import nipype.interfaces.fsl as fsl >>> glm = fsl.GLM(in_file='functional.nii', design='maps.nii', output_type='NIFTI') >>> glm.cmdline 'fsl_glm -i functional.nii -d maps.nii -o functional_glm.nii' """ _cmd = 'fsl_glm' input_spec = GLMInputSpec output_spec = GLMOutputSpec def _list_outputs(self): outputs = super(GLM, self)._list_outputs() if isdefined(self.inputs.out_cope): outputs['out_cope'] = os.path.abspath(self.inputs.out_cope) if isdefined(self.inputs.out_z_name): outputs['out_z'] = os.path.abspath(self.inputs.out_z_name) if isdefined(self.inputs.out_t_name): outputs['out_t'] = os.path.abspath(self.inputs.out_t_name) if isdefined(self.inputs.out_p_name): outputs['out_p'] = os.path.abspath(self.inputs.out_p_name) if isdefined(self.inputs.out_f_name): outputs['out_f'] = os.path.abspath(self.inputs.out_f_name) if isdefined(self.inputs.out_pf_name): outputs['out_pf'] = os.path.abspath(self.inputs.out_pf_name) if isdefined(self.inputs.out_res_name): outputs['out_res'] = os.path.abspath(self.inputs.out_res_name) if isdefined(self.inputs.out_varcb_name): outputs['out_varcb'] = os.path.abspath(self.inputs.out_varcb_name) if isdefined(self.inputs.out_sigsq_name): outputs['out_sigsq'] = os.path.abspath(self.inputs.out_sigsq_name) if isdefined(self.inputs.out_data_name): outputs['out_data'] = os.path.abspath(self.inputs.out_data_name) if isdefined(self.inputs.out_vnscales_name): outputs['out_vnscales'] = os.path.abspath( self.inputs.out_vnscales_name) return outputs nipype-0.9.2/nipype/interfaces/fsl/preprocess.py000066400000000000000000002141011227300005300217670ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL `_ command line tools. This was written to work with FSL version 4.1.4. 
Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os import os.path as op import warnings import numpy as np from nipype.interfaces.fsl.base import FSLCommand, FSLCommandInputSpec from nipype.interfaces.base import (TraitedSpec, File, InputMultiPath, OutputMultiPath, Undefined, traits, isdefined, OutputMultiPath) from nipype.utils.filemanip import split_filename from nibabel import load warn = warnings.warn warnings.filterwarnings('always', category=UserWarning) class BETInputSpec(FSLCommandInputSpec): # We use position args here as list indices - so a negative number # will put something on the end in_file = File(exists=True, desc='input file to skull strip', argstr='%s', position=0, mandatory=True) out_file = File(desc='name of output skull stripped image', argstr='%s', position=1, genfile=True, hash_files=False) outline = traits.Bool(desc='create surface outline image', argstr='-o') mask = traits.Bool(desc='create binary mask image', argstr='-m') skull = traits.Bool(desc='create skull image', argstr='-s') no_output = traits.Bool(argstr='-n', desc="Don't generate segmented output") frac = traits.Float(desc='fractional intensity threshold', argstr='-f %.2f') vertical_gradient = traits.Float(argstr='-g %.2f', desc='vertical gradient in fractional intensity ' 'threshold (-1, 1)') radius = traits.Int(argstr='-r %d', units='mm', desc="head radius") center = traits.List(traits.Int, desc='center of gravity in voxels', argstr='-c %s', minlen=0, maxlen=3, units='voxels') threshold = traits.Bool(argstr='-t', desc="apply thresholding to segmented brain image and mask") mesh = traits.Bool(argstr='-e', desc="generate a vtk mesh brain surface") # the remaining 'options' are more like modes (mutually exclusive) that # FSL actually implements in a shell script wrapper around the bet binary. 
# for some combinations of them in specific order a call would not fail, # but in general using more than one of the following is clearly not # supported _xor_inputs = ('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided') robust = traits.Bool(desc='robust brain centre estimation ' '(iterates BET several times)', argstr='-R', xor=_xor_inputs) padding = traits.Bool(desc='improve BET if FOV is very small in Z ' '(by temporarily padding end slices)', argstr='-Z', xor=_xor_inputs) remove_eyes = traits.Bool(desc='eye & optic nerve cleanup (can be ' 'useful in SIENA)', argstr='-S', xor=_xor_inputs) surfaces = traits.Bool(desc='run bet2 and then betsurf to get additional ' 'skull and scalp surfaces (includes ' 'registrations)', argstr='-A', xor=_xor_inputs) t2_guided = File(desc='as with creating surfaces, when also feeding in ' 'non-brain-extracted T2 (includes registrations)', argstr='-A2 %s', xor=_xor_inputs) functional = traits.Bool(argstr='-F', xor=_xor_inputs, desc="apply to 4D fMRI data") reduce_bias = traits.Bool(argstr='-B', xor=_xor_inputs, desc="bias field and neck cleanup") class BETOutputSpec(TraitedSpec): out_file = File( desc="path/name of skullstripped file (if generated)") mask_file = File( desc="path/name of binary brain mask (if generated)") outline_file = File( desc="path/name of outline file (if generated)") meshfile = File( desc="path/name of vtk mesh file (if generated)") inskull_mask_file = File( desc="path/name of inskull mask (if generated)") inskull_mesh_file = File( desc="path/name of inskull mesh outline (if generated)") outskull_mask_file = File( desc="path/name of outskull mask (if generated)") outskull_mesh_file = File( desc="path/name of outskull mesh outline (if generated)") outskin_mask_file = File( desc="path/name of outskin mask (if generated)") outskin_mesh_file = File( desc="path/name of outskin mesh outline (if generated)") skull_mask_file = File( desc="path/name of skull mask (if generated)") class BET(FSLCommand): """Use FSL BET command for skull stripping. For complete details, see the `BET Documentation. `_ Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> btr = fsl.BET() >>> btr.inputs.in_file = example_data('structural.nii') >>> btr.inputs.frac = 0.7 >>> res = btr.run() # doctest: +SKIP """ _cmd = 'bet' input_spec = BETInputSpec output_spec = BETOutputSpec def _run_interface(self, runtime): # The returncode is meaningless in BET. So check the output # in stderr and if it's set, then update the returncode # accordingly. 
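# A minimal sketch of requesting the binary brain mask alongside the
# skull-stripped image (illustrative; 'structural.nii' is an assumed input
# file name):
#
#     >>> from nipype.interfaces import fsl
#     >>> btr = fsl.BET(in_file='structural.nii', frac=0.5, mask=True)
#     >>> res = btr.run()  # doctest: +SKIP
#     >>> res.outputs.mask_file  # doctest: +SKIP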
runtime = super(BET, self)._run_interface(runtime) if runtime.stderr: self.raise_exception(runtime) return runtime def _gen_outfilename(self): out_file = self.inputs.out_file if not isdefined(out_file) and isdefined(self.inputs.in_file): out_file = self._gen_fname(self.inputs.in_file, suffix='_brain') return os.path.abspath(out_file) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self._gen_outfilename() if ((isdefined(self.inputs.mesh) and self.inputs.mesh) or (isdefined(self.inputs.surfaces) and self.inputs.surfaces)): outputs['meshfile'] = self._gen_fname(outputs['out_file'], suffix='_mesh.vtk', change_ext=False) if (isdefined(self.inputs.mask) and self.inputs.mask) or \ (isdefined(self.inputs.reduce_bias) and self.inputs.reduce_bias): outputs['mask_file'] = self._gen_fname(outputs['out_file'], suffix='_mask') if isdefined(self.inputs.outline) and self.inputs.outline: outputs['outline_file'] = self._gen_fname(outputs['out_file'], suffix='_overlay') if isdefined(self.inputs.surfaces) and self.inputs.surfaces: outputs['inskull_mask_file'] = self._gen_fname(outputs['out_file'], suffix='_inskull_mask') outputs['inskull_mesh_file'] = self._gen_fname(outputs['out_file'], suffix='_inskull_mesh') outputs[ 'outskull_mask_file'] = self._gen_fname(outputs['out_file'], suffix='_outskull_mask') outputs[ 'outskull_mesh_file'] = self._gen_fname(outputs['out_file'], suffix='_outskull_mesh') outputs['outskin_mask_file'] = self._gen_fname(outputs['out_file'], suffix='_outskin_mask') outputs['outskin_mesh_file'] = self._gen_fname(outputs['out_file'], suffix='_outskin_mesh') outputs['skull_mask_file'] = self._gen_fname(outputs['out_file'], suffix='_skull_mask') if isdefined(self.inputs.no_output) and self.inputs.no_output: outputs['out_file'] = Undefined return outputs def _gen_filename(self, name): if name == 'out_file': return self._gen_outfilename() return None class FASTInputSpec(FSLCommandInputSpec): """ Defines inputs (trait classes) for FAST """ in_files = InputMultiPath(File(exists=True), copyfile=False, desc='image, or multi-channel set of images, ' 'to be segmented', argstr='%s', position=-1, mandatory=True) out_basename = File(desc='base name of output files', argstr='-o %s') # uses in_file name as basename if none given number_classes = traits.Range(low=1, high=10, argstr='-n %d', desc='number of tissue-type classes') output_biasfield = traits.Bool(desc='output estimated bias field', argstr='-b') output_biascorrected = traits.Bool(desc='output restored image ' '(bias-corrected image)', argstr='-B') img_type = traits.Enum((1, 2, 3), desc='int specifying type of image: ' '(1 = T1, 2 = T2, 3 = PD)', argstr='-t %d') bias_iters = traits.Range(low=1, high=10, argstr='-I %d', desc='number of main-loop iterations during ' 'bias-field removal') bias_lowpass = traits.Range(low=4, high=40, desc='bias field smoothing extent (FWHM) ' 'in mm', argstr='-l %d', units='mm') init_seg_smooth = traits.Range(low=0.0001, high=0.1, desc='initial segmentation spatial ' 'smoothness (during bias field ' 'estimation)', argstr='-f %.3f') segments = traits.Bool(desc='outputs a separate binary image for each ' 'tissue type', argstr='-g') init_transform = File(exists=True, desc=' initialise' ' using priors', argstr='-a %s') other_priors = InputMultiPath( File(exist=True), desc='alternative prior images', argstr='-A %s', minlen=3, maxlen=3) no_pve = traits.Bool(desc='turn off PVE (partial volume estimation)', argstr='--nopve') no_bias = traits.Bool(desc='do not remove bias field', 
argstr='-N') use_priors = traits.Bool(desc='use priors throughout', argstr='-P') # must also set -a!, # mutually inclusive?? # No, conditional # mandatory... need to # figure out how to # handle with traits. segment_iters = traits.Range(low=1, high=50, desc='number of segmentation-initialisation' ' iterations', argstr='-W %d') mixel_smooth = traits.Range(low=0.0, high=1.0, desc='spatial smoothness for mixeltype', argstr='-R %.2f') iters_afterbias = traits.Range(low=1, high=20, desc='number of main-loop iterations ' 'after bias-field removal', argstr='-O %d') hyper = traits.Range(low=0.0, high=1.0, desc='segmentation spatial smoothness', argstr='-H %.2f') verbose = traits.Bool(desc='switch on diagnostic messages', argstr='-v') manual_seg = File(exists=True, desc='Filename containing intensities', argstr='-s %s') probability_maps = traits.Bool(desc='outputs individual probability maps', argstr='-p') class FASTOutputSpec(TraitedSpec): """Specify possible outputs from FAST""" tissue_class_map = File(exists=True, desc='path/name of binary segmented volume file' ' one val for each class _seg') tissue_class_files = OutputMultiPath(File(desc='path/name of binary segmented volumes ' 'one file for each class _seg_x')) restored_image = OutputMultiPath(File(desc='restored images (one for each input image) ' 'named according to the input images _restore')) mixeltype = File(desc="path/name of mixeltype volume file _mixeltype") partial_volume_map = File(desc="path/name of partial volume file _pveseg") partial_volume_files = OutputMultiPath(File(desc='path/name of partial volumes files ' 'one for each class, _pve_x')) bias_field = OutputMultiPath(File(desc='Estimated bias field _bias')) probability_maps = OutputMultiPath(File(desc='filenames, one for each class, for each ' 'input, prob_x')) class FAST(FSLCommand): """ Use FSL FAST for segmenting and bias correction. For complete details, see the `FAST Documentation. 
`_ Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data Assign options through the ``inputs`` attribute: >>> fastr = fsl.FAST() >>> fastr.inputs.in_files = example_data('structural.nii') >>> out = fastr.run() #doctest: +SKIP """ _cmd = 'fast' input_spec = FASTInputSpec output_spec = FASTOutputSpec def _format_arg(self, name, spec, value): # first do what should be done in general formated = super(FAST, self)._format_arg(name, spec, value) if name == 'in_files': # FAST needs the -S parameter value to correspond to the number # of input images, otherwise it will ignore all but the first formated = "-S %d %s" % (len(value), formated) return formated def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.number_classes): nclasses = 3 else: nclasses = self.inputs.number_classes # when using multichannel, results basename is based on last # input filename if isdefined(self.inputs.out_basename): basefile = self.inputs.out_basename else: basefile = self.inputs.in_files[-1] outputs['tissue_class_map'] = self._gen_fname(basefile, suffix='_seg') if self.inputs.segments: outputs['tissue_class_files'] = [] for i in range(nclasses): outputs['tissue_class_files'].append( self._gen_fname(basefile, suffix='_seg_%d' % i)) if isdefined(self.inputs.output_biascorrected): outputs['restored_image'] = [] if len(self.inputs.in_files) > 1: # for multi-image segmentation there is one corrected image # per input for val, f in enumerate(self.inputs.in_files): # image numbering is 1-based outputs['restored_image'].append( self._gen_fname(basefile, suffix='_restore_%d' % (val + 1))) else: # single image segmentation has unnumbered output image outputs['restored_image'].append( self._gen_fname(basefile, suffix='_restore')) outputs['mixeltype'] = self._gen_fname(basefile, suffix='_mixeltype') if not self.inputs.no_pve: outputs['partial_volume_map'] = self._gen_fname( basefile, suffix='_pveseg') outputs['partial_volume_files'] = [] for i in range(nclasses): outputs[ 'partial_volume_files'].append(self._gen_fname(basefile, suffix='_pve_%d' % i)) if self.inputs.output_biasfield: outputs['bias_field'] = [] if len(self.inputs.in_files) > 1: # for multi-image segmentation there is one bias field image # per input for val, f in enumerate(self.inputs.in_files): # image numbering is 1-based outputs['bias_field'].append( self._gen_fname(basefile, suffix='_bias_%d' % (val + 1))) else: # single image segmentation has unnumbered output image outputs['bias_field'].append( self._gen_fname(basefile, suffix='_bias')) if self.inputs.probability_maps: outputs['probability_maps'] = [] for i in range(nclasses): outputs['probability_maps'].append( self._gen_fname(basefile, suffix='_prob_%d' % i)) return outputs class FLIRTInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='-in %s', mandatory=True, position=0, desc='input file') reference = File(exists=True, argstr='-ref %s', mandatory=True, position=1, desc='reference file') out_file = File(argstr='-out %s', desc='registered output file', name_source=['in_file'], name_template='%s_flirt', position=2, hash_files=False) out_matrix_file = File(argstr='-omat %s', name_source=['in_file'], keep_extension=True, name_template='%s_flirt.mat', desc='output affine matrix in 4x4 asciii format', position=3, hash_files=False) out_log = File(name_source=['in_file'], keep_extension=True, requires=['save_log'], name_template='%s_flirt.log', desc='output log') in_matrix_file = File(argstr='-init %s', desc='input 
4x4 affine matrix') apply_xfm = traits.Bool(argstr='-applyxfm', requires=['in_matrix_file'], desc='apply transformation supplied by in_matrix_file') apply_isoxfm = traits.Float(argstr='-applyisoxfm %f', xor=['apply_xfm'], desc='as applyxfm but forces isotropic resampling') datatype = traits.Enum('char', 'short', 'int', 'float', 'double', argstr='-datatype %s', desc='force output data type') cost = traits.Enum('mutualinfo', 'corratio', 'normcorr', 'normmi', 'leastsq', 'labeldiff', 'bbr', argstr='-cost %s', desc='cost function') # XXX What is the difference between 'cost' and 'searchcost'? Are # these both necessary or do they map to the same variable. cost_func = traits.Enum('mutualinfo', 'corratio', 'normcorr', 'normmi', 'leastsq', 'labeldiff', 'bbr', argstr='-searchcost %s', desc='cost function') uses_qform = traits.Bool(argstr='-usesqform', desc='initialize using sform or qform') display_init = traits.Bool(argstr='-displayinit', desc='display initial matrix') angle_rep = traits.Enum('quaternion', 'euler', argstr='-anglerep %s', desc='representation of rotation angles') interp = traits.Enum('trilinear', 'nearestneighbour', 'sinc', 'spline', argstr='-interp %s', desc='final interpolation method used in reslicing') sinc_width = traits.Int(argstr='-sincwidth %d', units='voxels', desc='full-width in voxels') sinc_window = traits.Enum('rectangular', 'hanning', 'blackman', argstr='-sincwindow %s', desc='sinc window') # XXX better doc bins = traits.Int(argstr='-bins %d', desc='number of histogram bins') dof = traits.Int(argstr='-dof %d', desc='number of transform degrees of freedom') no_resample = traits.Bool(argstr='-noresample', desc='do not change input sampling') force_scaling = traits.Bool(argstr='-forcescaling', desc='force rescaling even for low-res images') min_sampling = traits.Float(argstr='-minsampling %f', units='mm', desc='set minimum voxel dimension for sampling') padding_size = traits.Int(argstr='-paddingsize %d', units='voxels', desc='for applyxfm: interpolates outside image ' 'by size') searchr_x = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees', argstr='-searchrx %s', desc='search angles along x-axis, in degrees') searchr_y = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees', argstr='-searchry %s', desc='search angles along y-axis, in degrees') searchr_z = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees', argstr='-searchrz %s', desc='search angles along z-axis, in degrees') no_search = traits.Bool(argstr='-nosearch', desc='set all angular searches to ranges 0 to 0') coarse_search = traits.Int(argstr='-coarsesearch %d', units='degrees', desc='coarse search delta angle') fine_search = traits.Int(argstr='-finesearch %d', units='degrees', desc='fine search delta angle') schedule = File(exists=True, argstr='-schedule %s', desc='replaces default schedule') ref_weight = File(exists=True, argstr='-refweight %s', desc='File for reference weighting volume') in_weight = File(exists=True, argstr='-inweight %s', desc='File for input weighting volume') no_clamp = traits.Bool(argstr='-noclamp', desc='do not use intensity clamping') no_resample_blur = traits.Bool(argstr='-noresampblur', desc='do not use blurring on downsampling') rigid2D = traits.Bool(argstr='-2D', desc='use 2D rigid body mode - ignores dof') save_log = traits.Bool(desc='save to log file') verbose = traits.Int(argstr='-verbose %d', desc='verbose mode, 0 is least') # BBR options wm_seg = File( argstr='-wmseg %s', min_ver='5.0.0', desc='white matter segmentation volume needed by BBR cost 
function') wmcoords = File( argstr='-wmcoords %s', min_ver='5.0.0', desc='white matter boundary coordinates for BBR cost function') wmnorms = File( argstr='-wmnorms %s', min_ver='5.0.0', desc='white matter boundary normals for BBR cost function') fieldmap = File( argstr='-fieldmap %s', min_ver='5.0.0', desc='fieldmap image in rads/s - must be already registered to the reference image') fieldmapmask = File( argstr='-fieldmapmask %s', min_ver='5.0.0', desc='mask for fieldmap image') pedir = traits.Int( argstr='-pedir %d', min_ver='5.0.0', desc='phase encode direction of EPI - 1/2/3=x/y/z & -1/-2/-3=-x/-y/-z') echospacing = traits.Float( argstr='-echospacing %f', min_ver='5.0.0', desc='value of EPI echo spacing - units of seconds') bbrtype = traits.Enum( 'signed', 'global_abs', 'local_abs', argstr='-bbrtype %s', min_ver='5.0.0', desc='type of bbr cost function: signed [default], global_abs, local_abs') bbrslope = traits.Float( argstr='-bbrslope %f', min_ver='5.0.0', desc='value of bbr slope') class FLIRTOutputSpec(TraitedSpec): out_file = File(exists=True, desc='path/name of registered file (if generated)') out_matrix_file = File(exists=True, desc='path/name of calculated affine transform ' '(if generated)') out_log = File(desc='path/name of output log (if generated)') class FLIRT(FSLCommand): """Use FSL FLIRT for coregistration. For complete details, see the `FLIRT Documentation. `_ To print out the command line help, use: fsl.FLIRT().inputs_help() Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> flt = fsl.FLIRT(bins=640, cost_func='mutualinfo') >>> flt.inputs.in_file = example_data('structural.nii') >>> flt.inputs.reference = example_data('mni.nii') >>> flt.cmdline #doctest: +ELLIPSIS 'flirt -in .../structural.nii -ref .../mni.nii -out structural_flirt.nii.gz -omat structural_flirt.mat -bins 640 -searchcost mutualinfo' >>> res = flt.run() #doctest: +SKIP """ _cmd = 'flirt' input_spec = FLIRTInputSpec output_spec = FLIRTOutputSpec def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = super(FLIRT, self).aggregate_outputs( runtime=runtime, needed_outputs=needed_outputs) if isdefined(self.inputs.save_log) and self.inputs.save_log: with open(outputs.out_log, "a") as text_file: text_file.write(runtime.stdout + '\n') return outputs def _parse_inputs(self, skip=None): skip = [] if isdefined(self.inputs.save_log) and self.inputs.save_log: if not isdefined(self.inputs.verbose) or self.inputs.verbose == 0: self.inputs.verbose = 1 skip.append('save_log') return super(FLIRT, self)._parse_inputs(skip=skip) class ApplyXfmInputSpec(FLIRTInputSpec): apply_xfm = traits.Bool( True, argstr='-applyxfm', requires=['in_matrix_file'], desc='apply transformation supplied by in_matrix_file', usedefault=True) class ApplyXfm(FLIRT): """Currently just a light wrapper around FLIRT, with no modifications ApplyXfm is used to apply an existing tranform to an image Examples -------- >>> import nipype.interfaces.fsl as fsl >>> from nipype.testing import example_data >>> applyxfm = fsl.ApplyXfm() >>> applyxfm.inputs.in_file = example_data('structural.nii') >>> applyxfm.inputs.in_matrix_file = example_data('trans.mat') >>> applyxfm.inputs.out_file = 'newfile.nii' >>> applyxfm.inputs.reference = example_data('mni.nii') >>> applyxfm.inputs.apply_xfm = True >>> result = applyxfm.run() # doctest: +SKIP """ input_spec = ApplyXfmInputSpec class MCFLIRTInputSpec(FSLCommandInputSpec): in_file = File(exists=True, position=0, argstr="-in %s", mandatory=True, 
desc="timeseries to motion-correct") out_file = File(argstr='-out %s', genfile=True, desc="file to write", hash_files=False) cost = traits.Enum( 'mutualinfo', 'woods', 'corratio', 'normcorr', 'normmi', 'leastsquares', argstr='-cost %s', desc="cost function to optimize") bins = traits.Int(argstr='-bins %d', desc="number of histogram bins") dof = traits.Int( argstr='-dof %d', desc="degrees of freedom for the transformation") ref_vol = traits.Int(argstr='-refvol %d', desc="volume to align frames to") scaling = traits.Float( argstr='-scaling %.2f', desc="scaling factor to use") smooth = traits.Float( argstr='-smooth %.2f', desc="smoothing factor for the cost function") rotation = traits.Int( argstr='-rotation %d', desc="scaling factor for rotation tolerances") stages = traits.Int(argstr='-stages %d', desc="stages (if 4, perform final search with sinc interpolation") init = File(exists=True, argstr='-init %s', desc="inital transformation matrix") interpolation = traits.Enum("spline", "nn", "sinc", argstr="-%s_final", desc="interpolation method for transformation") use_gradient = traits.Bool( argstr='-gdt', desc="run search on gradient images") use_contour = traits.Bool( argstr='-edge', desc="run search on contour images") mean_vol = traits.Bool(argstr='-meanvol', desc="register to mean volume") stats_imgs = traits.Bool( argstr='-stats', desc="produce variance and std. dev. images") save_mats = traits.Bool( argstr='-mats', desc="save transformation matrices") save_plots = traits.Bool( argstr='-plots', desc="save transformation parameters") save_rms = traits.Bool( argstr='-rmsabs -rmsrel', desc="save rms displacement parameters") ref_file = File(exists=True, argstr='-reffile %s', desc="target image for motion correction") class MCFLIRTOutputSpec(TraitedSpec): out_file = File(exists=True, desc="motion-corrected timeseries") variance_img = File(exists=True, desc="variance image") std_img = File(exists=True, desc="standard deviation image") mean_img = File(exists=True, desc="mean timeseries image") par_file = File(exists=True, desc="text-file with motion parameters") mat_file = OutputMultiPath(File( exists=True), desc="transformation matrices") rms_files = OutputMultiPath(File(exists=True), desc="absolute and relative displacement parameters") class MCFLIRT(FSLCommand): """Use FSL MCFLIRT to do within-modality motion correction. For complete details, see the `MCFLIRT Documentation. `_ Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> mcflt = fsl.MCFLIRT(in_file=example_data('functional.nii'), cost='mutualinfo') >>> res = mcflt.run() # doctest: +SKIP """ _cmd = 'mcflirt' input_spec = MCFLIRTInputSpec output_spec = MCFLIRTOutputSpec def _format_arg(self, name, spec, value): if name == "interpolation": if value == "trilinear": return "" else: return spec.argstr % value return super(MCFLIRT, self)._format_arg(name, spec, value) def _list_outputs(self): cwd = os.getcwd() outputs = self._outputs().get() outputs['out_file'] = self._gen_outfilename() if isdefined(self.inputs.stats_imgs) and self.inputs.stats_imgs: outputs['variance_img'] = self._gen_fname(outputs['out_file'] + '_variance.ext', cwd=cwd) outputs['std_img'] = self._gen_fname(outputs['out_file'] + '_sigma.ext', cwd=cwd) # The mean image created if -stats option is specified ('meanvol') # is missing the top and bottom slices. Therefore we only expose the # mean image created by -meanvol option ('mean_reg') which isn't # corrupted. 
# Note that the same problem holds for the std and variance image. if isdefined(self.inputs.mean_vol) and self.inputs.mean_vol: outputs['mean_img'] = self._gen_fname(outputs['out_file'] + '_mean_reg.ext', cwd=cwd) if isdefined(self.inputs.save_mats) and self.inputs.save_mats: _, filename = os.path.split(outputs['out_file']) matpathname = os.path.join(cwd, filename + '.mat') _, _, _, timepoints = load(self.inputs.in_file).get_shape() outputs['mat_file'] = [] for t in range(timepoints): outputs['mat_file'].append(os.path.join(matpathname, 'MAT_%04d' % t)) if isdefined(self.inputs.save_plots) and self.inputs.save_plots: # Note - if e.g. out_file has .nii.gz, you get .nii.gz.par, # which is what mcflirt does! outputs['par_file'] = outputs['out_file'] + '.par' if isdefined(self.inputs.save_rms) and self.inputs.save_rms: outfile = outputs['out_file'] outputs['rms_files'] = [outfile + '_abs.rms', outfile + '_rel.rms'] return outputs def _gen_filename(self, name): if name == 'out_file': return self._gen_outfilename() return None def _gen_outfilename(self): out_file = self.inputs.out_file if isdefined(out_file): out_file = os.path.realpath(out_file) if not isdefined(out_file) and isdefined(self.inputs.in_file): out_file = self._gen_fname(self.inputs.in_file, suffix='_mcf') return os.path.abspath(out_file) class FNIRTInputSpec(FSLCommandInputSpec): ref_file = File(exists=True, argstr='--ref=%s', mandatory=True, desc='name of reference image') in_file = File(exists=True, argstr='--in=%s', mandatory=True, desc='name of input image') affine_file = File(exists=True, argstr='--aff=%s', desc='name of file containing affine transform') inwarp_file = File(exists=True, argstr='--inwarp=%s', desc='name of file containing initial non-linear warps') in_intensitymap_file = File(exists=True, argstr='--intin=%s', desc='name of file/files containing initial intensity maping' 'usually generated by previos fnirt run') fieldcoeff_file = traits.Either(traits.Bool, File, argstr='--cout=%s', desc='name of output file with field coefficients or true') warped_file = File(argstr='--iout=%s', desc='name of output image', genfile=True, hash_files=False) field_file = traits.Either(traits.Bool, File, argstr='--fout=%s', desc='name of output file with field or true', hash_files=False) jacobian_file = traits.Either(traits.Bool, File, argstr='--jout=%s', desc='name of file for writing out the Jacobian' 'of the field (for diagnostic or VBM purposes)', hash_files=False) modulatedref_file = traits.Either(traits.Bool, File, argstr='--refout=%s', desc='name of file for writing out intensity modulated' '--ref (for diagnostic purposes)', hash_files=False) out_intensitymap_file = traits.Either(traits.Bool, File, argstr='--intout=%s', desc='name of files for writing information pertaining ' 'to intensity mapping', hash_files=False) log_file = File(argstr='--logout=%s', desc='Name of log-file', genfile=True, hash_files=False) config_file = traits.Either( traits.Enum("T1_2_MNI152_2mm", "FA_2_FMRIB58_1mm"), File(exists=True), argstr='--config=%s', desc='Name of config file specifying command line arguments') refmask_file = File(exists=True, argstr='--refmask=%s', desc='name of file with mask in reference space') inmask_file = File(exists=True, argstr='--inmask=%s', desc='name of file with mask in input image space') skip_refmask = traits.Bool( argstr='--applyrefmask=0', xor=['apply_refmask'], desc='Skip specified refmask if set, default false') skip_inmask = traits.Bool(argstr='--applyinmask=0', xor=['apply_inmask'], desc='skip specified 
inmask if set, default false') apply_refmask = traits.List( traits.Enum(0, 1), argstr='--applyrefmask=%s', xor=['skip_refmask'], desc='list of iterations to use reference mask on (1 to use, 0 to skip)', sep=",") apply_inmask = traits.List( traits.Enum(0, 1), argstr='--applyinmask=%s', xor=['skip_inmask'], desc='list of iterations to use input mask on (1 to use, 0 to skip)', sep=",") skip_implicit_ref_masking = traits.Bool(argstr='--imprefm=0', desc='skip implicit masking based on value' 'in --ref image. Default = 0') skip_implicit_in_masking = traits.Bool(argstr='--impinm=0', desc='skip implicit masking based on value' 'in --in image. Default = 0') refmask_val = traits.Float(argstr='--imprefval=%f', desc='Value to mask out in --ref image. Default =0.0') inmask_val = traits.Float(argstr='--impinval=%f', desc='Value to mask out in --in image. Default =0.0') max_nonlin_iter = traits.List(traits.Int, argstr='--miter=%s', desc='Max # of non-linear iterations list, default [5, 5, 5, 5]', sep=",") subsampling_scheme = traits.List(traits.Int, argstr='--subsamp=%s', desc='sub-sampling scheme, list, default [4, 2, 1, 1]', sep=",") warp_resolution = traits.Tuple(traits.Int, traits.Int, traits.Int, argstr='--warpres=%d,%d,%d', desc='(approximate) resolution (in mm) of warp basis ' 'in x-, y- and z-direction, default 10, 10, 10') spline_order = traits.Int(argstr='--splineorder=%d', desc='Order of spline, 2->Qadratic spline, 3->Cubic spline. Default=3') in_fwhm = traits.List(traits.Int, argstr='--infwhm=%s', desc='FWHM (in mm) of gaussian smoothing kernel for input volume, default [6, 4, 2, 2]', sep=",") ref_fwhm = traits.List(traits.Int, argstr='--reffwhm=%s', desc='FWHM (in mm) of gaussian smoothing kernel for ref volume, default [4, 2, 0, 0]', sep=",") regularization_model = traits.Enum('membrane_energy', 'bending_energy', argstr='--regmod=%s', desc='Model for regularisation of warp-field [membrane_energy bending_energy], default bending_energy') regularization_lambda = traits.List(traits.Float, argstr='--lambda=%s', desc='Weight of regularisation, default depending on --ssqlambda and --regmod ' 'switches. See user documetation.', sep=",") skip_lambda_ssq = traits.Bool(argstr='--ssqlambda=0', desc='If true, lambda is not weighted by current ssq, default false') jacobian_range = traits.Tuple(traits.Float, traits.Float, argstr='--jacrange=%f,%f', desc='Allowed range of Jacobian determinants, default 0.01, 100.0') derive_from_ref = traits.Bool(argstr='--refderiv', desc='If true, ref image is used to calculate derivatives. 
Default false')
    intensity_mapping_model = traits.Enum(
        'none', 'global_linear', 'global_non_linear', 'local_linear',
        'global_non_linear_with_bias', 'local_non_linear',
        argstr='--intmod=%s',
        desc='Model for intensity-mapping')
    intensity_mapping_order = traits.Int(
        argstr='--intorder=%d',
        desc='Order of polynomial for mapping intensities, default 5')
    biasfield_resolution = traits.Tuple(
        traits.Int, traits.Int, traits.Int, argstr='--biasres=%d,%d,%d',
        desc='Resolution (in mm) of bias-field modelling local intensities, '
             'default 50, 50, 50')
    bias_regularization_lambda = traits.Float(
        argstr='--biaslambda=%f',
        desc='Weight of regularisation for bias-field, default 10000')
    skip_intensity_mapping = traits.Bool(
        argstr='--estint=0', xor=['apply_intensity_mapping'],
        desc='Skip estimation of intensity-mapping, default false')
    apply_intensity_mapping = traits.List(
        traits.Enum(0, 1), argstr='--estint=%s',
        xor=['skip_intensity_mapping'],
        desc='List of subsampling levels to apply intensity mapping for '
             '(0 to skip, 1 to apply)', sep=",")
    hessian_precision = traits.Enum(
        'double', 'float', argstr='--numprec=%s',
        desc='Precision for representing Hessian, double or float. '
             'Default double')


class FNIRTOutputSpec(TraitedSpec):
    fieldcoeff_file = File(exists=True, desc='file with field coefficients')
    warped_file = File(exists=True, desc='warped image')
    field_file = File(desc='file with warp field')
    jacobian_file = File(desc='file containing Jacobian of the field')
    modulatedref_file = File(desc='file containing intensity modulated --ref')
    out_intensitymap_file = File(
        desc='file containing info pertaining to intensity mapping')
    log_file = File(desc='Name of log-file')


class FNIRT(FSLCommand):
    """Use FSL FNIRT for non-linear registration.

    Examples
    --------
    >>> from nipype.interfaces import fsl
    >>> from nipype.testing import example_data
    >>> fnt = fsl.FNIRT(affine_file=example_data('trans.mat'))
    >>> res = fnt.run(ref_file=example_data('mni.nii'), in_file=example_data('structural.nii')) #doctest: +SKIP

    T1 -> MNI152

    >>> from nipype.interfaces import fsl
    >>> fnirt_mprage = fsl.FNIRT()
    >>> fnirt_mprage.inputs.in_fwhm = [8, 4, 2, 2]
    >>> fnirt_mprage.inputs.subsampling_scheme = [4, 2, 1, 1]

    Specify the resolution of the warps

    >>> fnirt_mprage.inputs.warp_resolution = (6, 6, 6)
    >>> res = fnirt_mprage.run(in_file='structural.nii', ref_file='mni.nii', warped_file='warped.nii', fieldcoeff_file='fieldcoeff.nii') #doctest: +SKIP

    We can check the command line and confirm that it's what we expect.
>>> fnirt_mprage.cmdline #doctest: +SKIP 'fnirt --cout=fieldcoeff.nii --in=structural.nii --infwhm=8,4,2,2 --ref=mni.nii --subsamp=4,2,1,1 --warpres=6,6,6 --iout=warped.nii' """ _cmd = 'fnirt' input_spec = FNIRTInputSpec output_spec = FNIRTOutputSpec filemap = {'warped_file': 'warped', 'field_file': 'field', 'jacobian_file': 'field_jacobian', 'modulatedref_file': 'modulated', 'out_intensitymap_file': 'intmap', 'log_file': 'log.txt', 'fieldcoeff_file': 'fieldwarp'} def _list_outputs(self): outputs = self.output_spec().get() for key, suffix in self.filemap.items(): inval = getattr(self.inputs, key) change_ext = True if key in ['warped_file', 'log_file']: if suffix.endswith('.txt'): change_ext = False if isdefined(inval): outputs[key] = inval else: outputs[key] = self._gen_fname(self.inputs.in_file, suffix='_' + suffix, change_ext=change_ext) elif isdefined(inval): if isinstance(inval, bool): if inval: outputs[key] = self._gen_fname(self.inputs.in_file, suffix='_' + suffix, change_ext=change_ext) else: outputs[key] = os.path.abspath(inval) return outputs def _format_arg(self, name, spec, value): if name in self.filemap.keys(): return spec.argstr % self._list_outputs()[name] return super(FNIRT, self)._format_arg(name, spec, value) def _gen_filename(self, name): if name in ['warped_file', 'log_file']: return self._list_outputs()[name] return None def write_config(self, configfile): """Writes out currently set options to specified config file XX TODO : need to figure out how the config file is written Parameters ---------- configfile : /path/to/configfile """ try: fid = open(configfile, 'w+') except IOError: print ('unable to create config_file %s' % (configfile)) for item in self.inputs.get().items(): fid.write('%s\n' % (item)) fid.close() class ApplyWarpInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='--in=%s', mandatory=True, desc='image to be warped') out_file = File(argstr='--out=%s', genfile=True, desc='output filename', hash_files=False) ref_file = File(exists=True, argstr='--ref=%s', mandatory=True, desc='reference image') field_file = File(exists=True, argstr='--warp=%s', desc='file containing warp field') abswarp = traits.Bool(argstr='--abs', xor=['relwarp'], desc="treat warp field as absolute: x' = w(x)") relwarp = traits.Bool(argstr='--rel', xor=['abswarp'], desc="treat warp field as relative: x' = x + w(x)") datatype = traits.Enum('char', 'short', 'int', 'float', 'double', argstr='--datatype=%s', desc='Force output data type [char short int float double].') supersample = traits.Bool(argstr='--super', desc='intermediary supersampling of output, default is off') superlevel = traits.Either(traits.Enum('a'), traits.Int, argstr='--superlevel=%s', desc="level of intermediary supersampling, a for 'automatic' or integer level. 
Default = 2") premat = File(exists=True, argstr='--premat=%s', desc='filename for pre-transform (affine matrix)') postmat = File(exists=True, argstr='--postmat=%s', desc='filename for post-transform (affine matrix)') mask_file = File(exists=True, argstr='--mask=%s', desc='filename for mask image (in reference space)') interp = traits.Enum( 'nn', 'trilinear', 'sinc', 'spline', argstr='--interp=%s', desc='interpolation method') class ApplyWarpOutputSpec(TraitedSpec): out_file = File(exists=True, desc='Warped output file') class ApplyWarp(FSLCommand): """Use FSL's applywarp to apply the results of a FNIRT registration Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> aw = fsl.ApplyWarp() >>> aw.inputs.in_file = example_data('structural.nii') >>> aw.inputs.ref_file = example_data('mni.nii') >>> aw.inputs.field_file = 'my_coefficients_filed.nii' #doctest: +SKIP >>> res = aw.run() #doctest: +SKIP """ _cmd = 'applywarp' input_spec = ApplyWarpInputSpec output_spec = ApplyWarpOutputSpec def _format_arg(self, name, spec, value): if name == 'superlevel': return spec.argstr % str(value) return super(ApplyWarp, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() if not isdefined(self.inputs.out_file): outputs['out_file'] = self._gen_fname(self.inputs.in_file, suffix='_warp') else: outputs['out_file'] = os.path.abspath(self.inputs.out_file) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()[name] return None class SliceTimerInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='--in=%s', mandatory=True, position=0, desc='filename of input timeseries') out_file = File(argstr='--out=%s', genfile=True, desc='filename of output timeseries', hash_files=False) index_dir = traits.Bool(argstr='--down', desc='slice indexing from top to bottom') time_repetition = traits.Float(argstr='--repeat=%f', desc='Specify TR of data - default is 3s') slice_direction = traits.Enum(1, 2, 3, argstr='--direction=%d', desc='direction of slice acquisition (x=1, y=2, z=3) - default is z') interleaved = traits.Bool(argstr='--odd', desc='use interleaved acquisition') custom_timings = File(exists=True, argstr='--tcustom=%s', desc='slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift)') global_shift = traits.Float(argstr='--tglobal', desc='shift in fraction of TR, range 0:1 (default is 0.5 = no shift)') custom_order = File(exists=True, argstr='--ocustom=%s', desc='filename of single-column custom interleave order file (first slice is referred to as 1 not 0)') class SliceTimerOutputSpec(TraitedSpec): slice_time_corrected_file = File( exists=True, desc='slice time corrected file') class SliceTimer(FSLCommand): """ use FSL slicetimer to perform slice timing correction. 
Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> st = fsl.SliceTimer() >>> st.inputs.in_file = example_data('functional.nii') >>> st.inputs.interleaved = True >>> result = st.run() #doctest: +SKIP """ _cmd = 'slicetimer' input_spec = SliceTimerInputSpec output_spec = SliceTimerOutputSpec def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): out_file = self._gen_fname(self.inputs.in_file, suffix='_st') outputs['slice_time_corrected_file'] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()['slice_time_corrected_file'] return None class SUSANInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=1, desc='filename of input timeseries') brightness_threshold = traits.Float(argstr='%.10f', position=2, mandatory=True, desc='brightness threshold and should be greater than ' 'noise level and less than contrast of edges to ' 'be preserved.') fwhm = traits.Float(argstr='%.10f', position=3, mandatory=True, desc='fwhm of smoothing, in mm, gets converted using sqrt(8*log(2))') dimension = traits.Enum(3, 2, argstr='%d', position=4, usedefault=True, desc='within-plane (2) or fully 3D (3)') use_median = traits.Enum(1, 0, argstr='%d', position=5, usedefault=True, desc='whether to use a local median filter in the cases where single-point noise is detected') usans = traits.List( traits.Tuple(File(exists=True), traits.Float), maxlen=2, argstr='', position=6, default=[], usedefault=True, desc='determines whether the smoothing area (USAN) is to be ' 'found from secondary images (0, 1 or 2). A negative ' 'value for any brightness threshold will auto-set the ' 'threshold at 10% of the robust range') out_file = File(argstr='%s', position=-1, genfile=True, desc='output file name', hash_files=False) class SUSANOutputSpec(TraitedSpec): smoothed_file = File(exists=True, desc='smoothed output file') class SUSAN(FSLCommand): """ use FSL SUSAN to perform smoothing Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> print anatfile #doctest: +SKIP anatomical.nii #doctest: +SKIP >>> sus = fsl.SUSAN() >>> sus.inputs.in_file = example_data('structural.nii') >>> sus.inputs.brightness_threshold = 2000.0 >>> sus.inputs.fwhm = 8.0 >>> result = sus.run() #doctest: +SKIP """ _cmd = 'susan' input_spec = SUSANInputSpec output_spec = SUSANOutputSpec def _format_arg(self, name, spec, value): if name == 'fwhm': return spec.argstr % (float(value) / np.sqrt(8 * np.log(2))) if name == 'usans': if not value: return '0' arglist = [str(len(value))] for filename, thresh in value: arglist.extend([filename, '%.10f' % thresh]) return ' '.join(arglist) return super(SUSAN, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): out_file = self._gen_fname(self.inputs.in_file, suffix='_smooth') outputs['smoothed_file'] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()['smoothed_file'] return None class FUGUEInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='--in=%s', desc='filename of input volume') unwarped_file = File( argstr='--unwarp=%s', genfile=True, desc='apply unwarping and save as filename', hash_files=False) forward_warping = traits.Bool( False, usedefault=True, desc='apply 
forward warping instead of unwarping') warped_file = File(argstr='--warp=%s', desc='apply forward warping and save as filename', hash_files=False) phasemap_file = File(exists=True, argstr='--phasemap=%s', desc='filename for input phase image') dwell_to_asym_ratio = traits.Float(argstr='--dwelltoasym=%.10f', desc='set the dwell to asym time ratio') dwell_time = traits.Float(argstr='--dwell=%.10f', desc='set the EPI dwell time per phase-encode line - same as echo spacing - (sec)') asym_se_time = traits.Float(argstr='--asym=%.10f', desc='set the fieldmap asymmetric spin echo time (sec)') fmap_out_file = File(argstr='--savefmap=%s', desc='filename for saving fieldmap (rad/s)', hash_files=False) fmap_in_file = File(exists=True, argstr='--loadfmap=%s', desc='filename for loading fieldmap (rad/s)') save_shift = traits.Bool(desc='output pixel shift volume') shift_out_file = traits.File(argstr='--saveshift=%s', desc='filename for saving pixel shift volume', hash_files=False) shift_in_file = File(exists=True, argstr='--loadshift=%s', desc='filename for reading pixel shift volume') median_2dfilter = traits.Bool(argstr='--median', desc='apply 2D median filtering') despike_2dfilter = traits.Bool(argstr='--despike', desc='apply a 2D de-spiking filter') no_gap_fill = traits.Bool(argstr='--nofill', desc='do not apply gap-filling measure to the fieldmap') no_extend = traits.Bool(argstr='--noextend', desc='do not apply rigid-body extrapolation to the fieldmap') smooth2d = traits.Float(argstr='--smooth2=%.2f', desc='apply 2D Gaussian smoothing of sigma N (in mm)') smooth3d = traits.Float(argstr='--smooth3=%.2f', desc='apply 3D Gaussian smoothing of sigma N (in mm)') poly_order = traits.Int(argstr='--poly=%d', desc='apply polynomial fitting of order N') fourier_order = traits.Int(argstr='--fourier=%d', desc='apply Fourier (sinusoidal) fitting of order N') pava = traits.Bool(argstr='--pava', desc='apply monotonic enforcement via PAVA') despike_threshold = traits.Float(argstr='--despikethreshold=%s', desc='specify the threshold for de-spiking (default=3.0)') unwarp_direction = traits.Enum('x', 'y', 'z', 'x-', 'y-', 'z-', argstr='--unwarpdir=%s', desc='specifies direction of warping (default y)') phase_conjugate = traits.Bool(argstr='--phaseconj', desc='apply phase conjugate method of unwarping') icorr = traits.Bool(argstr='--icorr', requires=['shift_in_file'], desc='apply intensity correction to unwarping (pixel shift method only)') icorr_only = traits.Bool(argstr='--icorronly', requires=['unwarped_file'], desc='apply intensity correction only') mask_file = File(exists=True, argstr='--mask=%s', desc='filename for loading valid mask') save_unmasked_fmap = traits.Bool(argstr='--unmaskfmap', requires=['fmap_out_file'], desc='saves the unmasked fieldmap when using --savefmap') save_unmasked_shift = traits.Bool(argstr='--unmaskshift', requires=['shift_out_file'], desc='saves the unmasked shiftmap when using --saveshift') nokspace = traits.Bool( argstr='--nokspace', desc='do not use k-space forward warping') class FUGUEOutputSpec(TraitedSpec): unwarped_file = File(desc='unwarped file') warped_file = File(desc='forward warped file') shift_out_file = File(desc='voxel shift map file') fmap_out_file = File(desc='fieldmap file') class FUGUE(FSLCommand): """Use FSL FUGUE to unwarp epi's with fieldmaps Examples -------- Please insert examples for use of this command """ _cmd = 'fugue' input_spec = FUGUEInputSpec output_spec = FUGUEOutputSpec def __init__(self, **kwargs): super(FUGUE, self).__init__(**kwargs) warn( 'This 
interface has not been fully tested. Please report any failures.') def _list_outputs(self): outputs = self._outputs().get() if self.inputs.forward_warping: out_field = 'warped_file' else: out_field = 'unwarped_file' out_file = getattr(self.inputs, out_field) if not isdefined(out_file): if isdefined(self.inputs.in_file): out_file = self._gen_fname(self.inputs.in_file, suffix='_'+out_field[:-5]) if isdefined(out_file): outputs[out_field] = os.path.abspath(out_file) if isdefined(self.inputs.fmap_out_file): outputs['fmap_out_file'] = os.path.abspath( self.inputs.fmap_out_file) if isdefined(self.inputs.shift_out_file): outputs['shift_out_file'] = os.path.abspath( self.inputs.shift_out_file) return outputs def _gen_filename(self, name): if name == 'unwarped_file' and not self.inputs.forward_warping: return self._list_outputs()['unwarped_file'] if name == 'warped_file' and self.inputs.forward_warping: return self._list_outputs()['warped_file'] return None def _parse_inputs(self, skip=None): if skip is None: skip = [] if not isdefined(self.inputs.save_shift) or not self.inputs.save_shift: skip += ['shift_out_file'] else: if not isdefined(self.inputs.shift_out_file): self.inputs.shift_out_file = self._gen_fname( self.inputs.in_file, suffix='_vsm') if not isdefined(self.inputs.in_file): skip += ['unwarped_file', 'warped_file'] elif self.inputs.forward_warping: if not isdefined(self.inputs.warped_file): self.inputs.warped_file = self._gen_fname( self.inputs.in_file, suffix='_warped') elif not self.inputs.forward_warping: if not isdefined(self.inputs.unwarped_file): self.inputs.unwarped_file = self._gen_fname( self.inputs.in_file, suffix='_unwarped') return super(FUGUE, self)._parse_inputs(skip=skip) class PRELUDEInputSpec(FSLCommandInputSpec): complex_phase_file = File(exists=True, argstr='--complex=%s', mandatory=True, xor=[ 'magnitude_file', 'phase_file'], desc='complex phase input volume') magnitude_file = File(exists=True, argstr='--abs=%s', mandatory=True, xor=['complex_phase_file'], desc='file containing magnitude image') phase_file = File(exists=True, argstr='--phase=%s', mandatory=True, xor=['complex_phase_file'], desc='raw phase file') unwrapped_phase_file = File(genfile=True, argstr='--unwrap=%s', desc='file containing unwrapepd phase', hash_files=False) num_partitions = traits.Int(argstr='--numphasesplit=%d', desc='number of phase partitions to use') labelprocess2d = traits.Bool(argstr='--labelslices', desc='does label processing in 2D (slice at a time)') process2d = traits.Bool(argstr='--slices', xor=['labelprocess2d'], desc='does all processing in 2D (slice at a time)') process3d = traits.Bool(argstr='--force3D', xor=['labelprocess2d', 'process2d'], desc='forces all processing to be full 3D') threshold = traits.Float(argstr='--thresh=%.10f', desc='intensity threshold for masking') mask_file = File(exists=True, argstr='--mask=%s', desc='filename of mask input volume') start = traits.Int(argstr='--start=%d', desc='first image number to process (default 0)') end = traits.Int(argstr='--end=%d', desc='final image number to process (default Inf)') savemask_file = File(argstr='--savemask=%s', desc='saving the mask volume', hash_files=False) rawphase_file = File(argstr='--rawphase=%s', desc='saving the raw phase output', hash_files=False) label_file = File(argstr='--labels=%s', desc='saving the area labels output', hash_files=False) removeramps = traits.Bool(argstr='--removeramps', desc='remove phase ramps during unwrapping') class PRELUDEOutputSpec(TraitedSpec): unwrapped_phase_file = 
File(exists=True, desc='unwrapped phase file')


class PRELUDE(FSLCommand):
    """Use FSL prelude to do phase unwrapping

    Examples
    --------

    Please insert examples for use of this command

    """
    input_spec = PRELUDEInputSpec
    output_spec = PRELUDEOutputSpec
    _cmd = 'prelude'

    def __init__(self, **kwargs):
        super(PRELUDE, self).__init__(**kwargs)
        warn('This has not been fully tested. Please report any failures.')

    def _list_outputs(self):
        outputs = self._outputs().get()
        out_file = self.inputs.unwrapped_phase_file
        if not isdefined(out_file):
            if isdefined(self.inputs.phase_file):
                out_file = self._gen_fname(self.inputs.phase_file,
                                           suffix='_unwrapped')
            elif isdefined(self.inputs.complex_phase_file):
                out_file = self._gen_fname(self.inputs.complex_phase_file,
                                           suffix='_phase_unwrapped')
        outputs['unwrapped_phase_file'] = os.path.abspath(out_file)
        return outputs

    def _gen_filename(self, name):
        if name == 'unwrapped_phase_file':
            return self._list_outputs()['unwrapped_phase_file']
        return None


class FIRSTInputSpec(FSLCommandInputSpec):
    in_file = File(exists=True, mandatory=True, position=-2, argstr='-i %s',
                   desc='input data file')
    out_file = File('segmented', usedefault=True, mandatory=True, position=-1,
                    argstr='-o %s', desc='output data file', hash_files=False)
    verbose = traits.Bool(argstr='-v', position=1,
                          desc="Use verbose logging.")
    brain_extracted = traits.Bool(argstr='-b', position=2,
                                  desc="Input structural image is already brain-extracted")
    no_cleanup = traits.Bool(argstr='-d', position=3,
                             desc="Do not cleanup image output files (useful for debugging)")
    method = traits.Enum('auto', 'fast', 'none',
                         xor=['method_as_numerical_threshold'],
                         argstr='-m', position=4,
                         desc=("Method must be one of auto, fast, none, or it can be entered "
                               "using the 'method_as_numerical_threshold' input"))
    method_as_numerical_threshold = traits.Float(
        argstr='-m', position=4,
        desc=("Specify a numerical threshold value or use the 'method' input "
              "to choose auto, fast, or none"))
    list_of_specific_structures = traits.List(
        traits.Str, argstr='-s %s', sep=',', position=5, minlen=1,
        desc='Runs only on the specified structures (e.g. L_Hipp, R_Hipp, '
             'L_Accu, R_Accu, L_Amyg, R_Amyg, '
             'L_Caud, R_Caud, L_Pall, R_Pall, '
             'L_Puta, R_Puta, L_Thal, R_Thal, BrStem)')
    affine_file = File(exists=True, position=6, argstr='-a %s',
                       desc=('Affine matrix to use (e.g. img2std.mat) (does not '
                             're-run registration)'))


class FIRSTOutputSpec(TraitedSpec):
    vtk_surfaces = OutputMultiPath(File(exists=True),
                                   desc='VTK format meshes for each subcortical region')
    bvars = OutputMultiPath(File(exists=True),
                            desc='bvars for each subcortical region')
    original_segmentations = File(exists=True,
                                  desc=('3D image file containing the segmented regions as integer '
                                        'values. 
Uses CMA labelling')) segmentation_file = File(exists=True, desc='4D image file containing a single volume per segmented region') class FIRST(FSLCommand): """Use FSL's run_first_all command to segment subcortical volumes http://www.fmrib.ox.ac.uk/fsl/first/index.html Examples -------- >>> from nipype.interfaces import fsl >>> first = fsl.FIRST() >>> first.inputs.in_file = 'structural.nii' >>> first.inputs.out_file = 'segmented.nii' >>> res = first.run() #doctest: +SKIP """ _cmd = 'run_first_all' input_spec = FIRSTInputSpec output_spec = FIRSTOutputSpec def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.list_of_specific_structures): structures = self.inputs.list_of_specific_structures else: structures = ['L_Hipp', 'R_Hipp', 'L_Accu', 'R_Accu', 'L_Amyg', 'R_Amyg', 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta', 'R_Puta', 'L_Thal', 'R_Thal', 'BrStem'] outputs['original_segmentations'] = \ self._gen_fname('original_segmentations') outputs['segmentation_file'] = self._gen_fname('segmentation_file') outputs['vtk_surfaces'] = self._gen_mesh_names('vtk_surfaces', structures) outputs['bvars'] = self._gen_mesh_names('bvars', structures) return outputs def _gen_fname(self, name): path, outname, ext = split_filename(self.inputs.out_file) if name == 'original_segmentations': return op.abspath(outname + '_all_fast_origsegs.nii.gz') if name == 'segmentation_file': return op.abspath(outname + '_all_fast_firstseg.nii.gz') return None def _gen_mesh_names(self, name, structures): path, prefix, ext = split_filename(self.inputs.out_file) if name == 'vtk_surfaces': vtks = list() for struct in structures: vtk = prefix + '-' + struct + '_first.vtk' vtks.append(op.abspath(vtk)) return vtks if name == 'bvars': bvars = list() for struct in structures: bvar = prefix + '-' + struct + '_first.bvars' bvars.append(op.abspath(bvar)) return bvars return None nipype-0.9.2/nipype/interfaces/fsl/setup.py000066400000000000000000000007101227300005300207410ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fsl', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/fsl/tests/000077500000000000000000000000001227300005300203735ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/fsl/tests/test_FILMGLS.py000066400000000000000000000100751227300005300231040ustar00rootroot00000000000000from nipype.testing import assert_equal from nipype.interfaces.fsl.model import FILMGLS, FILMGLSInputSpec def test_filmgls(): input_map = dict(args = dict(argstr='%s',), autocorr_estimate_only = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='-ac',), autocorr_noestimate = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='-noest',), brightness_threshold = dict(argstr='-epith %d',), design_file = dict(argstr='%s',), environ = dict(usedefault=True,), fit_armodel = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='-ar',), full_data = dict(argstr='-v',), ignore_exception = dict(usedefault=True,), 
in_file = dict(mandatory=True,argstr='%s',), mask_size = dict(argstr='-ms %d',), multitaper_product = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='-mt %d',), output_pwdata = dict(argstr='-output_pwdata',), output_type = dict(), results_dir = dict(usedefault=True,argstr='-rn %s',), smooth_autocorr = dict(argstr='-sa',), threshold = dict(argstr='%f',), tukey_window = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='-tukey %d',), use_pava = dict(argstr='-pava',), ) input_map2 = dict(args = dict(argstr='%s',), autocorr_estimate_only = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='--ac',), autocorr_noestimate = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='--noest',), brightness_threshold = dict(argstr='--epith=%d',), design_file = dict(argstr='--pd=%s',), environ = dict(usedefault=True,), fit_armodel = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='--ar',), full_data = dict(argstr='-v',), ignore_exception = dict(usedefault=True,), in_file = dict(mandatory=True,argstr='--in=%s',), mask_size = dict(argstr='--ms=%d',), multitaper_product = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='--mt=%d',), output_pwdata = dict(argstr='--outputPWdata',), output_type = dict(), results_dir = dict(argstr='--rn=%s',usedefault=True,), smooth_autocorr = dict(argstr='--sa',), terminal_output = dict(mandatory=True,), threshold = dict(usedefault=True,argstr='--thr=%f',), tukey_window = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='--tukey=%d',), use_pava = dict(argstr='--pava',), ) instance = FILMGLS() if isinstance(instance.inputs, FILMGLSInputSpec): for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value else: for key, metadata in input_map2.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py000066400000000000000000000025431227300005300247410ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import ApplyMask def test_ApplyMask_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), mask_file=dict(argstr='-mas %s', mandatory=True, position=4, ), nan2zeros=dict(argstr='-nan', position=3, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ApplyMask.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def 
test_ApplyMask_outputs(): output_map = dict(out_file=dict(), ) outputs = ApplyMask.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py000066400000000000000000000025411227300005300247530ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.epi import ApplyTOPUP def test_ApplyTOPUP_inputs(): input_map = dict(args=dict(argstr='%s', ), datatype=dict(argstr='-d=%s', ), encoding_file=dict(argstr='--datain=%s', mandatory=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr='%s', mandatory=True, ), in_index=dict(argstr='%s', mandatory=True, ), in_topup=dict(argstr='--topup=%s', mandatory=True, ), interp=dict(argstr='--interp=%s', ), method=dict(argstr='--method=%s', ), out_base=dict(argstr='--out=%s', ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ApplyTOPUP.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ApplyTOPUP_outputs(): output_map = dict(out_corrected=dict(), ) outputs = ApplyTOPUP.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py000066400000000000000000000031671227300005300247620ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import ApplyWarp def test_ApplyWarp_inputs(): input_map = dict(abswarp=dict(argstr='--abs', xor=['relwarp'], ), args=dict(argstr='%s', ), datatype=dict(argstr='--datatype=%s', ), environ=dict(nohash=True, usedefault=True, ), field_file=dict(argstr='--warp=%s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', mandatory=True, ), interp=dict(argstr='--interp=%s', ), mask_file=dict(argstr='--mask=%s', ), out_file=dict(argstr='--out=%s', genfile=True, hash_files=False, ), output_type=dict(), postmat=dict(argstr='--postmat=%s', ), premat=dict(argstr='--premat=%s', ), ref_file=dict(argstr='--ref=%s', mandatory=True, ), relwarp=dict(argstr='--rel', xor=['abswarp'], ), superlevel=dict(argstr='--superlevel=%s', ), supersample=dict(argstr='--super', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ApplyWarp.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ApplyWarp_outputs(): output_map = dict(out_file=dict(), ) outputs = ApplyWarp.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ApplyXfm.py000066400000000000000000000076271227300005300246100ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import ApplyXfm def test_ApplyXfm_inputs(): input_map = dict(angle_rep=dict(argstr='-anglerep %s', ), apply_isoxfm=dict(argstr='-applyisoxfm %f', xor=['apply_xfm'], 
), apply_xfm=dict(argstr='-applyxfm', requires=['in_matrix_file'], usedefault=True, ), args=dict(argstr='%s', ), bbrslope=dict(argstr='-bbrslope %f', min_ver='5.0.0', ), bbrtype=dict(argstr='-bbrtype %s', min_ver='5.0.0', ), bins=dict(argstr='-bins %d', ), coarse_search=dict(argstr='-coarsesearch %d', units='degrees', ), cost=dict(argstr='-cost %s', ), cost_func=dict(argstr='-searchcost %s', ), datatype=dict(argstr='-datatype %s', ), display_init=dict(argstr='-displayinit', ), dof=dict(argstr='-dof %d', ), echospacing=dict(argstr='-echospacing %f', min_ver='5.0.0', ), environ=dict(nohash=True, usedefault=True, ), fieldmap=dict(argstr='-fieldmap %s', min_ver='5.0.0', ), fieldmapmask=dict(argstr='-fieldmapmask %s', min_ver='5.0.0', ), fine_search=dict(argstr='-finesearch %d', units='degrees', ), force_scaling=dict(argstr='-forcescaling', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', mandatory=True, position=0, ), in_matrix_file=dict(argstr='-init %s', ), in_weight=dict(argstr='-inweight %s', ), interp=dict(argstr='-interp %s', ), min_sampling=dict(argstr='-minsampling %f', units='mm', ), no_clamp=dict(argstr='-noclamp', ), no_resample=dict(argstr='-noresample', ), no_resample_blur=dict(argstr='-noresampblur', ), no_search=dict(argstr='-nosearch', ), out_file=dict(argstr='-out %s', hash_files=False, name_source=['in_file'], name_template='%s_flirt', position=2, ), out_log=dict(keep_extension=True, name_source=['in_file'], name_template='%s_flirt.log', requires=['save_log'], ), out_matrix_file=dict(argstr='-omat %s', hash_files=False, keep_extension=True, name_source=['in_file'], name_template='%s_flirt.mat', position=3, ), output_type=dict(), padding_size=dict(argstr='-paddingsize %d', units='voxels', ), pedir=dict(argstr='-pedir %d', min_ver='5.0.0', ), ref_weight=dict(argstr='-refweight %s', ), reference=dict(argstr='-ref %s', mandatory=True, position=1, ), rigid2D=dict(argstr='-2D', ), save_log=dict(), schedule=dict(argstr='-schedule %s', ), searchr_x=dict(argstr='-searchrx %s', units='degrees', ), searchr_y=dict(argstr='-searchry %s', units='degrees', ), searchr_z=dict(argstr='-searchrz %s', units='degrees', ), sinc_width=dict(argstr='-sincwidth %d', units='voxels', ), sinc_window=dict(argstr='-sincwindow %s', ), terminal_output=dict(mandatory=True, nohash=True, ), uses_qform=dict(argstr='-usesqform', ), verbose=dict(argstr='-verbose %d', ), wm_seg=dict(argstr='-wmseg %s', min_ver='5.0.0', ), wmcoords=dict(argstr='-wmcoords %s', min_ver='5.0.0', ), wmnorms=dict(argstr='-wmnorms %s', min_ver='5.0.0', ), ) inputs = ApplyXfm.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ApplyXfm_outputs(): output_map = dict(out_file=dict(), out_log=dict(), out_matrix_file=dict(), ) outputs = ApplyXfm.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_AvScale.py000066400000000000000000000022701227300005300243530ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import AvScale def test_AvScale_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), mat_file=dict(argstr='%s', position=0, ), 
output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = AvScale.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_AvScale_outputs(): output_map = dict(average_scaling=dict(), backward_half_transform=dict(), determinant=dict(), forward_half_transform=dict(), left_right_orientation_preserved=dict(), rotation_translation_matrix=dict(), scales=dict(), skews=dict(), ) outputs = AvScale.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX.py000066400000000000000000000030411227300005300242620ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.dti import BEDPOSTX def test_BEDPOSTX_inputs(): input_map = dict(args=dict(argstr='%s', ), bpx_directory=dict(argstr='%s', usedefault=True, ), burn_period=dict(argstr='-b %d', ), bvals=dict(mandatory=True, ), bvecs=dict(mandatory=True, ), dwi=dict(mandatory=True, ), environ=dict(nohash=True, usedefault=True, ), fibres=dict(argstr='-n %d', ), ignore_exception=dict(nohash=True, usedefault=True, ), jumps=dict(argstr='-j %d', ), mask=dict(mandatory=True, ), output_type=dict(), sampling=dict(argstr='-s %d', ), terminal_output=dict(mandatory=True, nohash=True, ), weight=dict(argstr='-w %.2f', ), ) inputs = BEDPOSTX.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BEDPOSTX_outputs(): output_map = dict(bpx_out_directory=dict(), dyads=dict(), mean_fsamples=dict(), mean_phsamples=dict(), mean_thsamples=dict(), merged_fsamples=dict(), merged_phsamples=dict(), merged_thsamples=dict(), xfms_directory=dict(), ) outputs = BEDPOSTX.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_BET.py000066400000000000000000000053071227300005300234530ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import BET def test_BET_inputs(): input_map = dict(args=dict(argstr='%s', ), center=dict(argstr='-c %s', units='voxels', ), environ=dict(nohash=True, usedefault=True, ), frac=dict(argstr='-f %.2f', ), functional=dict(argstr='-F', xor=('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided'), ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=0, ), mask=dict(argstr='-m', ), mesh=dict(argstr='-e', ), no_output=dict(argstr='-n', ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=1, ), outline=dict(argstr='-o', ), output_type=dict(), padding=dict(argstr='-Z', xor=('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided'), ), radius=dict(argstr='-r %d', units='mm', ), reduce_bias=dict(argstr='-B', xor=('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided'), ), remove_eyes=dict(argstr='-S', xor=('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided'), ), robust=dict(argstr='-R', xor=('functional', 
'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided'), ), skull=dict(argstr='-s', ), surfaces=dict(argstr='-A', xor=('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided'), ), t2_guided=dict(argstr='-A2 %s', xor=('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided'), ), terminal_output=dict(mandatory=True, nohash=True, ), threshold=dict(argstr='-t', ), vertical_gradient=dict(argstr='-g %.2f', ), ) inputs = BET.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BET_outputs(): output_map = dict(inskull_mask_file=dict(), inskull_mesh_file=dict(), mask_file=dict(), meshfile=dict(), out_file=dict(), outline_file=dict(), outskin_mask_file=dict(), outskin_mesh_file=dict(), outskull_mask_file=dict(), outskull_mesh_file=dict(), skull_mask_file=dict(), ) outputs = BET.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py000066400000000000000000000030751227300005300252620ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import BinaryMaths def test_BinaryMaths_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), nan2zeros=dict(argstr='-nan', position=3, ), operand_file=dict(argstr='%s', mandatory=True, position=5, xor=['operand_value'], ), operand_value=dict(argstr='%.8f', mandatory=True, position=5, xor=['operand_file'], ), operation=dict(argstr='-%s', mandatory=True, position=4, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = BinaryMaths.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BinaryMaths_outputs(): output_map = dict(out_file=dict(), ) outputs = BinaryMaths.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py000066400000000000000000000025001227300005300256520ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import ChangeDataType def test_ChangeDataType_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), nan2zeros=dict(argstr='-nan', position=3, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', mandatory=True, position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ChangeDataType.input_spec() for key, metadata in 
input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ChangeDataType_outputs(): output_map = dict(out_file=dict(), ) outputs = ChangeDataType.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Cluster.py000066400000000000000000000047301227300005300244610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import Cluster def test_Cluster_inputs(): input_map = dict(args=dict(argstr='%s', ), connectivity=dict(argstr='--connectivity=%d', ), cope_file=dict(argstr='--cope=%s', ), dlh=dict(argstr='--dlh=%.10f', ), environ=dict(nohash=True, usedefault=True, ), find_min=dict(), fractional=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', mandatory=True, ), minclustersize=dict(argstr='--minclustersize', ), no_table=dict(), num_maxima=dict(argstr='--num=%d', ), out_index_file=dict(argstr='--oindex=%s', hash_files=False, ), out_localmax_txt_file=dict(argstr='--olmax=%s', hash_files=False, ), out_localmax_vol_file=dict(argstr='--olmaxim=%s', hash_files=False, ), out_max_file=dict(argstr='--omax=%s', hash_files=False, ), out_mean_file=dict(argstr='--omean=%s', hash_files=False, ), out_pval_file=dict(argstr='--opvals=%s', hash_files=False, ), out_size_file=dict(argstr='--osize=%s', hash_files=False, ), out_threshold_file=dict(argstr='--othresh=%s', hash_files=False, ), output_type=dict(), peak_distance=dict(argstr='--peakdist=%.10f', ), pthreshold=dict(argstr='--pthresh=%.10f', requires=['dlh', 'volume'], ), std_space_file=dict(argstr='--stdvol=%s', ), terminal_output=dict(mandatory=True, nohash=True, ), threshold=dict(argstr='--thresh=%.10f', mandatory=True, ), use_mm=dict(), volume=dict(argstr='--volume=%d', ), warpfield_file=dict(argstr='--warpvol=%s', ), xfm_file=dict(argstr='--xfm=%s', ), ) inputs = Cluster.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Cluster_outputs(): output_map = dict(index_file=dict(), localmax_txt_file=dict(), localmax_vol_file=dict(), max_file=dict(), mean_file=dict(), pval_file=dict(), size_file=dict(), threshold_file=dict(), ) outputs = Cluster.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Complex.py000066400000000000000000000073631227300005300244540ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import Complex def test_Complex_inputs(): input_map = dict(args=dict(argstr='%s', ), complex_cartesian=dict(argstr='-complex', position=1, xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'], ), complex_in_file=dict(argstr='%s', position=2, ), complex_in_file2=dict(argstr='%s', position=3, ), complex_merge=dict(argstr='-complexmerge', position=1, xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge', 'start_vol', 'end_vol'], ), complex_out_file=dict(argstr='%s', genfile=True, position=-3, 
xor=['complex_out_file', 'magnitude_out_file', 'phase_out_file', 'real_out_file', 'imaginary_out_file', 'real_polar', 'real_cartesian'], ), complex_polar=dict(argstr='-complexpolar', position=1, xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'], ), complex_split=dict(argstr='-complexsplit', position=1, xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'], ), end_vol=dict(argstr='%d', position=-1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), imaginary_in_file=dict(argstr='%s', position=3, ), imaginary_out_file=dict(argstr='%s', genfile=True, position=-3, xor=['complex_out_file', 'magnitude_out_file', 'phase_out_file', 'real_polar', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'], ), magnitude_in_file=dict(argstr='%s', position=2, ), magnitude_out_file=dict(argstr='%s', genfile=True, position=-4, xor=['complex_out_file', 'real_out_file', 'imaginary_out_file', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'], ), output_type=dict(), phase_in_file=dict(argstr='%s', position=3, ), phase_out_file=dict(argstr='%s', genfile=True, position=-3, xor=['complex_out_file', 'real_out_file', 'imaginary_out_file', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'], ), real_cartesian=dict(argstr='-realcartesian', position=1, xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'], ), real_in_file=dict(argstr='%s', position=2, ), real_out_file=dict(argstr='%s', genfile=True, position=-4, xor=['complex_out_file', 'magnitude_out_file', 'phase_out_file', 'real_polar', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'], ), real_polar=dict(argstr='-realpolar', position=1, xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'], ), start_vol=dict(argstr='%d', position=-2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Complex.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Complex_outputs(): output_map = dict(complex_out_file=dict(), imaginary_out_file=dict(), magnitude_out_file=dict(), phase_out_file=dict(), real_out_file=dict(), ) outputs = Complex.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py000066400000000000000000000030671227300005300253050ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import ContrastMgr def test_ContrastMgr_inputs(): input_map = dict(args=dict(argstr='%s', ), contrast_num=dict(argstr='-cope', ), corrections=dict(copyfile=False, mandatory=True, ), dof_file=dict(argstr='', copyfile=False, mandatory=True, ), environ=dict(nohash=True, usedefault=True, ), fcon_file=dict(argstr='-f %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), output_type=dict(), param_estimates=dict(argstr='', copyfile=False, mandatory=True, ), sigmasquareds=dict(argstr='', copyfile=False, mandatory=True, position=-2, ), suffix=dict(argstr='-suffix %s', ), tcon_file=dict(argstr='%s', 
mandatory=True, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ContrastMgr.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ContrastMgr_outputs(): output_map = dict(copes=dict(), fstats=dict(), neffs=dict(), tstats=dict(), varcopes=dict(), zfstats=dict(), zstats=dict(), ) outputs = ContrastMgr.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py000066400000000000000000000030711227300005300250300ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import ConvertXFM def test_ConvertXFM_inputs(): input_map = dict(args=dict(argstr='%s', ), concat_xfm=dict(argstr='-concat', position=-3, requires=['in_file2'], xor=['invert_xfm', 'concat_xfm', 'fix_scale_skew'], ), environ=dict(nohash=True, usedefault=True, ), fix_scale_skew=dict(argstr='-fixscaleskew', position=-3, requires=['in_file2'], xor=['invert_xfm', 'concat_xfm', 'fix_scale_skew'], ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-1, ), in_file2=dict(argstr='%s', position=-2, ), invert_xfm=dict(argstr='-inverse', position=-3, xor=['invert_xfm', 'concat_xfm', 'fix_scale_skew'], ), out_file=dict(argstr='-omat %s', genfile=True, hash_files=False, position=1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ConvertXFM.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ConvertXFM_outputs(): output_map = dict(out_file=dict(), ) outputs = ConvertXFM.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_DTIFit.py000066400000000000000000000034661227300005300241300ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.dti import DTIFit def test_DTIFit_inputs(): input_map = dict(args=dict(argstr='%s', ), base_name=dict(argstr='-o %s', position=1, usedefault=True, ), bvals=dict(argstr='-b %s', mandatory=True, position=4, ), bvecs=dict(argstr='-r %s', mandatory=True, position=3, ), cni=dict(argstr='-cni %s', ), dwi=dict(argstr='-k %s', mandatory=True, position=0, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), little_bit=dict(argstr='--littlebit', ), mask=dict(argstr='-m %s', mandatory=True, position=2, ), max_x=dict(argstr='-X %d', ), max_y=dict(argstr='-Y %d', ), max_z=dict(argstr='-Z %d', ), min_x=dict(argstr='-x %d', ), min_y=dict(argstr='-y %d', ), min_z=dict(argstr='-z %d', ), output_type=dict(), save_tensor=dict(argstr='--save_tensor', ), sse=dict(argstr='--sse', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DTIFit.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DTIFit_outputs(): output_map = dict(FA=dict(), L1=dict(), L2=dict(), L3=dict(), MD=dict(), MO=dict(), 
S0=dict(), V1=dict(), V2=dict(), V3=dict(), tensor=dict(), ) outputs = DTIFit.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_DilateImage.py000066400000000000000000000031241227300005300252010ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import DilateImage def test_DilateImage_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), kernel_file=dict(argstr='%s', position=5, xor=['kernel_size'], ), kernel_shape=dict(argstr='-kernel %s', position=4, ), kernel_size=dict(argstr='%.4f', position=5, xor=['kernel_file'], ), nan2zeros=dict(argstr='-nan', position=3, ), operation=dict(argstr='-dil%s', mandatory=True, position=6, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DilateImage.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DilateImage_outputs(): output_map = dict(out_file=dict(), ) outputs = DilateImage.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py000066400000000000000000000024241227300005300252260ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.dti import DistanceMap def test_DistanceMap_inputs(): input_map = dict(args=dict(argstr='%s', ), distance_map=dict(argstr='--out=%s', genfile=True, hash_files=False, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', mandatory=True, ), invert_input=dict(argstr='--invert', ), local_max_file=dict(argstr='--localmax=%s', hash_files=False, ), mask_file=dict(argstr='--mask=%s', ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DistanceMap.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DistanceMap_outputs(): output_map = dict(distance_map=dict(), local_max_file=dict(), ) outputs = DistanceMap.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py000066400000000000000000000033651227300005300245630ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.epi import EPIDeWarp def test_EPIDeWarp_inputs(): input_map = dict(args=dict(argstr='%s', ), cleanup=dict(argstr='--cleanup', ), dph_file=dict(argstr='--dph %s', mandatory=True, ), environ=dict(nohash=True, usedefault=True, ), epi_file=dict(argstr='--epi %s', ), 
epidw=dict(argstr='--epidw %s', genfile=False, ), esp=dict(argstr='--esp %s', usedefault=True, ), exf_file=dict(argstr='--exf %s', ), exfdw=dict(argstr='--exfdw %s', genfile=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), mag_file=dict(argstr='--mag %s', mandatory=True, position=0, ), nocleanup=dict(argstr='--nocleanup', usedefault=True, ), output_type=dict(), sigma=dict(argstr='--sigma %s', usedefault=True, ), tediff=dict(argstr='--tediff %s', usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), tmpdir=dict(argstr='--tmpdir %s', genfile=True, ), vsm=dict(argstr='--vsm %s', genfile=True, ), ) inputs = EPIDeWarp.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_EPIDeWarp_outputs(): output_map = dict(exf_mask=dict(), exfdw=dict(), unwarped_file=dict(), vsm_file=dict(), ) outputs = EPIDeWarp.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Eddy.py000066400000000000000000000032161227300005300237230ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.epi import Eddy def test_Eddy_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), flm=dict(argstr='--flm=%s', ), fwhm=dict(argstr='--fwhm=%s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_acqp=dict(argstr='--acqp=%s', mandatory=True, ), in_bval=dict(argstr='--bvals=%s', mandatory=True, ), in_bvec=dict(argstr='--bvecs=%s', mandatory=True, ), in_file=dict(argstr='--imain=%s', mandatory=True, ), in_index=dict(argstr='--index=%s', mandatory=True, ), in_mask=dict(argstr='--mask=%s', mandatory=True, ), in_topup=dict(argstr='--topup=%s', ), method=dict(argstr='--resamp=%s', ), niter=dict(argstr='--niter=%s', ), out_base=dict(argstr='--out=%s', ), output_type=dict(), repol=dict(argstr='--repol', ), session=dict(argstr='--session=%s', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Eddy.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Eddy_outputs(): output_map = dict(out_corrected=dict(), out_parameter=dict(), ) outputs = Eddy.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py000066400000000000000000000022521227300005300252440ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.epi import EddyCorrect def test_EddyCorrect_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=0, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=1, ), output_type=dict(), ref_num=dict(argstr='%d', mandatory=True, position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = EddyCorrect.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, 
getattr(inputs.traits()[key], metakey), value def test_EddyCorrect_outputs(): output_map = dict(eddy_corrected=dict(), ) outputs = EddyCorrect.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py000066400000000000000000000031211227300005300250320ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import ErodeImage def test_ErodeImage_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), kernel_file=dict(argstr='%s', position=5, xor=['kernel_size'], ), kernel_shape=dict(argstr='-kernel %s', position=4, ), kernel_size=dict(argstr='%.4f', position=5, xor=['kernel_file'], ), minimum_filter=dict(argstr='%s', position=6, usedefault=True, ), nan2zeros=dict(argstr='-nan', position=3, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ErodeImage.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ErodeImage_outputs(): output_map = dict(out_file=dict(), ) outputs = ErodeImage.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py000066400000000000000000000032011227300005300250140ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import ExtractROI def test_ExtractROI_inputs(): input_map = dict(args=dict(argstr='%s', ), crop_list=dict(argstr='%s', position=2, xor=['x_min', 'x_size', 'y_min', 'y_size', 'z_min', 'z_size', 't_min', 't_size'], ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=0, ), output_type=dict(), roi_file=dict(argstr='%s', genfile=True, hash_files=False, position=1, ), t_min=dict(argstr='%d', position=8, ), t_size=dict(argstr='%d', position=9, ), terminal_output=dict(mandatory=True, nohash=True, ), x_min=dict(argstr='%d', position=2, ), x_size=dict(argstr='%d', position=3, ), y_min=dict(argstr='%d', position=4, ), y_size=dict(argstr='%d', position=5, ), z_min=dict(argstr='%d', position=6, ), z_size=dict(argstr='%d', position=7, ), ) inputs = ExtractROI.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ExtractROI_outputs(): output_map = dict(roi_file=dict(), ) outputs = ExtractROI.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FAST.py000066400000000000000000000041551227300005300235760ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from 
nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import FAST def test_FAST_inputs(): input_map = dict(args=dict(argstr='%s', ), bias_iters=dict(argstr='-I %d', ), bias_lowpass=dict(argstr='-l %d', units='mm', ), environ=dict(nohash=True, usedefault=True, ), hyper=dict(argstr='-H %.2f', ), ignore_exception=dict(nohash=True, usedefault=True, ), img_type=dict(argstr='-t %d', ), in_files=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), init_seg_smooth=dict(argstr='-f %.3f', ), init_transform=dict(argstr='-a %s', ), iters_afterbias=dict(argstr='-O %d', ), manual_seg=dict(argstr='-s %s', ), mixel_smooth=dict(argstr='-R %.2f', ), no_bias=dict(argstr='-N', ), no_pve=dict(argstr='--nopve', ), number_classes=dict(argstr='-n %d', ), other_priors=dict(argstr='-A %s', ), out_basename=dict(argstr='-o %s', ), output_biascorrected=dict(argstr='-B', ), output_biasfield=dict(argstr='-b', ), output_type=dict(), probability_maps=dict(argstr='-p', ), segment_iters=dict(argstr='-W %d', ), segments=dict(argstr='-g', ), terminal_output=dict(mandatory=True, nohash=True, ), use_priors=dict(argstr='-P', ), verbose=dict(argstr='-v', ), ) inputs = FAST.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FAST_outputs(): output_map = dict(bias_field=dict(), mixeltype=dict(), partial_volume_files=dict(), partial_volume_map=dict(), probability_maps=dict(), restored_image=dict(), tissue_class_files=dict(), tissue_class_map=dict(), ) outputs = FAST.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FEAT.py000066400000000000000000000017351227300005300235610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import FEAT def test_FEAT_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fsf_file=dict(argstr='%s', mandatory=True, position=0, ), ignore_exception=dict(nohash=True, usedefault=True, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = FEAT.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FEAT_outputs(): output_map = dict(feat_dir=dict(), ) outputs = FEAT.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FEATModel.py000066400000000000000000000023061227300005300245350ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import FEATModel def test_FEATModel_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ev_files=dict(argstr='%s', copyfile=False, mandatory=True, position=1, ), fsf_file=dict(argstr='%s', copyfile=False, mandatory=True, position=0, ), ignore_exception=dict(nohash=True, usedefault=True, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = FEATModel.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield 
assert_equal, getattr(inputs.traits()[key], metakey), value def test_FEATModel_outputs(): output_map = dict(con_file=dict(), design_cov=dict(), design_file=dict(), design_image=dict(), fcon_file=dict(), ) outputs = FEATModel.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py000066400000000000000000000016031227300005300252600ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import FEATRegister def test_FEATRegister_inputs(): input_map = dict(feat_dirs=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), reg_dof=dict(usedefault=True, ), reg_image=dict(mandatory=True, ), ) inputs = FEATRegister.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FEATRegister_outputs(): output_map = dict(fsf_file=dict(), ) outputs = FEATRegister.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FIRST.py000066400000000000000000000032431227300005300237250ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import FIRST def test_FIRST_inputs(): input_map = dict(affine_file=dict(argstr='-a %s', position=6, ), args=dict(argstr='%s', ), brain_extracted=dict(argstr='-b', position=2, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', mandatory=True, position=-2, ), list_of_specific_structures=dict(argstr='-s %s', position=5, sep=',', ), method=dict(argstr='-m', position=4, xor=['method_as_numerical_threshold'], ), method_as_numerical_threshold=dict(argstr='-m', position=4, ), no_cleanup=dict(argstr='-d', position=3, ), out_file=dict(argstr='-o %s', hash_files=False, mandatory=True, position=-1, usedefault=True, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), verbose=dict(argstr='-v', position=1, ), ) inputs = FIRST.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FIRST_outputs(): output_map = dict(bvars=dict(), original_segmentations=dict(), segmentation_file=dict(), vtk_surfaces=dict(), ) outputs = FIRST.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py000066400000000000000000000042521227300005300240020ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import FLAMEO def test_FLAMEO_inputs(): input_map = dict(args=dict(argstr='%s', ), burnin=dict(argstr='--burnin=%d', ), cope_file=dict(argstr='--copefile=%s', mandatory=True, ), cov_split_file=dict(argstr='--covsplitfile=%s', mandatory=True, ), design_file=dict(argstr='--designfile=%s', mandatory=True, ), dof_var_cope_file=dict(argstr='--dofvarcopefile=%s', ), environ=dict(nohash=True, 
usedefault=True, ), f_con_file=dict(argstr='--fcontrastsfile=%s', ), fix_mean=dict(argstr='--fixmean', ), ignore_exception=dict(nohash=True, usedefault=True, ), infer_outliers=dict(argstr='--inferoutliers', ), log_dir=dict(argstr='--ld=%s', usedefault=True, ), mask_file=dict(argstr='--maskfile=%s', mandatory=True, ), n_jumps=dict(argstr='--njumps=%d', ), no_pe_outputs=dict(argstr='--nopeoutput', ), outlier_iter=dict(argstr='--ioni=%d', ), output_type=dict(), run_mode=dict(argstr='--runmode=%s', mandatory=True, ), sample_every=dict(argstr='--sampleevery=%d', ), sigma_dofs=dict(argstr='--sigma_dofs=%d', ), t_con_file=dict(argstr='--tcontrastsfile=%s', mandatory=True, ), terminal_output=dict(mandatory=True, nohash=True, ), var_cope_file=dict(argstr='--varcopefile=%s', ), ) inputs = FLAMEO.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FLAMEO_outputs(): output_map = dict(copes=dict(), fstats=dict(), mrefvars=dict(), pes=dict(), res4d=dict(), stats_dir=dict(), tdof=dict(), tstats=dict(), var_copes=dict(), weights=dict(), zfstats=dict(), zstats=dict(), ) outputs = FLAMEO.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FLIRT.py000066400000000000000000000075631227300005300237270ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import FLIRT def test_FLIRT_inputs(): input_map = dict(angle_rep=dict(argstr='-anglerep %s', ), apply_isoxfm=dict(argstr='-applyisoxfm %f', xor=['apply_xfm'], ), apply_xfm=dict(argstr='-applyxfm', requires=['in_matrix_file'], ), args=dict(argstr='%s', ), bbrslope=dict(argstr='-bbrslope %f', min_ver='5.0.0', ), bbrtype=dict(argstr='-bbrtype %s', min_ver='5.0.0', ), bins=dict(argstr='-bins %d', ), coarse_search=dict(argstr='-coarsesearch %d', units='degrees', ), cost=dict(argstr='-cost %s', ), cost_func=dict(argstr='-searchcost %s', ), datatype=dict(argstr='-datatype %s', ), display_init=dict(argstr='-displayinit', ), dof=dict(argstr='-dof %d', ), echospacing=dict(argstr='-echospacing %f', min_ver='5.0.0', ), environ=dict(nohash=True, usedefault=True, ), fieldmap=dict(argstr='-fieldmap %s', min_ver='5.0.0', ), fieldmapmask=dict(argstr='-fieldmapmask %s', min_ver='5.0.0', ), fine_search=dict(argstr='-finesearch %d', units='degrees', ), force_scaling=dict(argstr='-forcescaling', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', mandatory=True, position=0, ), in_matrix_file=dict(argstr='-init %s', ), in_weight=dict(argstr='-inweight %s', ), interp=dict(argstr='-interp %s', ), min_sampling=dict(argstr='-minsampling %f', units='mm', ), no_clamp=dict(argstr='-noclamp', ), no_resample=dict(argstr='-noresample', ), no_resample_blur=dict(argstr='-noresampblur', ), no_search=dict(argstr='-nosearch', ), out_file=dict(argstr='-out %s', hash_files=False, name_source=['in_file'], name_template='%s_flirt', position=2, ), out_log=dict(keep_extension=True, name_source=['in_file'], name_template='%s_flirt.log', requires=['save_log'], ), out_matrix_file=dict(argstr='-omat %s', hash_files=False, keep_extension=True, name_source=['in_file'], name_template='%s_flirt.mat', position=3, ), output_type=dict(), padding_size=dict(argstr='-paddingsize %d', units='voxels', ), 
pedir=dict(argstr='-pedir %d', min_ver='5.0.0', ), ref_weight=dict(argstr='-refweight %s', ), reference=dict(argstr='-ref %s', mandatory=True, position=1, ), rigid2D=dict(argstr='-2D', ), save_log=dict(), schedule=dict(argstr='-schedule %s', ), searchr_x=dict(argstr='-searchrx %s', units='degrees', ), searchr_y=dict(argstr='-searchry %s', units='degrees', ), searchr_z=dict(argstr='-searchrz %s', units='degrees', ), sinc_width=dict(argstr='-sincwidth %d', units='voxels', ), sinc_window=dict(argstr='-sincwindow %s', ), terminal_output=dict(mandatory=True, nohash=True, ), uses_qform=dict(argstr='-usesqform', ), verbose=dict(argstr='-verbose %d', ), wm_seg=dict(argstr='-wmseg %s', min_ver='5.0.0', ), wmcoords=dict(argstr='-wmcoords %s', min_ver='5.0.0', ), wmnorms=dict(argstr='-wmnorms %s', min_ver='5.0.0', ), ) inputs = FLIRT.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FLIRT_outputs(): output_map = dict(out_file=dict(), out_log=dict(), out_matrix_file=dict(), ) outputs = FLIRT.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FNIRT.py000066400000000000000000000073721227300005300237270ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import FNIRT def test_FNIRT_inputs(): input_map = dict(affine_file=dict(argstr='--aff=%s', ), apply_inmask=dict(argstr='--applyinmask=%s', sep=',', xor=['skip_inmask'], ), apply_intensity_mapping=dict(argstr='--estint=%s', sep=',', xor=['skip_intensity_mapping'], ), apply_refmask=dict(argstr='--applyrefmask=%s', sep=',', xor=['skip_refmask'], ), args=dict(argstr='%s', ), bias_regularization_lambda=dict(argstr='--biaslambda=%f', ), biasfield_resolution=dict(argstr='--biasres=%d,%d,%d', ), config_file=dict(argstr='--config=%s', ), derive_from_ref=dict(argstr='--refderiv', ), environ=dict(nohash=True, usedefault=True, ), field_file=dict(argstr='--fout=%s', hash_files=False, ), fieldcoeff_file=dict(argstr='--cout=%s', ), hessian_precision=dict(argstr='--numprec=%s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', mandatory=True, ), in_fwhm=dict(argstr='--infwhm=%s', sep=',', ), in_intensitymap_file=dict(argstr='--intin=%s', ), inmask_file=dict(argstr='--inmask=%s', ), inmask_val=dict(argstr='--impinval=%f', ), intensity_mapping_model=dict(argstr='--intmod=%s', ), intensity_mapping_order=dict(argstr='--intorder=%d', ), inwarp_file=dict(argstr='--inwarp=%s', ), jacobian_file=dict(argstr='--jout=%s', hash_files=False, ), jacobian_range=dict(argstr='--jacrange=%f,%f', ), log_file=dict(argstr='--logout=%s', genfile=True, hash_files=False, ), max_nonlin_iter=dict(argstr='--miter=%s', sep=',', ), modulatedref_file=dict(argstr='--refout=%s', hash_files=False, ), out_intensitymap_file=dict(argstr='--intout=%s', hash_files=False, ), output_type=dict(), ref_file=dict(argstr='--ref=%s', mandatory=True, ), ref_fwhm=dict(argstr='--reffwhm=%s', sep=',', ), refmask_file=dict(argstr='--refmask=%s', ), refmask_val=dict(argstr='--imprefval=%f', ), regularization_lambda=dict(argstr='--lambda=%s', sep=',', ), regularization_model=dict(argstr='--regmod=%s', ), skip_implicit_in_masking=dict(argstr='--impinm=0', ), 
skip_implicit_ref_masking=dict(argstr='--imprefm=0', ), skip_inmask=dict(argstr='--applyinmask=0', xor=['apply_inmask'], ), skip_intensity_mapping=dict(argstr='--estint=0', xor=['apply_intensity_mapping'], ), skip_lambda_ssq=dict(argstr='--ssqlambda=0', ), skip_refmask=dict(argstr='--applyrefmask=0', xor=['apply_refmask'], ), spline_order=dict(argstr='--splineorder=%d', ), subsampling_scheme=dict(argstr='--subsamp=%s', sep=',', ), terminal_output=dict(mandatory=True, nohash=True, ), warp_resolution=dict(argstr='--warpres=%d,%d,%d', ), warped_file=dict(argstr='--iout=%s', genfile=True, hash_files=False, ), ) inputs = FNIRT.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FNIRT_outputs(): output_map = dict(field_file=dict(), fieldcoeff_file=dict(), jacobian_file=dict(), log_file=dict(), modulatedref_file=dict(), out_intensitymap_file=dict(), warped_file=dict(), ) outputs = FNIRT.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py000066400000000000000000000012171227300005300247600ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.base import FSLCommand def test_FSLCommand_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = FSLCommand.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FUGUE.py000066400000000000000000000053721227300005300237160ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import FUGUE def test_FUGUE_inputs(): input_map = dict(args=dict(argstr='%s', ), asym_se_time=dict(argstr='--asym=%.10f', ), despike_2dfilter=dict(argstr='--despike', ), despike_threshold=dict(argstr='--despikethreshold=%s', ), dwell_time=dict(argstr='--dwell=%.10f', ), dwell_to_asym_ratio=dict(argstr='--dwelltoasym=%.10f', ), environ=dict(nohash=True, usedefault=True, ), fmap_in_file=dict(argstr='--loadfmap=%s', ), fmap_out_file=dict(argstr='--savefmap=%s', hash_files=False, ), forward_warping=dict(usedefault=True, ), fourier_order=dict(argstr='--fourier=%d', ), icorr=dict(argstr='--icorr', requires=['shift_in_file'], ), icorr_only=dict(argstr='--icorronly', requires=['unwarped_file'], ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', ), mask_file=dict(argstr='--mask=%s', ), median_2dfilter=dict(argstr='--median', ), no_extend=dict(argstr='--noextend', ), no_gap_fill=dict(argstr='--nofill', ), nokspace=dict(argstr='--nokspace', ), output_type=dict(), pava=dict(argstr='--pava', ), phase_conjugate=dict(argstr='--phaseconj', ), phasemap_file=dict(argstr='--phasemap=%s', ), poly_order=dict(argstr='--poly=%d', ), save_shift=dict(), save_unmasked_fmap=dict(argstr='--unmaskfmap', requires=['fmap_out_file'], ), save_unmasked_shift=dict(argstr='--unmaskshift', requires=['shift_out_file'], ), shift_in_file=dict(argstr='--loadshift=%s', ), 
        shift_out_file=dict(argstr='--saveshift=%s', hash_files=False),
        smooth2d=dict(argstr='--smooth2=%.2f'),
        smooth3d=dict(argstr='--smooth3=%.2f'),
        terminal_output=dict(mandatory=True, nohash=True),
        unwarp_direction=dict(argstr='--unwarpdir=%s'),
        unwarped_file=dict(argstr='--unwarp=%s', genfile=True, hash_files=False),
        warped_file=dict(argstr='--warp=%s', hash_files=False),
    )
    inputs = FUGUE.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_FUGUE_outputs():
    output_map = dict(
        fmap_out_file=dict(),
        shift_out_file=dict(),
        unwarped_file=dict(),
        warped_file=dict(),
    )
    outputs = FUGUE.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value

nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py

# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import FilterRegressor


def test_FilterRegressor_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        design_file=dict(argstr='-d %s', mandatory=True, position=3),
        environ=dict(nohash=True, usedefault=True),
        filter_all=dict(argstr="-f '%s'", mandatory=True, position=4, xor=['filter_columns']),
        filter_columns=dict(argstr="-f '%s'", mandatory=True, position=4, xor=['filter_all']),
        ignore_exception=dict(nohash=True, usedefault=True),
        in_file=dict(argstr='-i %s', mandatory=True, position=1),
        mask=dict(argstr='-m %s'),
        out_file=dict(argstr='-o %s', genfile=True, hash_files=False, position=2),
        out_vnscales=dict(argstr='--out_vnscales'),
        output_type=dict(),
        terminal_output=dict(mandatory=True, nohash=True),
        var_norm=dict(argstr='--vn'),
    )
    inputs = FilterRegressor.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_FilterRegressor_outputs():
    output_map = dict(out_file=dict())
    outputs = FilterRegressor.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value

nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py

# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.dti import FindTheBiggest


def test_FindTheBiggest_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        in_files=dict(argstr='%s', mandatory=True, position=0),
        out_file=dict(argstr='%s', genfile=True, hash_files=False, position=2),
        output_type=dict(),
        terminal_output=dict(mandatory=True, nohash=True),
    )
    inputs = FindTheBiggest.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_FindTheBiggest_outputs():
    output_map = dict(out_file=dict(argstr='%s'))
    outputs = FindTheBiggest.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_GLM.py

# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.model import GLM


def test_GLM_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        contrasts=dict(argstr='-c %s'),
        dat_norm=dict(argstr='--dat_norm'),
        demean=dict(argstr='--demean'),
        des_norm=dict(argstr='--des_norm'),
        design=dict(argstr='-d %s', mandatory=True, position=2),
        dof=dict(argstr='--dof=%d'),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        in_file=dict(argstr='-i %s', mandatory=True, position=1),
        mask=dict(argstr='-m %s'),
        out_cope=dict(argstr='--out_cope=%s'),
        out_data_name=dict(argstr='--out_data=%s'),
        out_f_name=dict(argstr='--out_f=%s'),
        out_file=dict(argstr='-o %s', keep_extension=True, name_source='in_file', name_template='%s_glm', position=3),
        out_p_name=dict(argstr='--out_p=%s'),
        out_pf_name=dict(argstr='--out_pf=%s'),
        out_res_name=dict(argstr='--out_res=%s'),
        out_sigsq_name=dict(argstr='--out_sigsq=%s'),
        out_t_name=dict(argstr='--out_t=%s'),
        out_varcb_name=dict(argstr='--out_varcb=%s'),
        out_vnscales_name=dict(argstr='--out_vnscales=%s'),
        out_z_name=dict(argstr='--out_z=%s'),
        output_type=dict(),
        terminal_output=dict(mandatory=True, nohash=True),
        var_norm=dict(argstr='--vn'),
    )
    inputs = GLM.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_GLM_outputs():
    output_map = dict(
        out_cope=dict(),
        out_data=dict(),
        out_f=dict(),
        out_file=dict(),
        out_p=dict(),
        out_pf=dict(),
        out_res=dict(),
        out_sigsq=dict(),
        out_t=dict(),
        out_varcb=dict(),
        out_vnscales=dict(),
        out_z=dict(),
    )
    outputs = GLM.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value

nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py

# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import ImageMaths


def test_ImageMaths_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        in_file=dict(argstr='%s', mandatory=True, position=1),
        in_file2=dict(argstr='%s', position=3),
        op_string=dict(argstr='%s', position=2),
        out_data_type=dict(argstr='-odt %s', position=5),
        out_file=dict(argstr='%s', genfile=True, hash_files=False, position=4),
        output_type=dict(),
        suffix=dict(),
        terminal_output=dict(mandatory=True, nohash=True),
    )
    inputs = ImageMaths.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_ImageMaths_outputs():
    output_map = dict(out_file=dict())
    outputs = ImageMaths.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value

nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py

# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from
nipype.interfaces.fsl.utils import ImageMeants def test_ImageMeants_inputs(): input_map = dict(args=dict(argstr='%s', ), eig=dict(argstr='--eig', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', mandatory=True, position=0, ), mask=dict(argstr='-m %s', ), nobin=dict(argstr='--no_bin', ), order=dict(argstr='--order=%d', usedefault=True, ), out_file=dict(argstr='-o %s', genfile=True, hash_files=False, ), output_type=dict(), show_all=dict(argstr='--showall', ), spatial_coord=dict(argstr='-c %s', ), terminal_output=dict(mandatory=True, nohash=True, ), transpose=dict(argstr='--transpose', ), use_mm=dict(argstr='--usemm', ), ) inputs = ImageMeants.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ImageMeants_outputs(): output_map = dict(out_file=dict(), ) outputs = ImageMeants.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ImageStats.py000066400000000000000000000022401227300005300250730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import ImageStats def test_ImageStats_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), mask_file=dict(argstr='', ), op_string=dict(argstr='%s', mandatory=True, position=3, ), output_type=dict(), split_4d=dict(argstr='-t', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ImageStats.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ImageStats_outputs(): output_map = dict(out_stat=dict(), ) outputs = ImageStats.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_InvWarp.py000066400000000000000000000030321227300005300244200ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import InvWarp def test_InvWarp_inputs(): input_map = dict(absolute=dict(argstr='--abs', xor=['relative'], ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inverse_warp=dict(argstr='--out=%s', hash_files=False, name_source=['warp'], name_template='%s_inverse', ), jacobian_max=dict(argstr='--jmax=%f', ), jacobian_min=dict(argstr='--jmin=%f', ), niter=dict(argstr='--niter=%d', ), noconstraint=dict(argstr='--noconstraint', ), output_type=dict(), reference=dict(argstr='--ref=%s', mandatory=True, ), regularise=dict(argstr='--regularise=%f', ), relative=dict(argstr='--rel', xor=['absolute'], ), terminal_output=dict(mandatory=True, nohash=True, ), warp=dict(argstr='--warp=%s', mandatory=True, ), ) inputs = InvWarp.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_InvWarp_outputs(): 
output_map = dict(inverse_warp=dict(), ) outputs = InvWarp.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py000066400000000000000000000027551227300005300262120ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import IsotropicSmooth def test_IsotropicSmooth_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fwhm=dict(argstr='-s %.5f', mandatory=True, position=4, xor=['sigma'], ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), nan2zeros=dict(argstr='-nan', position=3, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), sigma=dict(argstr='-s %.5f', mandatory=True, position=4, xor=['fwhm'], ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = IsotropicSmooth.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_IsotropicSmooth_outputs(): output_map = dict(out_file=dict(), ) outputs = IsotropicSmooth.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_L2Model.py000066400000000000000000000015071227300005300242750ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import L2Model def test_L2Model_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), num_copes=dict(mandatory=True, ), ) inputs = L2Model.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_L2Model_outputs(): output_map = dict(design_con=dict(), design_grp=dict(), design_mat=dict(), ) outputs = L2Model.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Level1Design.py000066400000000000000000000017621227300005300253240ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import Level1Design def test_Level1Design_inputs(): input_map = dict(bases=dict(mandatory=True, ), contrasts=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), interscan_interval=dict(mandatory=True, ), model_serial_correlations=dict(mandatory=True, ), session_info=dict(mandatory=True, ), ) inputs = Level1Design.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Level1Design_outputs(): output_map = dict(ev_files=dict(), fsf_files=dict(), ) outputs = Level1Design.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], 
metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py000066400000000000000000000037501227300005300241410ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import MCFLIRT def test_MCFLIRT_inputs(): input_map = dict(args=dict(argstr='%s', ), bins=dict(argstr='-bins %d', ), cost=dict(argstr='-cost %s', ), dof=dict(argstr='-dof %d', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', mandatory=True, position=0, ), init=dict(argstr='-init %s', ), interpolation=dict(argstr='-%s_final', ), mean_vol=dict(argstr='-meanvol', ), out_file=dict(argstr='-out %s', genfile=True, hash_files=False, ), output_type=dict(), ref_file=dict(argstr='-reffile %s', ), ref_vol=dict(argstr='-refvol %d', ), rotation=dict(argstr='-rotation %d', ), save_mats=dict(argstr='-mats', ), save_plots=dict(argstr='-plots', ), save_rms=dict(argstr='-rmsabs -rmsrel', ), scaling=dict(argstr='-scaling %.2f', ), smooth=dict(argstr='-smooth %.2f', ), stages=dict(argstr='-stages %d', ), stats_imgs=dict(argstr='-stats', ), terminal_output=dict(mandatory=True, nohash=True, ), use_contour=dict(argstr='-edge', ), use_gradient=dict(argstr='-gdt', ), ) inputs = MCFLIRT.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MCFLIRT_outputs(): output_map = dict(mat_file=dict(), mean_img=dict(), out_file=dict(), par_file=dict(), rms_files=dict(), std_img=dict(), variance_img=dict(), ) outputs = MCFLIRT.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_MELODIC.py000066400000000000000000000056011227300005300241120ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import MELODIC def test_MELODIC_inputs(): input_map = dict(ICs=dict(argstr='--ICs=%s', ), approach=dict(argstr='-a %s', ), args=dict(argstr='%s', ), bg_image=dict(argstr='--bgimage=%s', ), bg_threshold=dict(argstr='--bgthreshold=%f', ), cov_weight=dict(argstr='--covarweight=%f', ), dim=dict(argstr='-d %d', ), dim_est=dict(argstr='--dimest=%s', ), environ=dict(nohash=True, usedefault=True, ), epsilon=dict(argstr='--eps=%f', ), epsilonS=dict(argstr='--epsS=%f', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr='-i %s', mandatory=True, position=0, ), log_power=dict(argstr='--logPower', ), mask=dict(argstr='-m %s', ), max_restart=dict(argstr='--maxrestart=%d', ), maxit=dict(argstr='--maxit=%d', ), mix=dict(argstr='--mix=%s', ), mm_thresh=dict(argstr='--mmthresh=%f', ), no_bet=dict(argstr='--nobet', ), no_mask=dict(argstr='--nomask', ), no_mm=dict(argstr='--no_mm', ), non_linearity=dict(argstr='--nl=%s', ), num_ICs=dict(argstr='-n %d', ), out_all=dict(argstr='--Oall', ), out_dir=dict(argstr='-o %s', genfile=True, ), out_mean=dict(argstr='--Omean', ), out_orig=dict(argstr='--Oorig', ), out_pca=dict(argstr='--Opca', ), out_stats=dict(argstr='--Ostats', ), out_unmix=dict(argstr='--Ounmix', ), out_white=dict(argstr='--Owhite', ), output_type=dict(), pbsc=dict(argstr='--pbsc', ), rem_cmp=dict(argstr='-f %d', ), remove_deriv=dict(argstr='--remove_deriv', ), 
report=dict(argstr='--report', ), report_maps=dict(argstr='--report_maps=%s', ), s_con=dict(argstr='--Scon=%s', ), s_des=dict(argstr='--Sdes=%s', ), sep_vn=dict(argstr='--sep_vn', ), sep_whiten=dict(argstr='--sep_whiten', ), smode=dict(argstr='--smode=%s', ), t_con=dict(argstr='--Tcon=%s', ), t_des=dict(argstr='--Tdes=%s', ), terminal_output=dict(mandatory=True, nohash=True, ), tr_sec=dict(argstr='--tr=%f', ), update_mask=dict(argstr='--update_mask', ), var_norm=dict(argstr='--vn', ), ) inputs = MELODIC.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MELODIC_outputs(): output_map = dict(out_dir=dict(), report_dir=dict(), ) outputs = MELODIC.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py000066400000000000000000000024751227300005300264050ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.dti import MakeDyadicVectors def test_MakeDyadicVectors_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), mask=dict(argstr='%s', position=2, ), output=dict(argstr='%s', hash_files=False, position=3, usedefault=True, ), output_type=dict(), perc=dict(argstr='%f', position=4, ), phi_vol=dict(argstr='%s', mandatory=True, position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), theta_vol=dict(argstr='%s', mandatory=True, position=0, ), ) inputs = MakeDyadicVectors.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MakeDyadicVectors_outputs(): output_map = dict(dispersion=dict(), dyads=dict(), ) outputs = MakeDyadicVectors.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py000066400000000000000000000024421227300005300254110ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import MathsCommand def test_MathsCommand_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), nan2zeros=dict(argstr='-nan', position=3, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MathsCommand.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MathsCommand_outputs(): output_map = dict(out_file=dict(), ) outputs = MathsCommand.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_MaxImage.py

# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.maths import MaxImage


def test_MaxImage_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        dimension=dict(argstr='-%smax', position=4, usedefault=True),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        in_file=dict(argstr='%s', mandatory=True, position=2),
        internal_datatype=dict(argstr='-dt %s', position=1),
        nan2zeros=dict(argstr='-nan', position=3),
        out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2),
        output_datatype=dict(argstr='-odt %s', position=-1),
        output_type=dict(),
        terminal_output=dict(mandatory=True, nohash=True),
    )
    inputs = MaxImage.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_MaxImage_outputs():
    output_map = dict(out_file=dict())
    outputs = MaxImage.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value

nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_MeanImage.py

# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.maths import MeanImage


def test_MeanImage_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        dimension=dict(argstr='-%smean', position=4, usedefault=True),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        in_file=dict(argstr='%s', mandatory=True, position=2),
        internal_datatype=dict(argstr='-dt %s', position=1),
        nan2zeros=dict(argstr='-nan', position=3),
        out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2),
        output_datatype=dict(argstr='-odt %s', position=-1),
        output_type=dict(),
        terminal_output=dict(mandatory=True, nohash=True),
    )
    inputs = MeanImage.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_MeanImage_outputs():
    output_map = dict(out_file=dict())
    outputs = MeanImage.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value

nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Merge.py

# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import Merge


def test_Merge_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        dimension=dict(argstr='-%s', mandatory=True, position=0),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        in_files=dict(argstr='%s', mandatory=True, position=2),
        merged_file=dict(argstr='%s', hash_files=False, name_source='in_files', name_template='%s_merged', position=1),
        output_type=dict(),
        terminal_output=dict(mandatory=True, nohash=True),
        tr=dict(argstr='%.2f', position=-1),
    )
    inputs = Merge.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal,
getattr(inputs.traits()[key], metakey), value def test_Merge_outputs(): output_map = dict(merged_file=dict(), ) outputs = Merge.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py000066400000000000000000000026521227300005300260730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import MultiImageMaths def test_MultiImageMaths_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), nan2zeros=dict(argstr='-nan', position=3, ), op_string=dict(argstr='%s', mandatory=True, position=4, ), operand_files=dict(mandatory=True, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MultiImageMaths.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MultiImageMaths_outputs(): output_map = dict(out_file=dict(), ) outputs = MultiImageMaths.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py000066400000000000000000000017421227300005300273200ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import MultipleRegressDesign def test_MultipleRegressDesign_inputs(): input_map = dict(contrasts=dict(mandatory=True, ), groups=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), regressors=dict(mandatory=True, ), ) inputs = MultipleRegressDesign.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MultipleRegressDesign_outputs(): output_map = dict(design_con=dict(), design_fts=dict(), design_grp=dict(), design_mat=dict(), ) outputs = MultipleRegressDesign.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Overlay.py000066400000000000000000000041401227300005300244540ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import Overlay def test_Overlay_inputs(): input_map = dict(args=dict(argstr='%s', ), auto_thresh_bg=dict(argstr='-a', mandatory=True, position=5, xor=('auto_thresh_bg', 'full_bg_range', 'bg_thresh'), ), background_image=dict(argstr='%s', mandatory=True, position=4, ), bg_thresh=dict(argstr='%.3f %.3f', mandatory=True, position=5, xor=('auto_thresh_bg', 'full_bg_range', 'bg_thresh'), ), environ=dict(nohash=True, usedefault=True, ), full_bg_range=dict(argstr='-A', mandatory=True, position=5, xor=('auto_thresh_bg', 'full_bg_range', 'bg_thresh'), ), 
ignore_exception=dict(nohash=True, usedefault=True, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-1, ), out_type=dict(argstr='%s', position=2, usedefault=True, ), output_type=dict(), show_negative_stats=dict(argstr='%s', position=8, xor=['stat_image2'], ), stat_image=dict(argstr='%s', mandatory=True, position=6, ), stat_image2=dict(argstr='%s', position=9, xor=['show_negative_stats'], ), stat_thresh=dict(argstr='%.2f %.2f', mandatory=True, position=7, ), stat_thresh2=dict(argstr='%.2f %.2f', position=10, ), terminal_output=dict(mandatory=True, nohash=True, ), transparency=dict(argstr='%s', position=1, usedefault=True, ), use_checkerboard=dict(argstr='-c', position=3, ), ) inputs = Overlay.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Overlay_outputs(): output_map = dict(out_file=dict(), ) outputs = Overlay.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py000066400000000000000000000040611227300005300241350ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import PRELUDE def test_PRELUDE_inputs(): input_map = dict(args=dict(argstr='%s', ), complex_phase_file=dict(argstr='--complex=%s', mandatory=True, xor=['magnitude_file', 'phase_file'], ), end=dict(argstr='--end=%d', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), label_file=dict(argstr='--labels=%s', hash_files=False, ), labelprocess2d=dict(argstr='--labelslices', ), magnitude_file=dict(argstr='--abs=%s', mandatory=True, xor=['complex_phase_file'], ), mask_file=dict(argstr='--mask=%s', ), num_partitions=dict(argstr='--numphasesplit=%d', ), output_type=dict(), phase_file=dict(argstr='--phase=%s', mandatory=True, xor=['complex_phase_file'], ), process2d=dict(argstr='--slices', xor=['labelprocess2d'], ), process3d=dict(argstr='--force3D', xor=['labelprocess2d', 'process2d'], ), rawphase_file=dict(argstr='--rawphase=%s', hash_files=False, ), removeramps=dict(argstr='--removeramps', ), savemask_file=dict(argstr='--savemask=%s', hash_files=False, ), start=dict(argstr='--start=%d', ), terminal_output=dict(mandatory=True, nohash=True, ), threshold=dict(argstr='--thresh=%.10f', ), unwrapped_phase_file=dict(argstr='--unwrap=%s', genfile=True, hash_files=False, ), ) inputs = PRELUDE.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_PRELUDE_outputs(): output_map = dict(unwrapped_phase_file=dict(), ) outputs = PRELUDE.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py000066400000000000000000000023651227300005300263120ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import PlotMotionParams def test_PlotMotionParams_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), 
in_file=dict(argstr='%s', mandatory=True, position=1, ), in_source=dict(mandatory=True, ), out_file=dict(argstr='-o %s', genfile=True, hash_files=False, ), output_type=dict(), plot_size=dict(argstr='%s', ), plot_type=dict(argstr='%s', mandatory=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = PlotMotionParams.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_PlotMotionParams_outputs(): output_map = dict(out_file=dict(), ) outputs = PlotMotionParams.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py000066400000000000000000000035061227300005300257500ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import PlotTimeSeries def test_PlotTimeSeries_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=1, ), labels=dict(argstr='%s', ), legend_file=dict(argstr='--legend=%s', ), out_file=dict(argstr='-o %s', genfile=True, hash_files=False, ), output_type=dict(), plot_finish=dict(argstr='--finish=%d', xor=('plot_range',), ), plot_range=dict(argstr='%s', xor=('plot_start', 'plot_finish'), ), plot_size=dict(argstr='%s', ), plot_start=dict(argstr='--start=%d', xor=('plot_range',), ), sci_notation=dict(argstr='--sci', ), terminal_output=dict(mandatory=True, nohash=True, ), title=dict(argstr='%s', ), x_precision=dict(argstr='--precision=%d', ), x_units=dict(argstr='-u %d', usedefault=True, ), y_max=dict(argstr='--ymax=%.2f', xor=('y_range',), ), y_min=dict(argstr='--ymin=%.2f', xor=('y_range',), ), y_range=dict(argstr='%s', xor=('y_min', 'y_max'), ), ) inputs = PlotTimeSeries.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_PlotTimeSeries_outputs(): output_map = dict(out_file=dict(), ) outputs = PlotTimeSeries.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py000066400000000000000000000021471227300005300256570ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import PowerSpectrum def test_PowerSpectrum_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=0, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = PowerSpectrum.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_PowerSpectrum_outputs(): output_map = dict(out_file=dict(), ) outputs = PowerSpectrum.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, 
getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py000066400000000000000000000026321227300005300260770ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.epi import PrepareFieldmap def test_PrepareFieldmap_inputs(): input_map = dict(args=dict(argstr='%s', ), delta_TE=dict(argstr='%f', mandatory=True, position=-2, usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_magnitude=dict(argstr='%s', mandatory=True, position=3, ), in_phase=dict(argstr='%s', mandatory=True, position=2, ), nocheck=dict(argstr='--nocheck', position=-1, usedefault=True, ), out_fieldmap=dict(argstr='%s', position=5, ), output_type=dict(), scanner=dict(argstr='%s', position=1, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = PrepareFieldmap.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_PrepareFieldmap_outputs(): output_map = dict(out_fieldmap=dict(), ) outputs = PrepareFieldmap.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py000066400000000000000000000054531227300005300250620ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.dti import ProbTrackX def test_ProbTrackX_inputs(): input_map = dict(args=dict(argstr='%s', ), avoid_mp=dict(argstr='--avoid=%s', ), c_thresh=dict(argstr='--cthr=%.3f', ), correct_path_distribution=dict(argstr='--pd', ), dist_thresh=dict(argstr='--distthresh=%.3f', ), environ=dict(nohash=True, usedefault=True, ), fibst=dict(argstr='--fibst=%d', ), force_dir=dict(argstr='--forcedir', usedefault=True, ), fsamples=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inv_xfm=dict(argstr='--invxfm=%s', ), loop_check=dict(argstr='--loopcheck', ), mask=dict(argstr='-m %s', mandatory=True, ), mask2=dict(argstr='--mask2=%s', ), mesh=dict(argstr='--mesh=%s', ), mod_euler=dict(argstr='--modeuler', ), mode=dict(argstr='--mode=%s', genfile=True, ), n_samples=dict(argstr='--nsamples=%d', usedefault=True, ), n_steps=dict(argstr='--nsteps=%d', ), network=dict(argstr='--network', ), opd=dict(argstr='--opd', usedefault=True, ), os2t=dict(argstr='--os2t', ), out_dir=dict(argstr='--dir=%s', genfile=True, ), output_type=dict(), phsamples=dict(mandatory=True, ), rand_fib=dict(argstr='--randfib=%d', ), random_seed=dict(argstr='--rseed', ), s2tastext=dict(argstr='--s2tastext', ), sample_random_points=dict(argstr='--sampvox', ), samples_base_name=dict(argstr='--samples=%s', usedefault=True, ), seed=dict(argstr='--seed=%s', mandatory=True, ), seed_ref=dict(argstr='--seedref=%s', ), step_length=dict(argstr='--steplength=%.3f', ), stop_mask=dict(argstr='--stop=%s', ), target_masks=dict(argstr='--targetmasks=%s', ), terminal_output=dict(mandatory=True, nohash=True, ), thsamples=dict(mandatory=True, ), use_anisotropy=dict(argstr='--usef', ), verbose=dict(argstr='--verbose=%d', ), waypoints=dict(argstr='--waypoints=%s', ), xfm=dict(argstr='--xfm=%s', ), ) inputs = ProbTrackX.input_spec() for key, metadata in input_map.items(): for metakey, value in 
metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ProbTrackX_outputs(): output_map = dict(fdt_paths=dict(), log=dict(), particle_files=dict(), targets=dict(), way_total=dict(), ) outputs = ProbTrackX.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py000066400000000000000000000021051227300005300251220ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.dti import ProjThresh def test_ProjThresh_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr='%s', mandatory=True, position=0, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), threshold=dict(argstr='%d', mandatory=True, position=1, ), ) inputs = ProjThresh.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ProjThresh_outputs(): output_map = dict(out_files=dict(), ) outputs = ProjThresh.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Randomise.py000066400000000000000000000044471227300005300247660ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import Randomise def test_Randomise_inputs(): input_map = dict(args=dict(argstr='%s', ), base_name=dict(argstr='-o "%s"', position=1, usedefault=True, ), c_thresh=dict(argstr='-c %.2f', ), cm_thresh=dict(argstr='-C %.2f', ), demean=dict(argstr='-D', ), design_mat=dict(argstr='-d %s', position=2, ), environ=dict(nohash=True, usedefault=True, ), f_c_thresh=dict(argstr='-F %.2f', ), f_cm_thresh=dict(argstr='-S %.2f', ), f_only=dict(argstr='--f_only', ), fcon=dict(argstr='-f %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', mandatory=True, position=0, ), mask=dict(argstr='-m %s', ), num_perm=dict(argstr='-n %d', ), one_sample_group_mean=dict(argstr='-1', ), output_type=dict(), p_vec_n_dist_files=dict(argstr='-P', ), raw_stats_imgs=dict(argstr='-R', ), seed=dict(argstr='--seed=%d', ), show_info_parallel_mode=dict(argstr='-Q', ), show_total_perms=dict(argstr='-q', ), tcon=dict(argstr='-t %s', position=3, ), terminal_output=dict(mandatory=True, nohash=True, ), tfce=dict(argstr='-T', ), tfce2D=dict(argstr='--T2', ), tfce_C=dict(argstr='--tfce_C=%.2f', ), tfce_E=dict(argstr='--tfce_E=%.2f', ), tfce_H=dict(argstr='--tfce_H=%.2f', ), var_smooth=dict(argstr='-v %d', ), vox_p_values=dict(argstr='-x', ), x_block_labels=dict(argstr='-e %s', ), ) inputs = Randomise.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Randomise_outputs(): output_map = dict(f_corrected_p_files=dict(), f_p_files=dict(), fstat_files=dict(), t_corrected_p_files=dict(), t_p_files=dict(), tstat_files=dict(), ) outputs = Randomise.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, 
getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py000066400000000000000000000021021227300005300253530ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import Reorient2Std def test_Reorient2Std_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Reorient2Std.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Reorient2Std_outputs(): output_map = dict(out_file=dict(), ) outputs = Reorient2Std.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_SMM.py000066400000000000000000000023531227300005300234730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import SMM def test_SMM_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), mask=dict(argstr='--mask="%s"', copyfile=False, mandatory=True, position=1, ), no_deactivation_class=dict(argstr='--zfstatmode', position=2, ), output_type=dict(), spatial_data_file=dict(argstr='--sdf="%s"', copyfile=False, mandatory=True, position=0, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SMM.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SMM_outputs(): output_map = dict(activation_p_map=dict(), deactivation_p_map=dict(), null_p_map=dict(), ) outputs = SMM.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_SUSAN.py000066400000000000000000000027131227300005300237300ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import SUSAN def test_SUSAN_inputs(): input_map = dict(args=dict(argstr='%s', ), brightness_threshold=dict(argstr='%.10f', mandatory=True, position=2, ), dimension=dict(argstr='%d', position=4, usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), fwhm=dict(argstr='%.10f', mandatory=True, position=3, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=1, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), usans=dict(argstr='', position=6, usedefault=True, ), use_median=dict(argstr='%d', position=5, usedefault=True, ), ) inputs = SUSAN.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SUSAN_outputs(): output_map = dict(smoothed_file=dict(), 
) outputs = SUSAN.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_SigLoss.py000066400000000000000000000022371227300005300244230ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import SigLoss def test_SigLoss_inputs(): input_map = dict(args=dict(argstr='%s', ), echo_time=dict(argstr='--te=%f', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', mandatory=True, ), mask_file=dict(argstr='-m %s', ), out_file=dict(argstr='-s %s', genfile=True, ), output_type=dict(), slice_direction=dict(argstr='-d %s', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SigLoss.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SigLoss_outputs(): output_map = dict(out_file=dict(), ) outputs = SigLoss.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py000066400000000000000000000027121227300005300250760ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.preprocess import SliceTimer def test_SliceTimer_inputs(): input_map = dict(args=dict(argstr='%s', ), custom_order=dict(argstr='--ocustom=%s', ), custom_timings=dict(argstr='--tcustom=%s', ), environ=dict(nohash=True, usedefault=True, ), global_shift=dict(argstr='--tglobal', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', mandatory=True, position=0, ), index_dir=dict(argstr='--down', ), interleaved=dict(argstr='--odd', ), out_file=dict(argstr='--out=%s', genfile=True, hash_files=False, ), output_type=dict(), slice_direction=dict(argstr='--direction=%d', ), terminal_output=dict(mandatory=True, nohash=True, ), time_repetition=dict(argstr='--repeat=%f', ), ) inputs = SliceTimer.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SliceTimer_outputs(): output_map = dict(slice_time_corrected_file=dict(), ) outputs = SliceTimer.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Slicer.py000066400000000000000000000045641227300005300242660ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import Slicer def test_Slicer_inputs(): input_map = dict(all_axial=dict(argstr='-A', position=10, requires=['image_width'], xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), ), args=dict(argstr='%s', ), colour_map=dict(argstr='-l %s', position=4, ), dither_edges=dict(argstr='-t', position=7, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), image_edges=dict(argstr='%s', position=2, ), image_width=dict(argstr='%d', position=-2, ), in_file=dict(argstr='%s', 
mandatory=True, position=1, ), intensity_range=dict(argstr='-i %.3f %.3f', position=5, ), label_slices=dict(argstr='-L', position=3, usedefault=True, ), middle_slices=dict(argstr='-a', position=10, xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), ), nearest_neighbour=dict(argstr='-n', position=8, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-1, ), output_type=dict(), sample_axial=dict(argstr='-S %d', position=10, requires=['image_width'], xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), ), scaling=dict(argstr='-s %f', position=0, ), show_orientation=dict(argstr='%s', position=9, usedefault=True, ), single_slice=dict(argstr='-%s', position=10, requires=['slice_number'], xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), ), slice_number=dict(argstr='-%d', position=11, ), terminal_output=dict(mandatory=True, nohash=True, ), threshold_edges=dict(argstr='-e %.3f', position=6, ), ) inputs = Slicer.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Slicer_outputs(): output_map = dict(out_file=dict(), ) outputs = Slicer.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Smooth.py000066400000000000000000000022511227300005300243050ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import Smooth def test_Smooth_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fwhm=dict(argstr='-kernel gauss %f -fmean', mandatory=True, position=1, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=0, ), output_type=dict(), smoothed_file=dict(argstr='%s', genfile=True, hash_files=False, position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Smooth.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Smooth_outputs(): output_map = dict(smoothed_file=dict(), ) outputs = Smooth.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py000066400000000000000000000024071227300005300260040ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.model import SmoothEstimate def test_SmoothEstimate_inputs(): input_map = dict(args=dict(argstr='%s', ), dof=dict(argstr='--dof=%d', mandatory=True, xor=['zstat_file'], ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), mask_file=dict(argstr='--mask=%s', mandatory=True, ), output_type=dict(), residual_fit_file=dict(argstr='--res=%s', requires=['dof'], ), terminal_output=dict(mandatory=True, nohash=True, ), zstat_file=dict(argstr='--zstat=%s', xor=['dof'], ), ) inputs = SmoothEstimate.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SmoothEstimate_outputs(): 
output_map = dict(dlh=dict(), resels=dict(), volume=dict(), ) outputs = SmoothEstimate.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py000066400000000000000000000031341227300005300256000ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import SpatialFilter def test_SpatialFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), kernel_file=dict(argstr='%s', position=5, xor=['kernel_size'], ), kernel_shape=dict(argstr='-kernel %s', position=4, ), kernel_size=dict(argstr='%.4f', position=5, xor=['kernel_file'], ), nan2zeros=dict(argstr='-nan', position=3, ), operation=dict(argstr='-f%s', mandatory=True, position=6, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SpatialFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SpatialFilter_outputs(): output_map = dict(out_file=dict(), ) outputs = SpatialFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Split.py000066400000000000000000000021511227300005300241260ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import Split def test_Split_inputs(): input_map = dict(args=dict(argstr='%s', ), dimension=dict(argstr='-%s', mandatory=True, position=2, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=0, ), out_base_name=dict(argstr='%s', position=1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Split.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Split_outputs(): output_map = dict(out_files=dict(), ) outputs = Split.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py000066400000000000000000000022361227300005300260020ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.utils import SwapDimensions def test_SwapDimensions_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position='1', ), new_dims=dict(argstr='%s %s %s', mandatory=True, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, ), output_type=dict(), 
terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SwapDimensions.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SwapDimensions_outputs(): output_map = dict(out_file=dict(), ) outputs = SwapDimensions.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_TOPUP.py000066400000000000000000000037221227300005300237470ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.epi import TOPUP def test_TOPUP_inputs(): input_map = dict(args=dict(argstr='%s', ), config=dict(argstr='--config=%s', usedefault=True, ), encoding_direction=dict(), encoding_file=dict(argstr='--datain=%s', ), environ=dict(nohash=True, usedefault=True, ), estmov=dict(argstr='--estmov=%d', ), fwhm=dict(argstr='--fwhm=%f', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--imain=%s', mandatory=True, ), interp=dict(argstr='--interp=%s', ), max_iter=dict(argstr='--miter=%d', ), minmet=dict(argstr='--minmet=%d', ), numprec=dict(argstr='--numprec=%s', ), out_base=dict(argstr='--out=%s', ), out_corrected=dict(argstr='--iout=%s', ), out_field=dict(argstr='--fout=%s', ), out_logfile=dict(argstr='--logout=%s', ), output_type=dict(), readout_times=dict(), regrid=dict(argstr='--regrid=%d', ), scale=dict(argstr='--scale=%d', ), splineorder=dict(argstr='--splineorder=%d', ), subsamp=dict(argstr='--subsamp=%d', ), terminal_output=dict(mandatory=True, nohash=True, ), warp_res=dict(argstr='--warpres=%f', ), ) inputs = TOPUP.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TOPUP_outputs(): output_map = dict(out_corrected=dict(), out_enc_file=dict(), out_field=dict(), out_fieldcoef=dict(), out_logfile=dict(), out_movpar=dict(), out_topup=dict(), ) outputs = TOPUP.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py000066400000000000000000000027271227300005300257750ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import TemporalFilter def test_TemporalFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), highpass_sigma=dict(argstr='-bptf %.6f', position=4, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), lowpass_sigma=dict(argstr='%.6f', position=5, usedefault=True, ), nan2zeros=dict(argstr='-nan', position=3, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = TemporalFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TemporalFilter_outputs(): output_map = 
dict(out_file=dict(), ) outputs = TemporalFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_Threshold.py000066400000000000000000000027451227300005300250000ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import Threshold def test_Threshold_inputs(): input_map = dict(args=dict(argstr='%s', ), direction=dict(usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), nan2zeros=dict(argstr='-nan', position=3, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), thresh=dict(argstr='%s', mandatory=True, position=4, ), use_nonzero_voxels=dict(requires=['use_robust_range'], ), use_robust_range=dict(), ) inputs = Threshold.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Threshold_outputs(): output_map = dict(out_file=dict(), ) outputs = Threshold.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py000066400000000000000000000030041227300005300256130ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.dti import TractSkeleton def test_TractSkeleton_inputs(): input_map = dict(alt_data_file=dict(argstr='-a %s', ), alt_skeleton=dict(argstr='-s %s', ), args=dict(argstr='%s', ), data_file=dict(), distance_map=dict(), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', mandatory=True, ), output_type=dict(), project_data=dict(argstr='-p %.3f %s %s %s %s', requires=['threshold', 'distance_map', 'data_file'], ), projected_data=dict(), search_mask_file=dict(xor=['use_cingulum_mask'], ), skeleton_file=dict(argstr='-o %s', ), terminal_output=dict(mandatory=True, nohash=True, ), threshold=dict(), use_cingulum_mask=dict(usedefault=True, xor=['search_mask_file'], ), ) inputs = TractSkeleton.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TractSkeleton_outputs(): output_map = dict(projected_data=dict(), skeleton_file=dict(), ) outputs = TractSkeleton.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py000066400000000000000000000025441227300005300251340ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import UnaryMaths def test_UnaryMaths_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, 
usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), nan2zeros=dict(argstr='-nan', position=3, ), operation=dict(argstr='-%s', mandatory=True, position=4, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = UnaryMaths.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_UnaryMaths_outputs(): output_map = dict(out_file=dict(), ) outputs = UnaryMaths.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_VecReg.py000066400000000000000000000026531227300005300242150ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.dti import VecReg def test_VecReg_inputs(): input_map = dict(affine_mat=dict(argstr='-t %s', ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', mandatory=True, ), interpolation=dict(argstr='--interp=%s', ), mask=dict(argstr='-m %s', ), out_file=dict(argstr='-o %s', genfile=True, hash_files=False, ), output_type=dict(), ref_mask=dict(argstr='--refmask=%s', ), ref_vol=dict(argstr='-r %s', mandatory=True, ), rotation_mat=dict(argstr='--rotmat=%s', ), rotation_warp=dict(argstr='--rotwarp=%s', ), terminal_output=dict(mandatory=True, nohash=True, ), warp_field=dict(argstr='-w %s', ), ) inputs = VecReg.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_VecReg_outputs(): output_map = dict(out_file=dict(), ) outputs = VecReg.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_auto_XFibres.py000066400000000000000000000043311227300005300243770ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.dti import XFibres def test_XFibres_inputs(): input_map = dict(all_ard=dict(argstr='--allard', xor=('no_ard', 'all_ard'), ), args=dict(argstr='%s', ), burn_in=dict(argstr='--burnin=%d', ), burn_in_no_ard=dict(argstr='--burninnoard=%d', ), bvals=dict(argstr='--bvals=%s', mandatory=True, ), bvecs=dict(argstr='--bvecs=%s', mandatory=True, ), dwi=dict(argstr='--data=%s', mandatory=True, ), environ=dict(nohash=True, usedefault=True, ), force_dir=dict(argstr='--forcedir', usedefault=True, ), fudge=dict(argstr='--fudge=%d', ), gradnonlin=dict(argstr='--gradnonlin=%s', ), ignore_exception=dict(nohash=True, usedefault=True, ), logdir=dict(argstr='--logdir=%s', usedefault=True, ), mask=dict(argstr='--mask=%s', mandatory=True, ), model=dict(argstr='--model=%d', ), n_fibres=dict(argstr='--nfibres=%d', ), n_jumps=dict(argstr='--njumps=%d', ), no_ard=dict(argstr='--noard', xor=('no_ard', 'all_ard'), ), no_spat=dict(argstr='--nospat', xor=('no_spat', 'non_linear'), ), non_linear=dict(argstr='--nonlinear', xor=('no_spat', 'non_linear'), ), 
output_type=dict(), sample_every=dict(argstr='--sampleevery=%d', ), seed=dict(argstr='--seed=%d', ), terminal_output=dict(mandatory=True, nohash=True, ), update_proposal_every=dict(argstr='--updateproposalevery=%d', ), ) inputs = XFibres.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_XFibres_outputs(): output_map = dict(dyads=dict(), fsamples=dict(), mean_S0samples=dict(), mean_dsamples=dict(), mean_fsamples=dict(), phsamples=dict(), thsamples=dict(), ) outputs = XFibres.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/fsl/tests/test_base.py000066400000000000000000000060561227300005300227250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from nipype.testing import (assert_equal, assert_true, assert_raises, assert_not_equal, skipif) import nipype.interfaces.fsl as fsl from nipype.interfaces.base import InterfaceResult from nipype.interfaces.fsl import check_fsl, no_fsl @skipif(no_fsl)#skip if fsl not installed) def test_fslversion(): ver = fsl.Info.version() if ver: # If ver is None, fsl is not installed ver = ver.split('.') yield assert_true, ver[0] in ['4', '5'] @skipif(no_fsl)#skip if fsl not installed) def test_fsloutputtype(): types = fsl.Info.ftypes.keys() orig_out_type = fsl.Info.output_type() yield assert_true, orig_out_type in types def test_outputtype_to_ext(): for ftype, ext in fsl.Info.ftypes.items(): res = fsl.Info.output_type_to_ext(ftype) yield assert_equal, res, ext yield assert_raises, KeyError, fsl.Info.output_type_to_ext, 'JUNK' @skipif(no_fsl)#skip if fsl not installed) def test_FSLCommand(): # Most methods in FSLCommand are tested in the subclasses. Only # testing the one item that is not. 
cmd = fsl.FSLCommand(command='ls') res = cmd.run() yield assert_equal, type(res), InterfaceResult @skipif(no_fsl)#skip if fsl not installed) def test_FSLCommand2(): # Check default output type and environ cmd = fsl.FSLCommand(command='junk') yield assert_equal, cmd._output_type, fsl.Info.output_type() yield assert_equal, cmd.inputs.environ['FSLOUTPUTTYPE'], cmd._output_type yield assert_true, cmd._output_type in fsl.Info.ftypes cmd = fsl.FSLCommand cmdinst = fsl.FSLCommand(command='junk') for out_type in fsl.Info.ftypes: cmd.set_default_output_type(out_type) yield assert_equal, cmd._output_type, out_type if out_type != fsl.Info.output_type(): # Setting class outputtype should not effect existing instances yield assert_not_equal, cmdinst.inputs.output_type, out_type @skipif(no_fsl)#skip if fsl not installed) def test_gen_fname(): # Test _gen_fname method of FSLCommand cmd = fsl.FSLCommand(command = 'junk',output_type = 'NIFTI_GZ') pth = os.getcwd() # just the filename fname = cmd._gen_fname('foo.nii.gz',suffix='_fsl') desired = os.path.join(pth, 'foo_fsl.nii.gz') yield assert_equal, fname, desired # filename with suffix fname = cmd._gen_fname('foo.nii.gz', suffix = '_brain') desired = os.path.join(pth, 'foo_brain.nii.gz') yield assert_equal, fname, desired # filename with suffix and working directory fname = cmd._gen_fname('foo.nii.gz', suffix = '_brain', cwd = '/data') desired = os.path.join('/data', 'foo_brain.nii.gz') yield assert_equal, fname, desired # filename with suffix and no file extension change fname = cmd._gen_fname('foo.nii.gz', suffix = '_brain.mat', change_ext = False) desired = os.path.join(pth, 'foo_brain.mat') yield assert_equal, fname, desired nipype-0.9.2/nipype/interfaces/fsl/tests/test_dti.py000066400000000000000000000456551227300005300226030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import tempfile import shutil from tempfile import mkdtemp from shutil import rmtree import numpy as np import nibabel as nb from nipype.testing import ( assert_equal, assert_not_equal, assert_raises, skipif, example_data) import nipype.interfaces.fsl.dti as fsl from nipype.interfaces.fsl import Info, no_fsl from nipype.interfaces.base import Undefined # nosetests --with-doctest path_to/test_fsl.py def skip_dti_tests(): """XXX These tests are skipped until we clean up some of this code """ return True def create_files_in_directory(): outdir = os.path.realpath(mkdtemp()) cwd = os.getcwd() os.chdir(outdir) filelist = ['a.nii','b.nii'] for f in filelist: hdr = nb.Nifti1Header() shape = (3,3,3,4) hdr.set_data_shape(shape) img = np.random.random(shape) nb.save(nb.Nifti1Image(img,np.eye(4),hdr), os.path.join(outdir,f)) return filelist, outdir, cwd def clean_directory(outdir, old_wd): if os.path.exists(outdir): rmtree(outdir) os.chdir(old_wd) # test bedpostx @skipif(no_fsl) def test_bedpostx2(): filelist, outdir, cwd = create_files_in_directory() bpx = fsl.BEDPOSTX() # make sure command gets called yield assert_equal, bpx.cmd, 'bedpostx' # test raising error with mandatory args absent yield assert_raises, ValueError, bpx.run # .inputs based parameters setting bpx2 = fsl.BEDPOSTX() bpx2.inputs.mask = example_data('mask.nii') bpx2.inputs.dwi = example_data('diffusion.nii') bpx2.inputs.bvals = example_data('bvals') bpx2.inputs.bvecs = example_data('bvecs') bpx2.inputs.fibres = 2 bpx2.inputs.weight = 0.3 bpx2.inputs.burn_period = 200 bpx2.inputs.jumps = 500 bpx2.inputs.sampling = 20 
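    # Both command lines are split into tokens and sorted before comparison,
    # so the test checks the set of arguments rather than a particular order.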
actualCmdline = sorted(bpx2.cmdline.split()) cmd = 'bedpostx bedpostx -b 200 -n 2 -j 500 -s 20 -w 0.30' desiredCmdline = sorted(cmd.split()) yield assert_equal, actualCmdline, desiredCmdline # test dtifit @skipif(no_fsl) def test_dtifit2(): filelist, outdir, cwd = create_files_in_directory() dti = fsl.DTIFit() # make sure command gets called yield assert_equal, dti.cmd, 'dtifit' # test raising error with mandatory args absent yield assert_raises, ValueError, dti.run # .inputs based parameters setting dti.inputs.dwi = filelist[0] dti.inputs.base_name = 'foo.dti.nii' dti.inputs.mask = filelist[1] dti.inputs.bvecs = filelist[0] dti.inputs.bvals = filelist[1] dti.inputs.min_z = 10 dti.inputs.max_z = 50 yield assert_equal, dti.cmdline, \ 'dtifit -k %s -o foo.dti.nii -m %s -r %s -b %s -Z 50 -z 10'%(filelist[0], filelist[1], filelist[0], filelist[1]) clean_directory(outdir, cwd) # Globals to store paths for tbss tests tbss_dir = None test_dir = None def setup_tbss(): # Setup function is called before each test. Setup is called only # once for each generator function. global tbss_dir, tbss_files, test_dir test_dir = os.getcwd() tbss_dir = tempfile.mkdtemp() os.chdir(tbss_dir) tbss_files = ['a.nii','b.nii'] for f in tbss_files: fp = open(f,'wt') fp.write('dummy') fp.close() def teardown_tbss(): # Teardown is called after each test to perform cleanup os.chdir(test_dir) shutil.rmtree(tbss_dir) @skipif(skip_dti_tests) def test_randomise2(): rand = fsl.Randomise() # make sure command gets called yield assert_equal, rand.cmd, 'randomise' # test raising error with mandatory args absent yield assert_raises, ValueError, rand.run # .inputs based parameters setting rand.inputs.input_4D = 'infile.nii' rand.inputs.output_rootname = 'outfile' rand.inputs.design_matrix = 'design.mat' rand.inputs.t_contrast = 'infile.con' actualCmdline = sorted(rand.cmdline.split()) cmd = 'randomise -i infile.nii -o outfile -d design.mat -t infile.con' desiredCmdline = sorted(cmd.split()) yield assert_equal, actualCmdline, desiredCmdline # .run based parameter setting rand2 = fsl.Randomise(input_4D='infile2', output_rootname='outfile2', f_contrast='infile.f', one_sample_gmean=True, int_seed=4) actualCmdline = sorted(rand2.cmdline.split()) cmd = 'randomise -i infile2 -o outfile2 -1 -f infile.f --seed=4' desiredCmdline = sorted(cmd.split()) yield assert_equal, actualCmdline, desiredCmdline rand3 = fsl.Randomise() results = rand3.run(input_4D='infile3', output_rootname='outfile3') yield assert_equal, results.runtime.cmdline, \ 'randomise -i infile3 -o outfile3' # test arguments for opt_map opt_map = {'demean_data': ('-D', True), 'one_sample_gmean': ('-1', True), 'mask_image': ('-m inp_mask', 'inp_mask'), 'design_matrix': ('-d design.mat', 'design.mat'), 't_contrast': ('-t input.con', 'input.con'), 'f_contrast': ('-f input.fts', 'input.fts'), 'xchange_block_labels': ('-e design.grp', 'design.grp'), 'print_unique_perm': ('-q', True), 'print_info_parallelMode': ('-Q', True), 'num_permutations': ('-n 10', 10), 'vox_pvalus': ('-x', True), 'fstats_only': ('--fonly', True), 'thresh_free_cluster': ('-T', True), 'thresh_free_cluster_2Dopt': ('--T2', True), 'cluster_thresholding': ('-c 0.20', 0.20), 'cluster_mass_thresholding': ('-C 0.40', 0.40), 'fcluster_thresholding': ('-F 0.10', 0.10), 'fcluster_mass_thresholding': ('-S 0.30', 0.30), 'variance_smoothing': ('-v 0.20', 0.20), 'diagnostics_off': ('--quiet', True), 'output_raw': ('-R', True), 'output_perm_vect': ('-P', True), 'int_seed': ('--seed=20', 20), 'TFCE_height_param': 
('--tfce_H=0.11', 0.11), 'TFCE_extent_param': ('--tfce_E=0.50', 0.50), 'TFCE_connectivity': ('--tfce_C=0.30', 0.30), 'list_num_voxel_EVs_pos': ('--vxl=1,2,3,4', '1,2,3,4'), 'list_img_voxel_EVs': ('--vxf=6,7,8,9,3', '6,7,8,9,3')} for name, settings in opt_map.items(): rand4 = fsl.Randomise(input_4D='infile', output_rootname='root', **{name: settings[1]}) yield assert_equal, rand4.cmdline, rand4.cmd + ' -i infile -o root ' \ + settings[0] @skipif(skip_dti_tests) def test_Randomise_parallel(): rand = fsl.Randomise_parallel() # make sure command gets called yield assert_equal, rand.cmd, 'randomise_parallel' # test raising error with mandatory args absent yield assert_raises, ValueError, rand.run # .inputs based parameters setting rand.inputs.input_4D = 'infile.nii' rand.inputs.output_rootname = 'outfile' rand.inputs.design_matrix = 'design.mat' rand.inputs.t_contrast = 'infile.con' actualCmdline = sorted(rand.cmdline.split()) cmd = 'randomise_parallel -i infile.nii -o outfile -d design.mat -t infile.con' desiredCmdline = sorted(cmd.split()) yield assert_equal, actualCmdline, desiredCmdline # .run based parameter setting rand2 = fsl.Randomise_parallel(input_4D='infile2', output_rootname='outfile2', f_contrast='infile.f', one_sample_gmean=True, int_seed=4) actualCmdline = sorted(rand2.cmdline.split()) cmd = 'randomise_parallel -i infile2 -o outfile2 -1 -f infile.f --seed=4' desiredCmdline = sorted(cmd.split()) yield assert_equal, actualCmdline, desiredCmdline rand3 = fsl.Randomise_parallel() results = rand3.run(input_4D='infile3', output_rootname='outfile3') yield assert_equal, results.runtime.cmdline, \ 'randomise_parallel -i infile3 -o outfile3' # test arguments for opt_map opt_map = {'demean_data': ('-D', True), 'one_sample_gmean': ('-1', True), 'mask_image': ('-m inp_mask', 'inp_mask'), 'design_matrix': ('-d design.mat', 'design.mat'), 't_contrast': ('-t input.con', 'input.con'), 'f_contrast': ('-f input.fts', 'input.fts'), 'xchange_block_labels': ('-e design.grp', 'design.grp'), 'print_unique_perm': ('-q', True), 'print_info_parallelMode': ('-Q', True), 'num_permutations': ('-n 10', 10), 'vox_pvalus': ('-x', True), 'fstats_only': ('--fonly', True), 'thresh_free_cluster': ('-T', True), 'thresh_free_cluster_2Dopt': ('--T2', True), 'cluster_thresholding': ('-c 0.20', 0.20), 'cluster_mass_thresholding': ('-C 0.40', 0.40), 'fcluster_thresholding': ('-F 0.10', 0.10), 'fcluster_mass_thresholding': ('-S 0.30', 0.30), 'variance_smoothing': ('-v 0.20', 0.20), 'diagnostics_off': ('--quiet', True), 'output_raw': ('-R', True), 'output_perm_vect': ('-P', True), 'int_seed': ('--seed=20', 20), 'TFCE_height_param': ('--tfce_H=0.11', 0.11), 'TFCE_extent_param': ('--tfce_E=0.50', 0.50), 'TFCE_connectivity': ('--tfce_C=0.30', 0.30), 'list_num_voxel_EVs_pos': ('--vxl=' \ + repr([1, 2, 3, 4]), repr([1, 2, 3, 4])), 'list_img_voxel_EVs': ('--vxf=' \ + repr([6, 7, 8, 9, 3]), repr([6, 7, 8, 9, 3]))} for name, settings in opt_map.items(): rand4 = fsl.Randomise_parallel(input_4D='infile', output_rootname='root', **{name: settings[1]}) yield assert_equal, rand4.cmdline, rand4.cmd + ' -i infile -o root ' \ + settings[0] # test proj_thresh @skipif(skip_dti_tests) def test_Proj_thresh(): proj = fsl.ProjThresh() # make sure command gets called yield assert_equal, proj.cmd, 'proj_thresh' # test raising error with mandatory args absent yield assert_raises, ValueError, proj.run # .inputs based parameters setting proj.inputs.volumes = ['vol1', 'vol2', 'vol3'] proj.inputs.threshold = 3 yield assert_equal, proj.cmdline, 
'proj_thresh vol1 vol2 vol3 3' proj2 = fsl.ProjThresh(threshold=10, volumes=['vola', 'volb']) yield assert_equal, proj2.cmdline, 'proj_thresh vola volb 10' # .run based parameters setting proj3 = fsl.ProjThresh() results = proj3.run(volumes=['inp1', 'inp3', 'inp2'], threshold=2) yield assert_equal, results.runtime.cmdline, 'proj_thresh inp1 inp3 inp2 2' yield assert_not_equal, results.runtime.returncode, 0 yield assert_equal, isinstance(results.interface.inputs.volumes, list), True yield assert_equal, results.interface.inputs.threshold, 2 # test arguments for opt_map # Proj_thresh doesn't have an opt_map{} # test vec_reg @skipif(skip_dti_tests) def test_Vec_reg(): vrg = fsl.VecReg() # make sure command gets called yield assert_equal, vrg.cmd, 'vecreg' # test raising error with mandatory args absent yield assert_raises, ValueError, vrg.run # .inputs based parameters setting vrg.inputs.infile = 'infile' vrg.inputs.outfile = 'outfile' vrg.inputs.refVolName = 'MNI152' vrg.inputs.affineTmat = 'tmat.mat' yield assert_equal, vrg.cmdline, \ 'vecreg -i infile -o outfile -r MNI152 -t tmat.mat' # .run based parameter setting vrg2 = fsl.VecReg(infile='infile2', outfile='outfile2', refVolName='MNI152', affineTmat='tmat2.mat', brainMask='nodif_brain_mask') actualCmdline = sorted(vrg2.cmdline.split()) cmd = 'vecreg -i infile2 -o outfile2 -r MNI152 -t tmat2.mat -m nodif_brain_mask' desiredCmdline = sorted(cmd.split()) yield assert_equal, actualCmdline, desiredCmdline vrg3 = fsl.VecReg() results = vrg3.run(infile='infile3', outfile='outfile3', refVolName='MNI152', affineTmat='tmat3.mat',) yield assert_equal, results.runtime.cmdline, \ 'vecreg -i infile3 -o outfile3 -r MNI152 -t tmat3.mat' yield assert_not_equal, results.runtime.returncode, 0 yield assert_equal, results.interface.inputs.infile, 'infile3' yield assert_equal, results.interface.inputs.outfile, 'outfile3' yield assert_equal, results.interface.inputs.refVolName, 'MNI152' yield assert_equal, results.interface.inputs.affineTmat, 'tmat3.mat' # test arguments for opt_map opt_map = { 'verbose': ('-v', True), 'helpDoc': ('-h', True), 'tensor': ('--tensor', True), 'affineTmat': ('-t Tmat', 'Tmat'), 'warpFile': ('-w wrpFile', 'wrpFile'), 'interpolation': ('--interp=sinc', 'sinc'), 'brainMask': ('-m mask', 'mask')} for name, settings in opt_map.items(): vrg4 = fsl.VecReg(infile='infile', outfile='outfile', refVolName='MNI152', **{name: settings[1]}) yield assert_equal, vrg4.cmdline, vrg4.cmd + \ ' -i infile -o outfile -r MNI152 ' + settings[0] # test find_the_biggest @skipif(skip_dti_tests) def test_Find_the_biggest(): fbg = fsl.FindTheBiggest() # make sure command gets called yield assert_equal, fbg.cmd, 'find_the_biggest' # test raising error with mandatory args absent yield assert_raises, ValueError, fbg.run # .inputs based parameters setting fbg.inputs.infiles = 'seed*' fbg.inputs.outfile = 'fbgfile' yield assert_equal, fbg.cmdline, 'find_the_biggest seed* fbgfile' fbg2 = fsl.FindTheBiggest(infiles='seed2*', outfile='fbgfile2') yield assert_equal, fbg2.cmdline, 'find_the_biggest seed2* fbgfile2' # .run based parameters setting fbg3 = fsl.FindTheBiggest() results = fbg3.run(infiles='seed3', outfile='out3') yield assert_equal, results.runtime.cmdline, 'find_the_biggest seed3 out3' # test arguments for opt_map # Find_the_biggest doesn't have an opt_map{} @skipif(no_fsl) def test_tbss_skeleton(): skeletor = fsl.TractSkeleton() files, newdir, olddir = create_files_in_directory() # Test the underlying command yield assert_equal, skeletor.cmd, 
"tbss_skeleton" # It shouldn't run yet yield assert_raises, ValueError, skeletor.run # Test the most basic way to use it skeletor.inputs.in_file = files[0] # First by implicit argument skeletor.inputs.skeleton_file = True yield assert_equal, skeletor.cmdline, \ "tbss_skeleton -i a.nii -o %s"%os.path.join(newdir, "a_skeleton.nii") # Now with a specific name skeletor.inputs.skeleton_file = "old_boney.nii" yield assert_equal, skeletor.cmdline, "tbss_skeleton -i a.nii -o old_boney.nii" # Now test the more complicated usage bones = fsl.TractSkeleton(in_file="a.nii", project_data=True) # This should error yield assert_raises, ValueError, bones.run # But we can set what we need bones.inputs.threshold = 0.2 bones.inputs.distance_map = "b.nii" bones.inputs.data_file = "b.nii" # Even though that's silly # Now we get a command line yield assert_equal, bones.cmdline, \ "tbss_skeleton -i a.nii -p 0.200 b.nii %s b.nii %s"%(Info.standard_image("LowerCingulum_1mm.nii.gz"), os.path.join(newdir, "b_skeletonised.nii")) # Can we specify a mask? bones.inputs.use_cingulum_mask = Undefined bones.inputs.search_mask_file = "a.nii" yield assert_equal, bones.cmdline, \ "tbss_skeleton -i a.nii -p 0.200 b.nii a.nii b.nii %s"%os.path.join(newdir, "b_skeletonised.nii") # Looks good; clean up clean_directory(newdir, olddir) @skipif(no_fsl) def test_distancemap(): mapper = fsl.DistanceMap() files, newdir, olddir = create_files_in_directory() # Test the underlying command yield assert_equal, mapper.cmd, "distancemap" # It shouldn't run yet yield assert_raises, ValueError, mapper.run # But if we do this... mapper.inputs.in_file = "a.nii" # It should yield assert_equal, mapper.cmdline, "distancemap --out=%s --in=a.nii"%os.path.join(newdir, "a_dstmap.nii") # And we should be able to write out a maxima map mapper.inputs.local_max_file = True yield assert_equal, mapper.cmdline, \ "distancemap --out=%s --in=a.nii --localmax=%s"%(os.path.join(newdir, "a_dstmap.nii"), os.path.join(newdir, "a_lclmax.nii")) # And call it whatever we want mapper.inputs.local_max_file = "max.nii" yield assert_equal, mapper.cmdline, \ "distancemap --out=%s --in=a.nii --localmax=max.nii"%os.path.join(newdir, "a_dstmap.nii") # Not much else to do here clean_directory(newdir, olddir) nipype-0.9.2/nipype/interfaces/fsl/tests/test_epi.py000066400000000000000000000034711227300005300225660ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from tempfile import mkdtemp from shutil import rmtree import numpy as np import nibabel as nb from nipype.testing import ( assert_equal, assert_not_equal, assert_raises, skipif) import nipype.interfaces.fsl.epi as fsl from nipype.interfaces.fsl import no_fsl def create_files_in_directory(): outdir = os.path.realpath(mkdtemp()) cwd = os.getcwd() os.chdir(outdir) filelist = ['a.nii','b.nii'] for f in filelist: hdr = nb.Nifti1Header() shape = (3,3,3,4) hdr.set_data_shape(shape) img = np.random.random(shape) nb.save(nb.Nifti1Image(img,np.eye(4),hdr), os.path.join(outdir,f)) return filelist, outdir, cwd def clean_directory(outdir, old_wd): if os.path.exists(outdir): rmtree(outdir) os.chdir(old_wd) # test eddy_correct @skipif(no_fsl) def test_eddy_correct2(): filelist, outdir, cwd = create_files_in_directory() eddy = fsl.EddyCorrect() # make sure command gets called yield assert_equal, eddy.cmd, 'eddy_correct' # test raising error with mandatory args absent yield assert_raises, ValueError, eddy.run # .inputs based parameters 
setting eddy.inputs.in_file = filelist[0] eddy.inputs.out_file = 'foo_eddc.nii' eddy.inputs.ref_num = 100 yield assert_equal, eddy.cmdline, 'eddy_correct %s foo_eddc.nii 100'%filelist[0] # .run based parameter setting eddy2 = fsl.EddyCorrect(in_file=filelist[0], out_file='foo_ec.nii', ref_num=20) yield assert_equal, eddy2.cmdline, 'eddy_correct %s foo_ec.nii 20'%filelist[0] # test arguments for opt_map # eddy_correct class doesn't have opt_map{} clean_directory(outdir, cwd) nipype-0.9.2/nipype/interfaces/fsl/tests/test_maths.py000066400000000000000000000371751227300005300231350ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from tempfile import mkdtemp from shutil import rmtree import numpy as np import nibabel as nb from nipype.testing import (assert_equal, assert_raises, skipif) from nipype.interfaces.base import Undefined import nipype.interfaces.fsl.maths as fsl from nipype.interfaces.fsl import no_fsl def create_files_in_directory(): testdir = os.path.realpath(mkdtemp()) origdir = os.getcwd() os.chdir(testdir) ftype = os.environ["FSLOUTPUTTYPE"] os.environ["FSLOUTPUTTYPE"] = "NIFTI" filelist = ['a.nii','b.nii'] for f in filelist: hdr = nb.Nifti1Header() shape = (3,3,3,4) hdr.set_data_shape(shape) img = np.random.random(shape) nb.save(nb.Nifti1Image(img,np.eye(4),hdr), os.path.join(testdir,f)) return filelist, testdir, origdir, ftype def clean_directory(testdir, origdir, ftype): if os.path.exists(testdir): rmtree(testdir) os.chdir(origdir) os.environ["FSLOUTPUTTYPE"] = ftype @skipif(no_fsl) def test_maths_base(): files, testdir, origdir, ftype = create_files_in_directory() # Get some fslmaths maths = fsl.MathsCommand() # Test that we got what we wanted yield assert_equal, maths.cmd, "fslmaths" # Test that it needs a mandatory argument yield assert_raises, ValueError, maths.run # Set an in file maths.inputs.in_file = "a.nii" # Now test the most basic command line yield assert_equal, maths.cmdline, "fslmaths a.nii %s"%os.path.join(testdir, "a_maths.nii") # Now test that we can set the various data types dtypes = ["float","char","int","short","double","input"] int_cmdline = "fslmaths -dt %s a.nii " + os.path.join(testdir, "a_maths.nii") out_cmdline = "fslmaths a.nii " + os.path.join(testdir, "a_maths.nii") + " -odt %s" duo_cmdline = "fslmaths -dt %s a.nii " + os.path.join(testdir, "a_maths.nii") + " -odt %s" for dtype in dtypes: foo = fsl.MathsCommand(in_file="a.nii",internal_datatype=dtype) yield assert_equal, foo.cmdline, int_cmdline%dtype bar = fsl.MathsCommand(in_file="a.nii",output_datatype=dtype) yield assert_equal, bar.cmdline, out_cmdline%dtype foobar = fsl.MathsCommand(in_file="a.nii",internal_datatype=dtype,output_datatype=dtype) yield assert_equal, foobar.cmdline, duo_cmdline%(dtype, dtype) # Test that we can ask for an outfile name maths.inputs.out_file = "b.nii" yield assert_equal, maths.cmdline, "fslmaths a.nii b.nii" # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_changedt(): files, testdir, origdir, ftype = create_files_in_directory() # Get some fslmaths cdt = fsl.ChangeDataType() # Test that we got what we wanted yield assert_equal, cdt.cmd, "fslmaths" # Test that it needs a mandatory argument yield assert_raises, ValueError, cdt.run # Set an in file and out file cdt.inputs.in_file = "a.nii" cdt.inputs.out_file = "b.nii" # But it still shouldn't work yield assert_raises, ValueError, cdt.run # Now test that we can set the various 
data types dtypes = ["float","char","int","short","double","input"] cmdline = "fslmaths a.nii b.nii -odt %s" for dtype in dtypes: foo = fsl.MathsCommand(in_file="a.nii",out_file="b.nii",output_datatype=dtype) yield assert_equal, foo.cmdline, cmdline%dtype # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_threshold(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command thresh = fsl.Threshold(in_file="a.nii",out_file="b.nii") # Test the underlying command yield assert_equal, thresh.cmd, "fslmaths" # Test mandtory args yield assert_raises, ValueError, thresh.run # Test the various opstrings cmdline = "fslmaths a.nii %s b.nii" for val in [0, 0., -1, -1.5, -0.5, 0.5, 3, 400, 400.5]: thresh.inputs.thresh = val yield assert_equal, thresh.cmdline, cmdline%"-thr %.10f"%val val = "%.10f"%42 thresh = fsl.Threshold(in_file="a.nii",out_file="b.nii",thresh=42,use_robust_range=True) yield assert_equal, thresh.cmdline, cmdline%("-thrp "+val) thresh.inputs.use_nonzero_voxels = True yield assert_equal, thresh.cmdline, cmdline%("-thrP "+val) thresh = fsl.Threshold(in_file="a.nii",out_file="b.nii",thresh=42,direction="above") yield assert_equal, thresh.cmdline, cmdline%("-uthr "+val) thresh.inputs.use_robust_range=True yield assert_equal, thresh.cmdline, cmdline%("-uthrp "+val) thresh.inputs.use_nonzero_voxels = True yield assert_equal, thresh.cmdline, cmdline%("-uthrP "+val) # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_meanimage(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command meaner = fsl.MeanImage(in_file="a.nii",out_file="b.nii") # Test the underlying command yield assert_equal, meaner.cmd, "fslmaths" # Test the defualt opstring yield assert_equal, meaner.cmdline, "fslmaths a.nii -Tmean b.nii" # Test the other dimensions cmdline = "fslmaths a.nii -%smean b.nii" for dim in ["X","Y","Z","T"]: meaner.inputs.dimension=dim yield assert_equal, meaner.cmdline, cmdline%dim # Test the auto naming meaner = fsl.MeanImage(in_file="a.nii") yield assert_equal, meaner.cmdline, "fslmaths a.nii -Tmean %s"%os.path.join(testdir, "a_mean.nii") # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_maximage(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command maxer = fsl.MaxImage(in_file="a.nii",out_file="b.nii") # Test the underlying command yield assert_equal, maxer.cmd, "fslmaths" # Test the defualt opstring yield assert_equal, maxer.cmdline, "fslmaths a.nii -Tmax b.nii" # Test the other dimensions cmdline = "fslmaths a.nii -%smax b.nii" for dim in ["X","Y","Z","T"]: maxer.inputs.dimension=dim yield assert_equal, maxer.cmdline, cmdline%dim # Test the auto naming maxer = fsl.MaxImage(in_file="a.nii") yield assert_equal, maxer.cmdline, "fslmaths a.nii -Tmax %s"%os.path.join(testdir, "a_max.nii") # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_smooth(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command smoother = fsl.IsotropicSmooth(in_file="a.nii",out_file="b.nii") # Test the underlying command yield assert_equal, smoother.cmd, "fslmaths" # Test that smoothing kernel is mandatory yield assert_raises, ValueError, smoother.run # Test smoothing kernels cmdline = "fslmaths a.nii -s %.5f b.nii" for val in [0,1.,1,25,0.5,8/3]: smoother = fsl.IsotropicSmooth(in_file="a.nii",out_file="b.nii",sigma=val) yield assert_equal, smoother.cmdline, cmdline%val smoother = 
fsl.IsotropicSmooth(in_file="a.nii",out_file="b.nii",fwhm=val) val = float(val)/np.sqrt(8 * np.log(2)) yield assert_equal, smoother.cmdline, cmdline%val # Test automatic naming smoother = fsl.IsotropicSmooth(in_file="a.nii", sigma=5) yield assert_equal, smoother.cmdline, "fslmaths a.nii -s %.5f %s"%(5, os.path.join(testdir, "a_smooth.nii")) # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_mask(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command masker = fsl.ApplyMask(in_file="a.nii",out_file="c.nii") # Test the underlying command yield assert_equal, masker.cmd, "fslmaths" # Test that the mask image is mandatory yield assert_raises, ValueError, masker.run # Test setting the mask image masker.inputs.mask_file = "b.nii" yield assert_equal, masker.cmdline, "fslmaths a.nii -mas b.nii c.nii" # Test auto name generation masker = fsl.ApplyMask(in_file="a.nii",mask_file="b.nii") yield assert_equal, masker.cmdline, "fslmaths a.nii -mas b.nii "+os.path.join(testdir, "a_masked.nii") # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_dilation(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command diller = fsl.DilateImage(in_file="a.nii",out_file="b.nii") # Test the underlying command yield assert_equal, diller.cmd, "fslmaths" # Test that the dilation operation is mandatory yield assert_raises, ValueError, diller.run # Test the different dilation operations for op in ["mean", "modal", "max"]: cv = dict(mean="M", modal="D", max="F") diller.inputs.operation = op yield assert_equal, diller.cmdline, "fslmaths a.nii -dil%s b.nii"%cv[op] # Now test the different kernel options for k in ["3D", "2D", "box", "boxv", "gauss", "sphere"]: for size in [1, 1.5, 5]: diller.inputs.kernel_shape = k diller.inputs.kernel_size = size yield assert_equal, diller.cmdline, "fslmaths a.nii -kernel %s %.4f -dilF b.nii"%(k, size) # Test that we can use a file kernel f = open("kernel.txt","w").close() del f # Shut pyflakes up diller.inputs.kernel_shape = "file" diller.inputs.kernel_size = Undefined diller.inputs.kernel_file = "kernel.txt" yield assert_equal, diller.cmdline, "fslmaths a.nii -kernel file kernel.txt -dilF b.nii" # Test that we don't need to request an out name dil = fsl.DilateImage(in_file="a.nii", operation="max") yield assert_equal, dil.cmdline, "fslmaths a.nii -dilF %s"%os.path.join(testdir, "a_dil.nii") # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_erosion(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command erode = fsl.ErodeImage(in_file="a.nii",out_file="b.nii") # Test the underlying command yield assert_equal, erode.cmd, "fslmaths" # Test the basic command line yield assert_equal, erode.cmdline, "fslmaths a.nii -ero b.nii" # Test that something else happens when you minimum filter erode.inputs.minimum_filter = True yield assert_equal, erode.cmdline, "fslmaths a.nii -eroF b.nii" # Test that we don't need to request an out name erode = fsl.ErodeImage(in_file="a.nii") yield assert_equal, erode.cmdline, "fslmaths a.nii -ero %s"%os.path.join(testdir, "a_ero.nii") # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_spatial_filter(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command filter = fsl.SpatialFilter(in_file="a.nii",out_file="b.nii") # Test the underlying command yield assert_equal, filter.cmd, "fslmaths" # Test that it fails without an operation yield 
assert_raises, ValueError, filter.run # Test the different operations for op in ["mean", "meanu", "median"]: filter.inputs.operation = op yield assert_equal, filter.cmdline, "fslmaths a.nii -f%s b.nii"%op # Test that we don't need to ask for an out name filter = fsl.SpatialFilter(in_file="a.nii", operation="mean") yield assert_equal, filter.cmdline, "fslmaths a.nii -fmean %s"%os.path.join(testdir, "a_filt.nii") # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_unarymaths(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command maths = fsl.UnaryMaths(in_file="a.nii",out_file="b.nii") # Test the underlying command yield assert_equal, maths.cmd, "fslmaths" # Test that it fails without an operation yield assert_raises, ValueError, maths.run # Test the different operations ops = ["exp", "log", "sin", "cos", "sqr", "sqrt", "recip", "abs", "bin", "index"] for op in ops: maths.inputs.operation = op yield assert_equal, maths.cmdline, "fslmaths a.nii -%s b.nii"%op # Test that we don't need to ask for an out file for op in ops: maths = fsl.UnaryMaths(in_file="a.nii", operation=op) yield assert_equal, maths.cmdline, "fslmaths a.nii -%s %s"%(op, os.path.join(testdir, "a_%s.nii"%op)) # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_binarymaths(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command maths = fsl.BinaryMaths(in_file="a.nii",out_file="c.nii") # Test the underlying command yield assert_equal, maths.cmd, "fslmaths" # Test that it fails without an operation an yield assert_raises, ValueError, maths.run # Test the different operations ops = ["add", "sub", "mul", "div", "rem", "min", "max"] operands = ["b.nii", -2, -0.5, 0, .123456, np.pi, 500] for op in ops: for ent in operands: maths = fsl.BinaryMaths(in_file="a.nii", out_file="c.nii", operation = op) if ent == "b.nii": maths.inputs.operand_file = ent yield assert_equal, maths.cmdline, "fslmaths a.nii -%s b.nii c.nii"%op else: maths.inputs.operand_value = ent yield assert_equal, maths.cmdline, "fslmaths a.nii -%s %.8f c.nii"%(op, ent) # Test that we don't need to ask for an out file for op in ops: maths = fsl.BinaryMaths(in_file="a.nii", operation=op, operand_file="b.nii") yield assert_equal, maths.cmdline, "fslmaths a.nii -%s b.nii %s"%(op,os.path.join(testdir,"a_maths.nii")) # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_multimaths(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command maths = fsl.MultiImageMaths(in_file="a.nii",out_file="c.nii") # Test the underlying command yield assert_equal, maths.cmd, "fslmaths" # Test that it fails without an operation an yield assert_raises, ValueError, maths.run # Test a few operations maths.inputs.operand_files = ["a.nii", "b.nii"] opstrings = ["-add %s -div %s", "-max 1 -sub %s -min %s", "-mas %s -add %s"] for ostr in opstrings: maths.inputs.op_string = ostr yield assert_equal, maths.cmdline, "fslmaths a.nii %s c.nii"%ostr%("a.nii", "b.nii") # Test that we don't need to ask for an out file maths = fsl.MultiImageMaths(in_file="a.nii", op_string="-add %s -mul 5", operand_files=["b.nii"]) yield assert_equal, maths.cmdline, \ "fslmaths a.nii -add b.nii -mul 5 %s"%os.path.join(testdir,"a_maths.nii") # Clean up our mess clean_directory(testdir, origdir, ftype) @skipif(no_fsl) def test_tempfilt(): files, testdir, origdir, ftype = create_files_in_directory() # Get the command filt = 
fsl.TemporalFilter(in_file="a.nii",out_file="b.nii") # Test the underlying command yield assert_equal, filt.cmd, "fslmaths" # Test that both filters are initialized off yield assert_equal, filt.cmdline, "fslmaths a.nii -bptf -1.000000 -1.000000 b.nii" # Test some filters windows = [(-1, -1), (0.1, 0.1), (-1, 20), (20, -1), (128, 248)] for win in windows: filt.inputs.highpass_sigma = win[0] filt.inputs.lowpass_sigma = win[1] yield assert_equal, filt.cmdline, "fslmaths a.nii -bptf %.6f %.6f b.nii"%win # Test that we don't need to ask for an out file filt = fsl.TemporalFilter(in_file="a.nii", highpass_sigma = 64) yield assert_equal, filt.cmdline, \ "fslmaths a.nii -bptf 64.000000 -1.000000 %s"%os.path.join(testdir,"a_filt.nii") # Clean up our mess clean_directory(testdir, origdir, ftype) nipype-0.9.2/nipype/interfaces/fsl/tests/test_model.py000066400000000000000000000053151227300005300231100ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import tempfile import shutil from nipype.testing import (assert_equal, assert_true, skipif) import nipype.interfaces.fsl.model as fsl from nipype.interfaces.fsl import Info from nipype.interfaces.fsl import no_fsl tmp_infile = None tmp_dir = None cwd = None @skipif(no_fsl) def setup_infile(): global tmp_infile, tmp_dir, cwd cwd = os.getcwd() ext = Info.output_type_to_ext(Info.output_type()) tmp_dir = tempfile.mkdtemp() tmp_infile = os.path.join(tmp_dir, 'foo' + ext) file(tmp_infile, 'w') os.chdir(tmp_dir) return tmp_infile, tmp_dir def teardown_infile(tmp_dir): os.chdir(cwd) shutil.rmtree(tmp_dir) @skipif(no_fsl) def test_MultipleRegressDesign(): _, tp_dir = setup_infile() foo = fsl.MultipleRegressDesign() foo.inputs.regressors = dict(voice_stenght=[1,1,1],age=[0.2,0.4,0.5],BMI=[1,-1,2]) con1 = ['voice_and_age','T',['age','voice_stenght'],[0.5,0.5]] con2 = ['just_BMI','T',['BMI'],[1]] foo.inputs.contrasts = [con1,con2,['con3','F',[con1,con2]]] res = foo.run() yield assert_equal, res.outputs.design_mat, os.path.join(os.getcwd(),'design.mat') yield assert_equal, res.outputs.design_con, os.path.join(os.getcwd(),'design.con') yield assert_equal, res.outputs.design_fts, os.path.join(os.getcwd(),'design.fts') yield assert_equal, res.outputs.design_grp, os.path.join(os.getcwd(),'design.grp') design_mat_expected_content = """/NumWaves 3 /NumPoints 3 /PPheights 3.000000e+00 5.000000e-01 1.000000e+00 /Matrix 1.000000e+00 2.000000e-01 1.000000e+00 -1.000000e+00 4.000000e-01 1.000000e+00 2.000000e+00 5.000000e-01 1.000000e+00 """ design_con_expected_content = """/ContrastName1 voice_and_age /ContrastName2 just_BMI /NumWaves 3 /NumContrasts 2 /PPheights 1.000000e+00 1.000000e+00 /RequiredEffect 100.000 100.000 /Matrix 0.000000e+00 5.000000e-01 5.000000e-01 1.000000e+00 0.000000e+00 0.000000e+00 """ design_fts_expected_content = """/NumWaves 2 /NumContrasts 1 /Matrix 1 1 """ design_grp_expected_content = """/NumWaves 1 /NumPoints 3 /Matrix 1 1 1 """ yield assert_equal, open(os.path.join(os.getcwd(),'design.con'), 'r').read(), design_con_expected_content yield assert_equal, open(os.path.join(os.getcwd(),'design.mat'), 'r').read(), design_mat_expected_content yield assert_equal, open(os.path.join(os.getcwd(),'design.fts'), 'r').read(), design_fts_expected_content yield assert_equal, open(os.path.join(os.getcwd(),'design.grp'), 'r').read(), design_grp_expected_content teardown_infile(tp_dir) 
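# --- Illustrative helper (added sketch; not part of the original test suite) ---
# The expected-content strings asserted in test_MultipleRegressDesign above
# document FSL's plain-text design format (/NumWaves, /NumPoints, /PPheights,
# /Matrix ...). Below is a minimal, hedged reader for that layout, assuming
# only the structure shown in design_mat_expected_content; the function name
# is hypothetical and exists purely for illustration.
def _read_fsl_design_matrix(path):
    import numpy as np
    rows, in_matrix = [], False
    with open(path) as fp:
        for line in fp:
            line = line.strip()
            if not line:
                continue
            if line.startswith('/Matrix'):
                in_matrix = True            # numeric rows follow this marker
            elif in_matrix:
                rows.append([float(v) for v in line.split()])
    return np.array(rows)                   # shape: (/NumPoints, /NumWaves)
# e.g. for the design.mat content asserted above,
# _read_fsl_design_matrix('design.mat').shape would be (3, 3).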
nipype-0.9.2/nipype/interfaces/fsl/tests/test_preprocess.py000066400000000000000000000453501227300005300242000ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import tempfile import shutil from nipype.testing import (assert_equal, assert_not_equal, assert_raises, skipif) from nipype.utils.filemanip import split_filename import nipype.interfaces.fsl.preprocess as fsl from nipype.interfaces.fsl import Info from nipype.interfaces.base import File, TraitError, Undefined from nipype.interfaces.fsl import no_fsl @skipif(no_fsl) def fsl_name(obj, fname): """Create valid fsl name, including file extension for output type. """ ext = Info.output_type_to_ext(obj.inputs.output_type) return fname + ext tmp_infile = None tmp_dir = None @skipif(no_fsl) def setup_infile(): global tmp_infile, tmp_dir ext = Info.output_type_to_ext(Info.output_type()) tmp_dir = tempfile.mkdtemp() tmp_infile = os.path.join(tmp_dir, 'foo' + ext) file(tmp_infile, 'w') return tmp_infile, tmp_dir def teardown_infile(tmp_dir): shutil.rmtree(tmp_dir) # test BET #@with_setup(setup_infile, teardown_infile) #broken in nose with generators @skipif(no_fsl) def test_bet(): tmp_infile, tp_dir = setup_infile() better = fsl.BET() yield assert_equal, better.cmd, 'bet' # Test raising error with mandatory args absent yield assert_raises, ValueError, better.run # Test generated outfile name better.inputs.in_file = tmp_infile outfile = fsl_name(better, 'foo_brain') outpath = os.path.join(os.getcwd(), outfile) realcmd = 'bet %s %s' % (tmp_infile, outpath) yield assert_equal, better.cmdline, realcmd # Test specified outfile name outfile = fsl_name(better, '/newdata/bar') better.inputs.out_file = outfile realcmd = 'bet %s %s' % (tmp_infile, outfile) yield assert_equal, better.cmdline, realcmd # infile foo.nii doesn't exist def func(): better.run(in_file='foo2.nii', out_file='bar.nii') yield assert_raises, TraitError, func # Our options and some test values for them # Should parallel the opt_map structure in the class for clarity opt_map = { 'outline': ('-o', True), 'mask': ('-m', True), 'skull': ('-s', True), 'no_output': ('-n', True), 'frac': ('-f 0.40', 0.4), 'vertical_gradient': ('-g 0.75', 0.75), 'radius': ('-r 20', 20), 'center': ('-c 54 75 80', [54, 75, 80]), 'threshold': ('-t', True), 'mesh': ('-e', True), 'surfaces': ('-A', True) #'verbose': ('-v', True), #'flags': ('--i-made-this-up', '--i-made-this-up'), } # Currently we don't test -R, -S, -B, -Z, -F, -A or -A2 # test each of our arguments better = fsl.BET() outfile = fsl_name(better, 'foo_brain') outpath = os.path.join(os.getcwd(), outfile) for name, settings in opt_map.items(): better = fsl.BET(**{name: settings[1]}) # Add mandatory input better.inputs.in_file = tmp_infile realcmd = ' '.join([better.cmd, tmp_infile, outpath, settings[0]]) yield assert_equal, better.cmdline, realcmd teardown_infile(tmp_dir) # test fast @skipif(no_fsl) def test_fast(): tmp_infile, tp_dir = setup_infile() faster = fsl.FAST() faster.inputs.verbose = True fasted = fsl.FAST(in_files=tmp_infile, verbose = True) fasted2 = fsl.FAST(in_files=[tmp_infile, tmp_infile], verbose = True) yield assert_equal, faster.cmd, 'fast' yield assert_equal, faster.inputs.verbose, True yield assert_equal, faster.inputs.manual_seg , Undefined yield assert_not_equal, faster.inputs, fasted.inputs yield assert_equal, fasted.cmdline, 'fast -v -S 1 %s'%(tmp_infile) yield assert_equal, fasted2.cmdline, 'fast -v -S 2 %s %s'%(tmp_infile, 
tmp_infile) faster = fsl.FAST() faster.inputs.in_files = tmp_infile yield assert_equal, faster.cmdline, 'fast -S 1 %s'%(tmp_infile) faster.inputs.in_files = [tmp_infile, tmp_infile] yield assert_equal, faster.cmdline, 'fast -S 2 %s %s'%(tmp_infile, tmp_infile) # Our options and some test values for them # Should parallel the opt_map structure in the class for clarity opt_map = {'number_classes': ('-n 4', 4), 'bias_iters': ('-I 5', 5), 'bias_lowpass': ('-l 15', 15), 'img_type': ('-t 2', 2), 'init_seg_smooth': ('-f 0.035', 0.035), 'segments': ('-g', True), 'init_transform': ('-a %s'%(tmp_infile), '%s'%(tmp_infile)), 'other_priors': ('-A %s %s %s'%(tmp_infile, tmp_infile, tmp_infile), (['%s'%(tmp_infile), '%s'%(tmp_infile), '%s'%(tmp_infile)])), 'no_pve': ('--nopve', True), 'output_biasfield': ('-b', True), 'output_biascorrected': ('-B', True), 'no_bias': ('-N', True), 'out_basename': ('-o fasted', 'fasted'), 'use_priors': ('-P', True), 'segment_iters': ('-W 14', 14), 'mixel_smooth': ('-R 0.25', 0.25), 'iters_afterbias': ('-O 3', 3), 'hyper': ('-H 0.15', 0.15), 'verbose': ('-v', True), 'manual_seg': ('-s %s'%(tmp_infile), '%s'%(tmp_infile)), 'probability_maps': ('-p', True), } # test each of our arguments for name, settings in opt_map.items(): faster = fsl.FAST(in_files=tmp_infile, **{name: settings[1]}) yield assert_equal, faster.cmdline, ' '.join([faster.cmd, settings[0], "-S 1 %s"%tmp_infile]) teardown_infile(tmp_dir) @skipif(no_fsl) def setup_flirt(): ext = Info.output_type_to_ext(Info.output_type()) tmpdir = tempfile.mkdtemp() _, infile = tempfile.mkstemp(suffix = ext, dir = tmpdir) _, reffile = tempfile.mkstemp(suffix = ext, dir = tmpdir) return tmpdir, infile, reffile def teardown_flirt(tmpdir): shutil.rmtree(tmpdir) @skipif(no_fsl) def test_flirt(): # setup tmpdir, infile, reffile = setup_flirt() flirter = fsl.FLIRT() yield assert_equal, flirter.cmd, 'flirt' flirter.inputs.bins = 256 flirter.inputs.cost = 'mutualinfo' flirted = fsl.FLIRT(in_file=infile, reference=reffile, out_file='outfile', out_matrix_file='outmat.mat', bins = 256, cost = 'mutualinfo') flirt_est = fsl.FLIRT(in_file=infile, reference=reffile, out_matrix_file='outmat.mat', bins = 256, cost = 'mutualinfo') yield assert_not_equal, flirter.inputs, flirted.inputs yield assert_not_equal, flirted.inputs, flirt_est.inputs yield assert_equal, flirter.inputs.bins, flirted.inputs.bins yield assert_equal, flirter.inputs.cost, flirt_est.inputs.cost realcmd = 'flirt -in %s -ref %s -out outfile -omat outmat.mat ' \ '-bins 256 -cost mutualinfo' % (infile, reffile) yield assert_equal, flirted.cmdline, realcmd flirter = fsl.FLIRT() # infile not specified yield assert_raises, ValueError, flirter.run flirter.inputs.in_file = infile # reference not specified yield assert_raises, ValueError, flirter.run flirter.inputs.reference = reffile # Generate outfile and outmatrix pth, fname, ext = split_filename(infile) outfile = fsl_name(flirter, '%s_flirt' %fname) outmat = '%s_flirt.mat' % fname realcmd = 'flirt -in %s -ref %s -out %s -omat %s' % (infile, reffile, outfile, outmat) yield assert_equal, flirter.cmdline, realcmd _, tmpfile = tempfile.mkstemp(suffix = '.nii', dir = tmpdir) # Loop over all inputs, set a reasonable value and make sure the # cmdline is updated correctly. 
for key, trait_spec in sorted(fsl.FLIRT.input_spec().traits().items()): # Skip mandatory inputs and the trait methods if key in ('trait_added', 'trait_modified', 'in_file', 'reference', 'environ', 'output_type', 'out_file', 'out_matrix_file', 'in_matrix_file', 'apply_xfm', 'ignore_exception', 'terminal_output', 'out_log', 'save_log'): continue param = None value = None if key == 'args': param = '-v' value = '-v' elif isinstance(trait_spec.trait_type, File): value = tmpfile param = trait_spec.argstr % value elif trait_spec.default is False: param = trait_spec.argstr value = True elif key in ('searchr_x', 'searchr_y', 'searchr_z'): value = [-45, 45] param = trait_spec.argstr % ' '.join(str(elt) for elt in value) else: value = trait_spec.default param = trait_spec.argstr % value cmdline = 'flirt -in %s -ref %s' % (infile, reffile) # Handle autogeneration of outfile pth, fname, ext = split_filename(infile) outfile = fsl_name(fsl.FLIRT(),'%s_flirt' % fname) outfile = ' '.join(['-out', outfile]) # Handle autogeneration of outmatrix outmatrix = '%s_flirt.mat' % fname outmatrix = ' '.join(['-omat', outmatrix]) # Build command line cmdline = ' '.join([cmdline, outfile, outmatrix, param]) flirter = fsl.FLIRT(in_file = infile, reference = reffile) setattr(flirter.inputs, key, value) yield assert_equal, flirter.cmdline, cmdline # Test OutputSpec flirter = fsl.FLIRT(in_file = infile, reference = reffile) pth, fname, ext = split_filename(infile) flirter.inputs.out_file = ''.join(['foo', ext]) flirter.inputs.out_matrix_file = ''.join(['bar', ext]) outs = flirter._list_outputs() yield assert_equal, outs['out_file'], \ os.path.join(os.getcwd(), flirter.inputs.out_file) yield assert_equal, outs['out_matrix_file'], \ os.path.join(os.getcwd(), flirter.inputs.out_matrix_file) teardown_flirt(tmpdir) # Mcflirt @skipif(no_fsl) def test_mcflirt(): tmpdir, infile, reffile = setup_flirt() frt = fsl.MCFLIRT() yield assert_equal, frt.cmd, 'mcflirt' # Test generated outfile name frt.inputs.in_file = infile _, nme = os.path.split(infile) outfile = os.path.join(os.getcwd(), nme) outfile = frt._gen_fname(outfile, suffix = '_mcf') realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile yield assert_equal, frt.cmdline, realcmd # Test specified outfile name outfile2 = '/newdata/bar.nii' frt.inputs.out_file = outfile2 realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile2 yield assert_equal, frt.cmdline, realcmd opt_map = { 'cost': ('-cost mutualinfo', 'mutualinfo'), 'bins': ('-bins 256', 256), 'dof': ('-dof 6', 6), 'ref_vol': ('-refvol 2', 2), 'scaling': ('-scaling 6.00', 6.00), 'smooth': ('-smooth 1.00', 1.00), 'rotation': ('-rotation 2', 2), 'stages': ('-stages 3', 3), 'init': ('-init %s'%(infile), infile), 'use_gradient': ('-gdt', True), 'use_contour': ('-edge', True), 'mean_vol': ('-meanvol', True), 'stats_imgs': ('-stats', True), 'save_mats': ('-mats', True), 'save_plots': ('-plots', True), } for name, settings in opt_map.items(): fnt = fsl.MCFLIRT(in_file = infile, **{name : settings[1]}) instr = '-in %s'%(infile) outstr = '-out %s'%(outfile) if name in ('init', 'cost', 'dof','mean_vol','bins'): yield assert_equal, fnt.cmdline, ' '.join([fnt.cmd, instr, settings[0], outstr]) else: yield assert_equal, fnt.cmdline, ' '.join([fnt.cmd, instr, outstr, settings[0]]) # Test error is raised when missing required args fnt = fsl.MCFLIRT() yield assert_raises, ValueError, fnt.run teardown_flirt(tmpdir) #test fnirt @skipif(no_fsl) def test_fnirt(): tmpdir, infile, reffile = setup_flirt() fnirt = fsl.FNIRT() yield assert_equal, 
fnirt.cmd, 'fnirt' # Test list parameters params = [('subsampling_scheme', '--subsamp', [4,2,2,1],'4,2,2,1'), ('max_nonlin_iter', '--miter', [4,4,4,2],'4,4,4,2'), ('ref_fwhm', '--reffwhm', [4,2,2,0],'4,2,2,0'), ('in_fwhm', '--infwhm', [4,2,2,0],'4,2,2,0'), ('apply_refmask', '--applyrefmask', [0,0,1,1],'0,0,1,1'), ('apply_inmask', '--applyinmask', [0,0,0,1],'0,0,0,1'), ('regularization_lambda', '--lambda', [0.5,0.75],'0.5,0.75')] for item, flag, val, strval in params: fnirt = fsl.FNIRT(in_file = infile, ref_file = reffile, **{item : val}) log = fnirt._gen_fname(infile, suffix='_log.txt', change_ext=False) iout = fnirt._gen_fname(infile, suffix='_warped') if item in ('max_nonlin_iter'): cmd = 'fnirt --in=%s '\ '--logout=%s'\ ' %s=%s --ref=%s'\ ' --iout=%s' % (infile, log, flag, strval, reffile, iout) elif item in ('in_fwhm'): cmd = 'fnirt --in=%s %s=%s --logout=%s '\ '--ref=%s --iout=%s' % (infile, flag, strval, log, reffile, iout) elif item.startswith('apply'): cmd = 'fnirt %s=%s '\ '--in=%s '\ '--logout=%s '\ '--ref=%s --iout=%s' % (flag,strval, infile, log, reffile, iout) else: cmd = 'fnirt '\ '--in=%s --logout=%s '\ '--ref=%s %s=%s --iout=%s' % (infile, log, reffile, flag, strval, iout) yield assert_equal, fnirt.cmdline, cmd # Test ValueError is raised when missing mandatory args fnirt = fsl.FNIRT() yield assert_raises, ValueError, fnirt.run fnirt.inputs.in_file = infile fnirt.inputs.ref_file = reffile # test files opt_map = { 'affine_file': ('--aff='), 'inwarp_file': ('--inwarp='), 'in_intensitymap_file': ('--intin='), 'config_file': ('--config='), 'refmask_file': ('--refmask='), 'inmask_file': ('--inmask='), 'field_file': ('--fout='), 'jacobian_file': ('--jout='), 'modulatedref_file': ('--refout='), 'out_intensitymap_file':('--intout='), 'log_file': ('--logout=')} for name, settings in opt_map.items(): fnirt = fsl.FNIRT(in_file = infile, ref_file = reffile, **{name : infile}) if name in ('config_file', 'affine_file','field_file'): cmd = 'fnirt %s%s --in=%s '\ '--logout=%s '\ '--ref=%s --iout=%s' % (settings, infile, infile, log, reffile, iout) elif name in ('refmask_file'): cmd = 'fnirt --in=%s '\ '--logout=%s --ref=%s '\ '%s%s '\ '--iout=%s' % (infile, log, reffile, settings,infile, iout) elif name in ('in_intensitymap_file', 'inwarp_file', 'inmask_file', 'jacobian_file'): cmd = 'fnirt --in=%s '\ '%s%s '\ '--logout=%s --ref=%s '\ '--iout=%s' % (infile, settings,infile, log, reffile, iout) elif name in ('log_file'): cmd = 'fnirt --in=%s '\ '%s%s --ref=%s '\ '--iout=%s' % (infile, settings,infile, reffile, iout) else: cmd = 'fnirt --in=%s '\ '--logout=%s %s%s '\ '--ref=%s --iout=%s' % (infile,log, settings, infile, reffile,iout) yield assert_equal, fnirt.cmdline, cmd teardown_flirt(tmpdir) @skipif(no_fsl) def test_applywarp(): tmpdir, infile, reffile = setup_flirt() opt_map = { 'out_file': ('--out=bar.nii', 'bar.nii'), 'premat': ('--premat=%s'%(reffile), reffile), 'postmat': ('--postmat=%s'%(reffile), reffile), } # in_file, ref_file, field_file mandatory for name, settings in opt_map.items(): awarp = fsl.ApplyWarp(in_file = infile, ref_file = reffile, field_file = reffile, **{name : settings[1]}) if name == 'out_file': realcmd = 'applywarp --warp=%s '\ '--in=%s --out=%s '\ '--ref=%s'%(reffile, infile, settings[1],reffile) else: outfile = awarp._gen_fname(infile, suffix='_warp') realcmd = 'applywarp --warp=%s '\ '--in=%s --out=%s '\ '%s --ref=%s'%(reffile, infile, outfile, settings[0], reffile) yield assert_equal, awarp.cmdline, realcmd awarp = fsl.ApplyWarp(in_file = infile, ref_file = 
reffile, field_file = reffile) teardown_flirt(tmpdir) nipype-0.9.2/nipype/interfaces/fsl/tests/test_utils.py000066400000000000000000000247151227300005300231550ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from tempfile import mkdtemp from shutil import rmtree import numpy as np import nibabel as nb from nipype.testing import (assert_equal, assert_not_equal, assert_raises, skipif) import nipype.interfaces.fsl.utils as fsl from nipype.interfaces.fsl import no_fsl def create_files_in_directory(): outdir = mkdtemp() cwd = os.getcwd() os.chdir(outdir) filelist = ['a.nii', 'b.nii'] for f in filelist: hdr = nb.Nifti1Header() shape = (3, 3, 3, 4) hdr.set_data_shape(shape) img = np.random.random(shape) nb.save(nb.Nifti1Image(img, np.eye(4), hdr), os.path.join(outdir, f)) return filelist, outdir, cwd def clean_directory(outdir, old_wd): if os.path.exists(outdir): rmtree(outdir) os.chdir(old_wd) @skipif(no_fsl) def test_fslroi(): filelist, outdir, cwd = create_files_in_directory() roi = fsl.ExtractROI() # make sure command gets called yield assert_equal, roi.cmd, 'fslroi' # test raising error with mandatory args absent yield assert_raises, ValueError, roi.run # .inputs based parameters setting roi.inputs.in_file = filelist[0] roi.inputs.roi_file = 'foo_roi.nii' roi.inputs.t_min = 10 roi.inputs.t_size = 20 yield assert_equal, roi.cmdline, 'fslroi %s foo_roi.nii 10 20' % filelist[0] # .run based parameter setting roi2 = fsl.ExtractROI(in_file=filelist[0], roi_file='foo2_roi.nii', t_min=20, t_size=40, x_min=3, x_size=30, y_min=40, y_size=10, z_min=5, z_size=20) yield assert_equal, roi2.cmdline, \ 'fslroi %s foo2_roi.nii 3 30 40 10 5 20 20 40' % filelist[0] clean_directory(outdir, cwd) # test arguments for opt_map # Fslroi class doesn't have a filled opt_map{} @skipif(no_fsl) def test_fslmerge(): filelist, outdir, cwd = create_files_in_directory() merger = fsl.Merge() # make sure command gets called yield assert_equal, merger.cmd, 'fslmerge' # test raising error with mandatory args absent yield assert_raises, ValueError, merger.run # .inputs based parameters setting merger.inputs.in_files = filelist merger.inputs.merged_file = 'foo_merged.nii' merger.inputs.dimension = 't' merger.inputs.output_type = 'NIFTI' yield assert_equal, merger.cmdline, 'fslmerge -t foo_merged.nii %s' % ' '.join(filelist) # verify that providing a tr value updates the dimension to tr merger.inputs.tr = 2.25 yield assert_equal, merger.cmdline, 'fslmerge -tr foo_merged.nii %s %.2f' % (' '.join(filelist), 2.25) # .run based parameter setting merger2 = fsl.Merge(in_files=filelist, merged_file='foo_merged.nii', dimension='t', output_type='NIFTI', tr=2.25) yield assert_equal, merger2.cmdline, \ 'fslmerge -tr foo_merged.nii %s %.2f' % (' '.join(filelist), 2.25) clean_directory(outdir, cwd) # test arguments for opt_map # Fslmerge class doesn't have a filled opt_map{} # test fslmath @skipif(no_fsl) def test_fslmaths(): filelist, outdir, cwd = create_files_in_directory() math = fsl.ImageMaths() # make sure command gets called yield assert_equal, math.cmd, 'fslmaths' # test raising error with mandatory args absent yield assert_raises, ValueError, math.run # .inputs based parameters setting math.inputs.in_file = filelist[0] math.inputs.op_string = '-add 2.5 -mul input_volume2' math.inputs.out_file = 'foo_math.nii' yield assert_equal, math.cmdline, \ 'fslmaths %s -add 2.5 -mul input_volume2 foo_math.nii' % filelist[0] # .run based 
parameter setting math2 = fsl.ImageMaths(in_file=filelist[0], op_string='-add 2.5', out_file='foo2_math.nii') yield assert_equal, math2.cmdline, 'fslmaths %s -add 2.5 foo2_math.nii' % filelist[0] # test arguments for opt_map # Fslmath class doesn't have opt_map{} clean_directory(outdir, cwd) # test overlay @skipif(no_fsl) def test_overlay(): filelist, outdir, cwd = create_files_in_directory() overlay = fsl.Overlay() # make sure command gets called yield assert_equal, overlay.cmd, 'overlay' # test raising error with mandatory args absent yield assert_raises, ValueError, overlay.run # .inputs based parameters setting overlay.inputs.stat_image = filelist[0] overlay.inputs.stat_thresh = (2.5, 10) overlay.inputs.background_image = filelist[1] overlay.inputs.auto_thresh_bg = True overlay.inputs.show_negative_stats = True overlay.inputs.out_file = 'foo_overlay.nii' yield assert_equal, overlay.cmdline, \ 'overlay 1 0 %s -a %s 2.50 10.00 %s -2.50 -10.00 foo_overlay.nii' % ( filelist[1], filelist[0], filelist[0]) # .run based parameter setting overlay2 = fsl.Overlay(stat_image=filelist[0], stat_thresh=(2.5, 10), background_image=filelist[1], auto_thresh_bg=True, out_file='foo2_overlay.nii') yield assert_equal, overlay2.cmdline, 'overlay 1 0 %s -a %s 2.50 10.00 foo2_overlay.nii' % ( filelist[1], filelist[0]) clean_directory(outdir, cwd) # test slicer @skipif(no_fsl) def test_slicer(): filelist, outdir, cwd = create_files_in_directory() slicer = fsl.Slicer() # make sure command gets called yield assert_equal, slicer.cmd, 'slicer' # test raising error with mandatory args absent yield assert_raises, ValueError, slicer.run # .inputs based parameters setting slicer.inputs.in_file = filelist[0] slicer.inputs.image_edges = filelist[1] slicer.inputs.intensity_range = (10., 20.) 
slicer.inputs.all_axial = True slicer.inputs.image_width = 750 slicer.inputs.out_file = 'foo_bar.png' yield assert_equal, slicer.cmdline, \ 'slicer %s %s -L -i 10.000 20.000 -A 750 foo_bar.png' % ( filelist[0], filelist[1]) # .run based parameter setting slicer2 = fsl.Slicer( in_file=filelist[0], middle_slices=True, label_slices=False, out_file='foo_bar2.png') yield assert_equal, slicer2.cmdline, 'slicer %s -a foo_bar2.png' % (filelist[0]) clean_directory(outdir, cwd) def create_parfiles(): np.savetxt('a.par', np.random.rand(6, 3)) np.savetxt('b.par', np.random.rand(6, 3)) return ['a.par', 'b.par'] # test fsl_tsplot @skipif(no_fsl) def test_plottimeseries(): filelist, outdir, cwd = create_files_in_directory() parfiles = create_parfiles() plotter = fsl.PlotTimeSeries() # make sure command gets called yield assert_equal, plotter.cmd, 'fsl_tsplot' # test raising error with mandatory args absent yield assert_raises, ValueError, plotter.run # .inputs based parameters setting plotter.inputs.in_file = parfiles[0] plotter.inputs.labels = ['x', 'y', 'z'] plotter.inputs.y_range = (0, 1) plotter.inputs.title = 'test plot' plotter.inputs.out_file = 'foo.png' yield assert_equal, plotter.cmdline, \ ('fsl_tsplot -i %s -a x,y,z -o foo.png -t \'test plot\' -u 1 --ymin=0 --ymax=1' % parfiles[0]) # .run based parameter setting plotter2 = fsl.PlotTimeSeries( in_file=parfiles, title='test2 plot', plot_range=(2, 5), out_file='bar.png') yield assert_equal, plotter2.cmdline, \ 'fsl_tsplot -i %s,%s -o bar.png --start=2 --finish=5 -t \'test2 plot\' -u 1' % tuple( parfiles) clean_directory(outdir, cwd) @skipif(no_fsl) def test_plotmotionparams(): filelist, outdir, cwd = create_files_in_directory() parfiles = create_parfiles() plotter = fsl.PlotMotionParams() # make sure command gets called yield assert_equal, plotter.cmd, 'fsl_tsplot' # test raising error with mandatory args absent yield assert_raises, ValueError, plotter.run # .inputs based parameters setting plotter.inputs.in_file = parfiles[0] plotter.inputs.in_source = 'fsl' plotter.inputs.plot_type = 'rotations' plotter.inputs.out_file = 'foo.png' yield assert_equal, plotter.cmdline, \ ('fsl_tsplot -i %s -o foo.png -t \'MCFLIRT estimated rotations (radians)\' ' '--start=1 --finish=3 -a x,y,z' % parfiles[0]) # .run based parameter setting plotter2 = fsl.PlotMotionParams( in_file=parfiles[1], in_source='spm', plot_type='translations', out_file='bar.png') yield assert_equal, plotter2.cmdline, \ ('fsl_tsplot -i %s -o bar.png -t \'Realign estimated translations (mm)\' ' '--start=1 --finish=3 -a x,y,z' % parfiles[1]) clean_directory(outdir, cwd) @skipif(no_fsl) def test_convertxfm(): filelist, outdir, cwd = create_files_in_directory() cvt = fsl.ConvertXFM() # make sure command gets called yield assert_equal, cvt.cmd, "convert_xfm" # test raising error with mandatory args absent yield assert_raises, ValueError, cvt.run # .inputs based parameters setting cvt.inputs.in_file = filelist[0] cvt.inputs.invert_xfm = True cvt.inputs.out_file = "foo.mat" yield assert_equal, cvt.cmdline, 'convert_xfm -omat foo.mat -inverse %s' % filelist[0] # constructor based parameter setting cvt2 = fsl.ConvertXFM( in_file=filelist[0], in_file2=filelist[1], concat_xfm=True, out_file="bar.mat") yield assert_equal, cvt2.cmdline, \ "convert_xfm -omat bar.mat -concat %s %s" % (filelist[1], filelist[0]) clean_directory(outdir, cwd) @skipif(no_fsl) def test_swapdims(): files, testdir, origdir = create_files_in_directory() swap = fsl.SwapDimensions() # Test the underlying command yield assert_equal, 
swap.cmd, "fslswapdim" # Test mandatory args args = [dict(in_file=files[0]), dict(new_dims=("x", "y", "z"))] for arg in args: wontrun = fsl.SwapDimensions(**arg) yield assert_raises, ValueError, wontrun.run # Now test a basic command line swap.inputs.in_file = files[0] swap.inputs.new_dims = ("x", "y", "z") yield assert_equal, swap.cmdline, "fslswapdim a.nii x y z %s" % os.path.realpath(os.path.join(testdir, "a_newdims.nii")) # Test that we can set an output name swap.inputs.out_file = "b.nii" yield assert_equal, swap.cmdline, "fslswapdim a.nii x y z b.nii" # Clean up clean_directory(testdir, origdir) nipype-0.9.2/nipype/interfaces/fsl/utils.py000066400000000000000000001641651227300005300207600ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL `_ command line tools. This was written to work with FSL version 4.1.4. Examples -------- See the docstrings of the individual classes for examples. Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os from glob import glob import warnings import numpy as np from .base import FSLCommand, FSLCommandInputSpec, Info from ..base import (traits, TraitedSpec, OutputMultiPath, File, isdefined) from ...utils.filemanip import (load_json, save_json, split_filename, fname_presuffix) warn = warnings.warn warnings.filterwarnings('always', category=UserWarning) class ImageMeantsInputSpec(FSLCommandInputSpec): in_file = File(exists=True, desc='input file for computing the average timeseries', argstr='-i %s', position=0, mandatory=True) out_file = File(desc='name of output text matrix', argstr='-o %s', genfile=True, hash_files=False) mask = File(exists=True, desc='input 3D mask', argstr='-m %s') spatial_coord = traits.List(traits.Int, desc=(' requested spatial coordinate ' '(instead of mask)'), argstr='-c %s') use_mm = traits.Bool(desc=('use mm instead of voxel coordinates (for -c ' 'option)'), argstr='--usemm') show_all = traits.Bool(desc=('show all voxel time series (within mask) ' 'instead of averaging'), argstr='--showall') eig = traits.Bool(desc=('calculate Eigenvariate(s) instead of mean (output ' 'will have 0 mean)'), argstr='--eig') order = traits.Int(1, desc='select number of Eigenvariates', argstr='--order=%d', usedefault=True) nobin = traits.Bool(desc=('do not binarise the mask for calculation of ' 'Eigenvariates'), argstr='--no_bin') transpose = traits.Bool(desc=('output results in transpose format (one row ' 'per voxel/mean)'), argstr='--transpose') class ImageMeantsOutputSpec(TraitedSpec): out_file = File(exists=True, desc="path/name of output text matrix") class ImageMeants(FSLCommand): """ Use fslmeants for printing the average timeseries (intensities) to the screen (or saves to a file). 
The average is taken over all voxels in the mask (or all voxels in the image if no mask is specified) """ _cmd = 'fslmeants' input_spec = ImageMeantsInputSpec output_spec = ImageMeantsOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_file if not isdefined(outputs['out_file']): outputs['out_file'] = self._gen_fname(self.inputs.in_file, suffix='_ts', ext='.txt', change_ext=True) outputs['out_file'] = os.path.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()[name] return None class SmoothInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr="%s", position=0, mandatory=True) fwhm = traits.Float(argstr="-kernel gauss %f -fmean", position=1, mandatory=True) smoothed_file = File( argstr="%s", position=2, genfile=True, hash_files=False) class SmoothOutputSpec(TraitedSpec): smoothed_file = File(exists=True) class Smooth(FSLCommand): '''Use fslmaths to smooth the image ''' input_spec = SmoothInputSpec output_spec = SmoothOutputSpec _cmd = 'fslmaths' def _gen_filename(self, name): if name == 'smoothed_file': return self._list_outputs()['smoothed_file'] return None def _list_outputs(self): outputs = self._outputs().get() outputs['smoothed_file'] = self.inputs.smoothed_file if not isdefined(outputs['smoothed_file']): outputs['smoothed_file'] = self._gen_fname(self.inputs.in_file, suffix='_smooth') outputs['smoothed_file'] = os.path.abspath(outputs['smoothed_file']) return outputs def _format_arg(self, name, trait_spec, value): if name == 'fwhm': sigma = float(value) / np.sqrt(8 * np.log(2)) return super(Smooth, self)._format_arg(name, trait_spec, sigma) return super(Smooth, self)._format_arg(name, trait_spec, value) class MergeInputSpec(FSLCommandInputSpec): in_files = traits.List(File(exists=True), argstr="%s", position=2, mandatory=True) dimension = traits.Enum('t', 'x', 'y', 'z', 'a', argstr="-%s", position=0, desc=("dimension along which to merge, optionally " "set tr input when dimension is t"), mandatory=True) tr = traits.Float(position=-1, argstr='%.2f', desc=('use to specify TR in seconds (default is 1.00 ' 'sec), overrides dimension and sets it to tr')) merged_file = File(argstr="%s", position=1, name_source='in_files', name_template='%s_merged', hash_files=False) class MergeOutputSpec(TraitedSpec): merged_file = File(exists=True) class Merge(FSLCommand): """Use fslmerge to concatenate images Images can be concatenated across time, x, y, or z dimensions. Across the time (t) dimension the TR is set by default to 1 sec. Note: to set the TR to a different value, specify 't' for dimension and specify the TR value in seconds for the tr input. The dimension will be automatically updated to 'tr'. 
Examples -------- >>> from nipype.interfaces.fsl import Merge >>> merger = Merge() >>> merger.inputs.in_files = ['functional2.nii', 'functional3.nii'] >>> merger.inputs.dimension = 't' >>> merger.inputs.output_type = 'NIFTI_GZ' >>> merger.cmdline 'fslmerge -t functional2_merged.nii.gz functional2.nii functional3.nii' >>> merger.inputs.tr = 2.25 >>> merger.cmdline 'fslmerge -tr functional2_merged.nii.gz functional2.nii functional3.nii 2.25' """ _cmd = 'fslmerge' input_spec = MergeInputSpec output_spec = MergeOutputSpec def _format_arg(self, name, spec, value): if name == 'tr': if self.inputs.dimension != 't': raise ValueError('When TR is specified, dimension must be t') return spec.argstr % value if name == 'dimension': if isdefined(self.inputs.tr): return '-tr' return spec.argstr % value return super(Merge, self)._format_arg(name, spec, value) class ExtractROIInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr="%s", position=0, desc="input file", mandatory=True) roi_file = File(argstr="%s", position=1, desc="output file", genfile=True, hash_files=False) x_min = traits.Int(argstr="%d", position=2) x_size = traits.Int(argstr="%d", position=3) y_min = traits.Int(argstr="%d", position=4) y_size = traits.Int(argstr="%d", position=5) z_min = traits.Int(argstr="%d", position=6) z_size = traits.Int(argstr="%d", position=7) t_min = traits.Int(argstr="%d", position=8) t_size = traits.Int(argstr="%d", position=9) _crop_xor = ['x_min', 'x_size', 'y_min', 'y_size', 'z_min', 'z_size', 't_min', 't_size'] crop_list = traits.List(traits.Tuple(traits.Int, traits.Int), argstr="%s", position=2, xor=_crop_xor, desc="list of two tuples specifying crop options") class ExtractROIOutputSpec(TraitedSpec): roi_file = File(exists=True) class ExtractROI(FSLCommand): """Uses FSL Fslroi command to extract region of interest (ROI) from an image. You can a) take a 3D ROI from a 3D data set (or if it is 4D, the same ROI is taken from each time point and a new 4D data set is created), b) extract just some time points from a 4D data set, or c) control time and space limits to the ROI. Note that the arguments are minimum index and size (not maximum index). So to extract voxels 10 to 12 inclusive you would specify 10 and 3 (not 10 and 12). Examples -------- >>> from nipype.interfaces.fsl import ExtractROI >>> from nipype.testing import anatfile >>> fslroi = ExtractROI(in_file=anatfile, roi_file='bar.nii', t_min=0, ... t_size=1) >>> fslroi.cmdline == 'fslroi %s bar.nii 0 1' % anatfile True """ _cmd = 'fslroi' input_spec = ExtractROIInputSpec output_spec = ExtractROIOutputSpec def _format_arg(self, name, spec, value): if name == "crop_list": return " ".join(map(str, sum(map(list, value), []))) return super(ExtractROI, self)._format_arg(name, spec, value) def _list_outputs(self): """Create a Bunch which contains all possible files generated by running the interface. Some files are always generated, others depending on which ``inputs`` options are set. Returns ------- outputs : Bunch object Bunch object containing all possible files generated by interface object. 
If None, file was not generated Else, contains path, filename of generated outputfile """ outputs = self._outputs().get() outputs['roi_file'] = self.inputs.roi_file if not isdefined(outputs['roi_file']): outputs['roi_file'] = self._gen_fname(self.inputs.in_file, suffix='_roi') outputs['roi_file'] = os.path.abspath(outputs['roi_file']) return outputs def _gen_filename(self, name): if name == 'roi_file': return self._list_outputs()[name] return None class SplitInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr="%s", position=0, mandatory=True, desc="input filename") out_base_name = traits.Str(argstr="%s", position=1, desc="outputs prefix") dimension = traits.Enum('t', 'x', 'y', 'z', argstr="-%s", position=2, mandatory=True, desc="dimension along which the file will be split") class SplitOutputSpec(TraitedSpec): out_files = OutputMultiPath(File(exists=True)) class Split(FSLCommand): """Uses FSL Fslsplit command to separate a volume into images in time, x, y or z dimension. """ _cmd = 'fslsplit' input_spec = SplitInputSpec output_spec = SplitOutputSpec def _list_outputs(self): """Create a Bunch which contains all possible files generated by running the interface. Some files are always generated, others depending on which ``inputs`` options are set. Returns ------- outputs : Bunch object Bunch object containing all possible files generated by interface object. If None, file was not generated Else, contains path, filename of generated outputfile """ outputs = self._outputs().get() ext = Info.output_type_to_ext(self.inputs.output_type) outbase = 'vol*' if isdefined(self.inputs.out_base_name): outbase = '%s*' % self.inputs.out_base_name outputs['out_files'] = sorted(glob(os.path.join(os.getcwd(), outbase + ext))) return outputs class ImageMathsInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr="%s", mandatory=True, position=1) in_file2 = File(exists=True, argstr="%s", position=3) out_file = File(argstr="%s", position=4, genfile=True, hash_files=False) op_string = traits.Str(argstr="%s", position=2, desc="string defining the operation, i. e. -add") suffix = traits.Str(desc="out_file suffix") out_data_type = traits.Enum('char', 'short', 'int', 'float', 'double', 'input', argstr="-odt %s", position=5, desc=("output datatype, one of (char, short, " "int, float, double, input)")) class ImageMathsOutputSpec(TraitedSpec): out_file = File(exists=True) class ImageMaths(FSLCommand): """Use FSL fslmaths command to allow mathematical manipulation of images `FSL info `_ Examples -------- >>> from nipype import fsl >>> from nipype.testing import anatfile >>> maths = fsl.ImageMaths(in_file=anatfile, op_string= '-add 5', ... 
out_file='foo_maths.nii') >>> maths.cmdline == 'fslmaths %s -add 5 foo_maths.nii' % anatfile True """ input_spec = ImageMathsInputSpec output_spec = ImageMathsOutputSpec _cmd = 'fslmaths' def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()[name] return None def _parse_inputs(self, skip=None): return super(ImageMaths, self)._parse_inputs(skip=['suffix']) def _list_outputs(self): suffix = '_maths' # ohinds: build suffix if isdefined(self.inputs.suffix): suffix = self.inputs.suffix outputs = self._outputs().get() outputs['out_file'] = self.inputs.out_file if not isdefined(outputs['out_file']): outputs['out_file'] = self._gen_fname(self.inputs.in_file, suffix=suffix) outputs['out_file'] = os.path.abspath(outputs['out_file']) return outputs class FilterRegressorInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr="-i %s", desc="input file name (4D image)", mandatory=True, position=1) out_file = File(argstr="-o %s", desc="output file name for the filtered data", genfile=True, position=2, hash_files=False) design_file = File(exists=True, argstr="-d %s", position=3, mandatory=True, desc=("name of the matrix with time courses (e.g. GLM " "design or MELODIC mixing matrix)")) filter_columns = traits.List(traits.Int, argstr="-f '%s'", xor=["filter_all"], mandatory=True, position=4, desc=("(1-based) column indices to filter out " "of the data")) filter_all = traits.Bool(mandatory=True, argstr="-f '%s'", xor=["filter_columns"], position=4, desc=("use all columns in the design file in " "denoising")) mask = File(exists=True, argstr="-m %s", desc="mask image file name") var_norm = traits.Bool(argstr="--vn", desc="perform variance-normalization on data") out_vnscales = traits.Bool(argstr="--out_vnscales", desc=("output scaling factors for variance " "normalization")) class FilterRegressorOutputSpec(TraitedSpec): out_file = File(exists=True, desc="output file name for the filtered data") class FilterRegressor(FSLCommand): """Data de-noising by regressing out part of a design matrix Uses simple OLS regression on 4D images """ input_spec = FilterRegressorInputSpec output_spec = FilterRegressorOutputSpec _cmd = 'fsl_regfilt' def _format_arg(self, name, trait_spec, value): if name == 'filter_columns': return trait_spec.argstr % ",".join(map(str, value)) elif name == "filter_all": design = np.loadtxt(self.inputs.design_file) try: n_cols = design.shape[1] except IndexError: n_cols = 1 return trait_spec.argstr % ",".join(map(str, range(1, n_cols + 1))) return super(FilterRegressor, self)._format_arg(name, trait_spec, value) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_file if not isdefined(outputs['out_file']): outputs['out_file'] = self._gen_fname( self.inputs.in_file, suffix='_regfilt') outputs['out_file'] = os.path.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()[name] return None class ImageStatsInputSpec(FSLCommandInputSpec): split_4d = traits.Bool(argstr='-t', position=1, desc=('give a separate output line for each 3D ' 'volume of a 4D timeseries')) in_file = File(exists=True, argstr="%s", mandatory=True, position=2, desc='input file to generate stats of') op_string = traits.Str(argstr="%s", mandatory=True, position=3, desc=("string defining the operation, options are " "applied in order, e.g. 
-M -l 10 -M will " "report the non-zero mean, apply a threshold " "and then report the new nonzero mean")) mask_file = File(exists=True, argstr="", desc='mask file used for option -k %s') class ImageStatsOutputSpec(TraitedSpec): out_stat = traits.Any(desc='stats output') class ImageStats(FSLCommand): """Use FSL fslstats command to calculate stats from images `FSL info `_ Examples -------- >>> from nipype.interfaces.fsl import ImageStats >>> from nipype.testing import funcfile >>> stats = ImageStats(in_file=funcfile, op_string= '-M') >>> stats.cmdline == 'fslstats %s -M'%funcfile True """ input_spec = ImageStatsInputSpec output_spec = ImageStatsOutputSpec _cmd = 'fslstats' def _format_arg(self, name, trait_spec, value): if name == 'mask_file': return '' if name == 'op_string': if '-k %s' in self.inputs.op_string: if isdefined(self.inputs.mask_file): return self.inputs.op_string % self.inputs.mask_file else: raise ValueError( '-k %s option in op_string requires mask_file') return super(ImageStats, self)._format_arg(name, trait_spec, value) def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() # local caching for backward compatibility outfile = os.path.join(os.getcwd(), 'stat_result.json') if runtime is None: try: out_stat = load_json(outfile)['stat'] except IOError: return self.run().outputs else: out_stat = [] for line in runtime.stdout.split('\n'): if line: values = line.split() if len(values) > 1: out_stat.append([float(val) for val in values]) else: out_stat.extend([float(val) for val in values]) if len(out_stat) == 1: out_stat = out_stat[0] save_json(outfile, dict(stat=out_stat)) outputs.out_stat = out_stat return outputs class AvScaleInputSpec(FSLCommandInputSpec): mat_file = File(exists=True, argstr="%s", desc='mat file to read', position=0) class AvScaleOutputSpec(TraitedSpec): rotation_translation_matrix = traits.Any( desc='Rotation and Translation Matrix') scales = traits.Any(desc='Scales (x,y,z)') skews = traits.Any(desc='Skews') average_scaling = traits.Any(desc='Average Scaling') determinant = traits.Any(desc='Determinant') forward_half_transform = traits.Any(desc='Forward Half Transform') backward_half_transform = traits.Any(desc='Backwards Half Transform') left_right_orientation_preserved = traits.Bool( desc='True if LR orientation preserved') class AvScale(FSLCommand): """Use FSL avscale command to extract info from mat file output of FLIRT Examples -------- avscale = AvScale() avscale.inputs.mat_file = 'flirt.mat' res = avscale.run() # doctest: +SKIP """ input_spec = AvScaleInputSpec output_spec = AvScaleOutputSpec _cmd = 'avscale' def _format_arg(self, name, trait_spec, value): return super(AvScale, self)._format_arg(name, trait_spec, value) def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() def lines_to_float(lines): out = [] for line in lines: values = line.split() out.append([float(val) for val in values]) return out out = runtime.stdout.split('\n') outputs.rotation_translation_matrix = lines_to_float(out[1:5]) outputs.scales = lines_to_float([out[6].split(" = ")[1]]) outputs.skews = lines_to_float([out[8].split(" = ")[1]]) outputs.average_scaling = lines_to_float([out[10].split(" = ")[1]]) outputs.determinant = lines_to_float([out[12].split(" = ")[1]]) if out[13].split(": ")[1] == 'preserved': outputs.left_right_orientation_preserved = True else: outputs.left_right_orientation_preserved = False outputs.forward_half_transform = lines_to_float(out[16:20]) outputs.backward_half_transform = 
lines_to_float(out[22:-1]) return outputs class OverlayInputSpec(FSLCommandInputSpec): transparency = traits.Bool(desc='make overlay colors semi-transparent', position=1, argstr='%s', usedefault=True, default_value=True) out_type = traits.Enum('float', 'int', position=2, usedefault=True, argstr='%s', desc='write output with float or int') use_checkerboard = traits.Bool(desc='use checkerboard mask for overlay', argstr='-c', position=3) background_image = File(exists=True, position=4, mandatory=True, argstr='%s', desc='image to use as background') _xor_inputs = ('auto_thresh_bg', 'full_bg_range', 'bg_thresh') auto_thresh_bg = traits.Bool(desc=('automatically threhsold the background ' 'image'), argstr='-a', position=5, xor=_xor_inputs, mandatory=True) full_bg_range = traits.Bool(desc='use full range of background image', argstr='-A', position=5, xor=_xor_inputs, mandatory=True) bg_thresh = traits.Tuple(traits.Float, traits.Float, argstr='%.3f %.3f', position=5, desc='min and max values for background intensity', xor=_xor_inputs, mandatory=True) stat_image = File(exists=True, position=6, mandatory=True, argstr='%s', desc='statistical image to overlay in color') stat_thresh = traits.Tuple(traits.Float, traits.Float, position=7, mandatory=True, argstr='%.2f %.2f', desc=('min and max values for the statistical ' 'overlay')) show_negative_stats = traits.Bool(desc=('display negative statistics in ' 'overlay'), xor=['stat_image2'], argstr='%s', position=8) stat_image2 = File(exists=True, position=9, xor=['show_negative_stats'], argstr='%s', desc='second statistical image to overlay in color') stat_thresh2 = traits.Tuple(traits.Float, traits.Float, position=10, desc=('min and max values for second ' 'statistical overlay'), argstr='%.2f %.2f') out_file = File(desc='combined image volume', position=-1, argstr='%s', genfile=True, hash_files=False) class OverlayOutputSpec(TraitedSpec): out_file = File(exists=True, desc='combined image volume') class Overlay(FSLCommand): """ Use FSL's overlay command to combine background and statistical images into one volume Examples -------- >>> from nipype.interfaces import fsl >>> combine = fsl.Overlay() >>> combine.inputs.background_image = 'mean_func.nii.gz' >>> combine.inputs.auto_thresh_bg = True >>> combine.inputs.stat_image = 'zstat1.nii.gz' >>> combine.inputs.stat_thresh = (3.5, 10) >>> combine.inputs.show_negative_stats = True >>> res = combine.run() #doctest: +SKIP """ _cmd = 'overlay' input_spec = OverlayInputSpec output_spec = OverlayOutputSpec def _format_arg(self, name, spec, value): if name == 'transparency': if value: return '1' else: return '0' if name == 'out_type': if value == 'float': return '0' else: return '1' if name == 'show_negative_stats': return '%s %.2f %.2f' % (self.inputs.stat_image, self.inputs.stat_thresh[0] * -1, self.inputs.stat_thresh[1] * -1) return super(Overlay, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): if isdefined(self.inputs.stat_image2) and ( not isdefined(self.inputs.show_negative_stats) or not self.inputs.show_negative_stats): stem = "%s_and_%s" % (split_filename(self.inputs.stat_image)[1], split_filename(self.inputs.stat_image2)[1]) else: stem = split_filename(self.inputs.stat_image)[1] out_file = self._gen_fname(stem, suffix='_overlay') outputs['out_file'] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()['out_file'] return None class 
SlicerInputSpec(FSLCommandInputSpec): in_file = File(exists=True, position=1, argstr='%s', mandatory=True, desc='input volume') image_edges = File(exists=True, position=2, argstr='%s', desc=('volume to display edge overlay for (useful for ' 'checking registration)')) label_slices = traits.Bool( position=3, argstr='-L', desc='display slice number', usedefault=True, default_value=True) colour_map = File(exists=True, position=4, argstr='-l %s', desc=('use different colour map from that stored in ' 'nifti header')) intensity_range = traits.Tuple(traits.Float, traits.Float, position=5, argstr='-i %.3f %.3f', desc='min and max intensities to display') threshold_edges = traits.Float(position=6, argstr='-e %.3f', desc='use threshold for edges') dither_edges = traits.Bool(position=7, argstr='-t', desc=('produce semi-transparent (dithered) ' 'edges')) nearest_neighbour = traits.Bool(position=8, argstr='-n', desc=('use nearest neighbour interpolation ' 'for output')) show_orientation = traits.Bool(position=9, argstr='%s', usedefault=True, default_value=True, desc='label left-right orientation') _xor_options = ('single_slice', 'middle_slices', 'all_axial', 'sample_axial') single_slice = traits.Enum('x', 'y', 'z', position=10, argstr='-%s', xor=_xor_options, requires=['slice_number'], desc=('output picture of single slice in the x, ' 'y, or z plane')) slice_number = traits.Int(position=11, argstr='-%d', desc='slice number to save in picture') middle_slices = traits.Bool(position=10, argstr='-a', xor=_xor_options, desc=('output picture of mid-sagittal, axial, ' 'and coronal slices')) all_axial = traits.Bool(position=10, argstr='-A', xor=_xor_options, requires=['image_width'], desc='output all axial slices into one picture') sample_axial = traits.Int(position=10, argstr='-S %d', xor=_xor_options, requires=['image_width'], desc=('output every n axial slices into one ' 'picture')) image_width = traits.Int(position=-2, argstr='%d', desc='max picture width') out_file = File(position=-1, genfile=True, argstr='%s', desc='picture to write', hash_files=False) scaling = traits.Float(position=0, argstr='-s %f', desc='image scale') class SlicerOutputSpec(TraitedSpec): out_file = File(exists=True, desc='picture to write') class Slicer(FSLCommand): """Use FSL's slicer command to output a png image from a volume.
Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> slice = fsl.Slicer() >>> slice.inputs.in_file = example_data('functional.nii') >>> slice.inputs.all_axial = True >>> slice.inputs.image_width = 750 >>> res = slice.run() #doctest: +SKIP """ _cmd = 'slicer' input_spec = SlicerInputSpec output_spec = SlicerOutputSpec def _format_arg(self, name, spec, value): if name == 'show_orientation': if value: return '' else: return '-u' elif name == "label_slices": if value: return '-L' else: return '' return super(Slicer, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): out_file = self._gen_fname(self.inputs.in_file, ext='.png') outputs['out_file'] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()['out_file'] return None class PlotTimeSeriesInputSpec(FSLCommandInputSpec): in_file = traits.Either(File(exists=True), traits.List(File(exists=True)), mandatory=True, argstr="%s", position=1, desc=("file or list of files with columns of " "timecourse information")) plot_start = traits.Int(argstr="--start=%d", xor=("plot_range",), desc="first column from in-file to plot") plot_finish = traits.Int(argstr="--finish=%d", xor=("plot_range",), desc="final column from in-file to plot") plot_range = traits.Tuple(traits.Int, traits.Int, argstr="%s", xor=("plot_start", "plot_finish"), desc=("first and last columns from the in-file " "to plot")) title = traits.Str(argstr="%s", desc="plot title") legend_file = File(exists=True, argstr="--legend=%s", desc="legend file") labels = traits.Either(traits.Str, traits.List(traits.Str), argstr="%s", desc="label or list of labels") y_min = traits.Float(argstr="--ymin=%.2f", desc="minumum y value", xor=("y_range",)) y_max = traits.Float(argstr="--ymax=%.2f", desc="maximum y value", xor=("y_range",)) y_range = traits.Tuple(traits.Float, traits.Float, argstr="%s", xor=("y_min", "y_max"), desc="min and max y axis values") x_units = traits.Int(argstr="-u %d", usedefault=True, default_value=1, desc=("scaling units for x-axis (between 1 and length " "of in file)")) plot_size = traits.Tuple(traits.Int, traits.Int, argstr="%s", desc="plot image height and width") x_precision = traits.Int(argstr="--precision=%d", desc="precision of x-axis labels") sci_notation = traits.Bool(argstr="--sci", desc="switch on scientific notation") out_file = File(argstr="-o %s", genfile=True, desc="image to write", hash_files=False) class PlotTimeSeriesOutputSpec(TraitedSpec): out_file = File(exists=True, desc='image to write') class PlotTimeSeries(FSLCommand): """Use fsl_tsplot to create images of time course plots. 
Examples -------- >>> import nipype.interfaces.fsl as fsl >>> plotter = fsl.PlotTimeSeries() >>> plotter.inputs.in_file = 'functional.par' >>> plotter.inputs.title = 'Functional timeseries' >>> plotter.inputs.labels = ['run1', 'run2'] >>> plotter.run() #doctest: +SKIP """ _cmd = "fsl_tsplot" input_spec = PlotTimeSeriesInputSpec output_spec = PlotTimeSeriesOutputSpec def _format_arg(self, name, spec, value): if name == "in_file": if isinstance(value, list): args = ",".join(value) return "-i %s" % args else: return "-i %s" % value elif name == "labels": if isinstance(value, list): args = ",".join(value) return "-a %s" % args else: return "-a %s" % value elif name == "title": return "-t \'%s\'" % value elif name == "plot_range": return "--start=%d --finish=%d" % value elif name == "y_range": return "--ymin=%d --ymax=%d" % value elif name == "plot_size": return "-h %d -w %d" % value return super(PlotTimeSeries, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): if isinstance(self.inputs.in_file, list): infile = self.inputs.in_file[0] else: infile = self.inputs.in_file out_file = self._gen_fname(infile, ext='.png') outputs['out_file'] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()['out_file'] return None class PlotMotionParamsInputSpec(FSLCommandInputSpec): in_file = traits.Either(File(exists=True), traits.List(File(exists=True)), mandatory=True, argstr="%s", position=1, desc="file with motion parameters") in_source = traits.Enum("spm", "fsl", mandatory=True, desc=("which program generated the motion " "parameter file - fsl, spm")) plot_type = traits.Enum("rotations", "translations", "displacement", argstr="%s", mandatory=True, desc=("which motion type to plot - rotations, " "translations, displacement")) plot_size = traits.Tuple(traits.Int, traits.Int, argstr="%s", desc="plot image height and width") out_file = File(argstr="-o %s", genfile=True, desc="image to write", hash_files=False) class PlotMotionParamsOutputSpec(TraitedSpec): out_file = File(exists=True, desc='image to write') class PlotMotionParams(FSLCommand): """Use fsl_tsplot to plot the estimated motion parameters from a realignment program. Examples -------- >>> import nipype.interfaces.fsl as fsl >>> plotter = fsl.PlotMotionParams() >>> plotter.inputs.in_file = 'functional.par' >>> plotter.inputs.in_source = 'fsl' >>> plotter.inputs.plot_type = 'rotations' >>> res = plotter.run() #doctest: +SKIP Notes ----- The 'in_source' attribute determines the order of columns that are expected in the source file. FSL prints motion parameters in the order rotations, translations, while SPM prints them in the opposite order. This interface should be able to plot timecourses of motion parameters generated from other sources as long as they fall under one of these two patterns. For more flexibilty, see the :class:`fsl.PlotTimeSeries` interface. 
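    For example (file name hypothetical), an SPM realignment parameter file such
    as ``rp_functional.txt``, which stores translations before rotations, would
    be plotted by setting ``in_source`` to ``'spm'``:

    >>> plotter = fsl.PlotMotionParams()
    >>> plotter.inputs.in_file = 'rp_functional.txt'  # doctest: +SKIP
    >>> plotter.inputs.in_source = 'spm'
    >>> plotter.inputs.plot_type = 'translations'
    >>> res = plotter.run()  # doctest: +SKIP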
""" _cmd = 'fsl_tsplot' input_spec = PlotMotionParamsInputSpec output_spec = PlotMotionParamsOutputSpec def _format_arg(self, name, spec, value): if name == "plot_type": source = self.inputs.in_source if self.inputs.plot_type == 'displacement': title = '-t \'MCFLIRT estimated mean displacement (mm)\'' labels = '-a abs,rel' return '%s %s' % (title, labels) # Get the right starting and ending position depending on source # package sfdict = dict(fsl_rot=(1, 3), fsl_tra=( 4, 6), spm_rot=(4, 6), spm_tra=(1, 3)) # Format the title properly sfstr = "--start=%d --finish=%d" % sfdict[ "%s_%s" % (source, value[:3])] titledict = dict(fsl="MCFLIRT", spm="Realign") unitdict = dict(rot="radians", tra="mm") title = "\'%s estimated %s (%s)\'" % ( titledict[source], value, unitdict[value[:3]]) return "-t %s %s -a x,y,z" % (title, sfstr) elif name == "plot_size": return "-h %d -w %d" % value elif name == "in_file": if isinstance(value, list): args = ",".join(value) return "-i %s" % args else: return "-i %s" % value return super(PlotMotionParams, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): if isinstance(self.inputs.in_file, list): infile = self.inputs.in_file[0] else: infile = self.inputs.in_file plttype = dict(rot="rot", tra="trans", dis="disp")[ self.inputs.plot_type[:3]] out_file = fname_presuffix( infile, suffix="_%s.png" % plttype, use_ext=False) outputs['out_file'] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()['out_file'] return None class ConvertXFMInputSpec(FSLCommandInputSpec): in_file = File(exists=True, mandatory=True, argstr="%s", position=-1, desc="input transformation matrix") in_file2 = File(exists=True, argstr="%s", position=-2, desc=("second input matrix (for use with fix_scale_skew or " "concat_xfm")) _options = ["invert_xfm", "concat_xfm", "fix_scale_skew"] invert_xfm = traits.Bool(argstr="-inverse", position=-3, xor=_options, desc="invert input transformation") concat_xfm = traits.Bool(argstr="-concat", position=-3, xor=_options, requires=["in_file2"], desc=("write joint transformation of two input " "matrices")) fix_scale_skew = traits.Bool(argstr="-fixscaleskew", position=-3, xor=_options, requires=["in_file2"], desc=("use secondary matrix to fix scale and " "skew")) out_file = File(genfile=True, argstr="-omat %s", position=1, desc="final transformation matrix", hash_files=False) class ConvertXFMOutputSpec(TraitedSpec): out_file = File(exists=True, desc="output transformation matrix") class ConvertXFM(FSLCommand): """Use the FSL utility convert_xfm to modify FLIRT transformation matrices. 
Examples -------- >>> import nipype.interfaces.fsl as fsl >>> invt = fsl.ConvertXFM() >>> invt.inputs.in_file = "flirt.mat" >>> invt.inputs.invert_xfm = True >>> invt.inputs.out_file = 'flirt_inv.mat' >>> invt.cmdline 'convert_xfm -omat flirt_inv.mat -inverse flirt.mat' """ _cmd = "convert_xfm" input_spec = ConvertXFMInputSpec output_spec = ConvertXFMOutputSpec def _list_outputs(self): outputs = self._outputs().get() outfile = self.inputs.out_file if not isdefined(outfile): _, infile1, _ = split_filename(self.inputs.in_file) if self.inputs.invert_xfm: outfile = fname_presuffix(infile1, suffix="_inv.mat", newpath=os.getcwd(), use_ext=False) else: if self.inputs.concat_xfm: _, infile2, _ = split_filename(self.inputs.in_file2) outfile = fname_presuffix("%s_%s" % (infile1, infile2), suffix=".mat", newpath=os.getcwd(), use_ext=False) else: outfile = fname_presuffix(infile1, suffix="_fix.mat", newpath=os.getcwd(), use_ext=False) outputs["out_file"] = os.path.abspath(outfile) return outputs def _gen_filename(self, name): if name == "out_file": return self._list_outputs()["out_file"] return None class SwapDimensionsInputSpec(FSLCommandInputSpec): in_file = File(exists=True, mandatory=True, argstr="%s", position="1", desc="input image") _dims = ["x", "-x", "y", "-y", "z", "-z", "RL", "LR", "AP", "PA", "IS", "SI"] new_dims = traits.Tuple(traits.Enum(_dims), traits.Enum(_dims), traits.Enum(_dims), argstr="%s %s %s", mandatory=True, desc="3-tuple of new dimension order") out_file = File(genfile=True, argstr="%s", desc="image to write", hash_files=False) class SwapDimensionsOutputSpec(TraitedSpec): out_file = File(exists=True, desc="image with new dimensions") class SwapDimensions(FSLCommand): """Use fslswapdim to alter the orientation of an image. This interface accepts a three-tuple corresponding to the new orientation. You may either provide dimension ids in the form of (-)x, (-)y, or (-z), or nifti-syle dimension codes (RL, LR, AP, PA, IS, SI). """ _cmd = "fslswapdim" input_spec = SwapDimensionsInputSpec output_spec = SwapDimensionsOutputSpec def _list_outputs(self): outputs = self._outputs().get() outputs["out_file"] = self.inputs.out_file if not isdefined(self.inputs.out_file): outputs["out_file"] = self._gen_fname(self.inputs.in_file, suffix='_newdims') outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): if name == "out_file": return self._list_outputs()["out_file"] return None class PowerSpectrumInputSpec(FSLCommandInputSpec): # We use position args here as list indices - so a negative number # will put something on the end in_file = File(exists=True, desc="input 4D file to estimate the power spectrum", argstr='%s', position=0, mandatory=True) out_file = File(desc='name of output 4D file for power spectrum', argstr='%s', position=1, genfile=True, hash_files=False) class PowerSpectrumOutputSpec(TraitedSpec): out_file = File( exists=True, desc="path/name of the output 4D power spectrum file") class PowerSpectrum(FSLCommand): """Use FSL PowerSpectrum command for power spectrum estimation. 
Examples -------- >>> from nipype.interfaces import fsl >>> pspec = fsl.PowerSpectrum() >>> pspec.inputs.in_file = 'functional.nii' >>> res = pspec.run() # doctest: +SKIP """ _cmd = 'fslpspec' input_spec = PowerSpectrumInputSpec output_spec = PowerSpectrumOutputSpec def _gen_outfilename(self): out_file = self.inputs.out_file if not isdefined(out_file) and isdefined(self.inputs.in_file): out_file = self._gen_fname(self.inputs.in_file, suffix='_ps') return out_file def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name == 'out_file': return self._gen_outfilename() return None class SigLossInputSpec(FSLCommandInputSpec): in_file = File(mandatory=True, exists=True, argstr='-i %s', desc='b0 fieldmap file') out_file = File(argstr='-s %s', desc='output signal loss estimate file', genfile=True) mask_file = File(exists=True, argstr='-m %s', desc='brain mask file') echo_time = traits.Float(argstr='--te=%f', desc='echo time in seconds') slice_direction = traits.Enum('x','y','z', argstr='-d %s', desc='slicing direction') class SigLossOuputSpec(TraitedSpec): out_file = File(exists=True, desc='signal loss estimate file') class SigLoss(FSLCommand): """Estimates signal loss from a field map (in rad/s) Examples -------- >>> sigloss = SigLoss() >>> sigloss.inputs.in_file = "phase.nii" >>> sigloss.inputs.echo_time = 0.03 >>> res = sigloss.run() # doctest: +SKIP """ input_spec = SigLossInputSpec output_spec = SigLossOuputSpec _cmd = 'sigloss' def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_file if not isdefined(outputs['out_file']) and \ isdefined(self.inputs.in_file): outputs['out_file']=self._gen_fname(self.inputs.in_file, suffix='_sigloss') return outputs def _gen_filename(self, name): if name=='out_file': return self._list_outputs()['out_file'] return None class Reorient2StdInputSpec(FSLCommandInputSpec): in_file = File(exists=True, mandatory=True, argstr="%s") out_file = File(genfile=True, hash_files=False, argstr="%s") class Reorient2StdOutputSpec(TraitedSpec): out_file = File(exists=True) class Reorient2Std(FSLCommand): """fslreorient2std is a tool for reorienting the image to match the approximate orientation of the standard template images (MNI152). Examples -------- >>> reorient = Reorient2Std() >>> reorient.inputs.in_file = "functional.nii" >>> res = reorient.run() # doctest: +SKIP """ _cmd = 'fslreorient2std' input_spec = Reorient2StdInputSpec output_spec = Reorient2StdOutputSpec def _gen_filename(self, name): if name == 'out_file': return self._gen_fname(self.inputs.in_file, suffix="_reoriented") return None def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.out_file): outputs['out_file'] = self._gen_filename('out_file') else: outputs['out_file'] = os.path.abspath(self.inputs.out_file) return outputs class InvWarpInputSpec(FSLCommandInputSpec): warp = File(exists=True, argstr='--warp=%s', mandatory=True, desc=('Name of file containing warp-coefficients/fields. This ' 'would typically be the output from the --cout switch of ' 'fnirt (but can also use fields, like the output from ' '--fout).')) reference = File(exists=True, argstr='--ref=%s', mandatory=True, desc=('Name of a file in target space. Note that the ' 'target space is now different from the target ' 'space that was used to create the --warp file. 
It ' 'would typically be the file that was specified ' 'with the --in argument when running fnirt.')) inverse_warp = File(argstr='--out=%s', name_source=['warp'], hash_files=False, name_template='%s_inverse', desc=('Name of output file, containing warps that are ' 'the "reverse" of those in --warp. This will be ' 'a field-file (rather than a file of spline ' 'coefficients), and it will have any affine ' 'component included as part of the ' 'displacements.')) absolute = traits.Bool(argstr='--abs', xor=['relative'], desc=('If set it indicates that the warps in --warp ' 'should be interpreted as absolute, provided ' 'that it is not created by fnirt (which ' 'always uses relative warps). If set it also ' 'indicates that the output --out should be ' 'absolute.')) relative = traits.Bool(argstr='--rel', xor=['absolute'], desc=('If set it indicates that the warps in --warp ' 'should be interpreted as relative. I.e. the ' 'values in --warp are displacements from the ' 'coordinates in the --ref space. If set it ' 'also indicates that the output --out should ' 'be relative.')) niter = traits.Int(argstr='--niter=%d', desc=('Determines how many iterations of the ' 'gradient-descent search that should be run.')) regularise = traits.Float(argstr='--regularise=%f', desc='Regularisation strength (deafult=1.0).') noconstraint = traits.Bool(argstr='--noconstraint', desc='Do not apply Jacobian constraint') jacobian_min = traits.Float(argstr='--jmin=%f', desc=('Minimum acceptable Jacobian value for ' 'constraint (default 0.01)')) jacobian_max = traits.Float(argstr='--jmax=%f', desc=('Maximum acceptable Jacobian value for ' 'constraint (default 100.0)')) class InvWarpOutputSpec(TraitedSpec): inverse_warp = File(exists=True, desc=('Name of output file, containing warps that are ' 'the "reverse" of those in --warp.')) class InvWarp(FSLCommand): """Use FSL Invwarp to inverse a FNIRT warp Examples -------- >>> from nipype.interfaces.fsl import InvWarp >>> invwarp = InvWarp() >>> invwarp.inputs.warp = "struct2mni.nii" >>> invwarp.inputs.reference = "anatomical.nii" >>> invwarp.cmdline 'invwarp --out=struct2mni_inverse.nii.gz --ref=anatomical.nii --warp=struct2mni.nii' >>> res = invwarp.run() # doctest: +SKIP """ input_spec = InvWarpInputSpec output_spec = InvWarpOutputSpec _cmd = 'invwarp' class ComplexInputSpec(FSLCommandInputSpec): complex_in_file = File(exists=True, argstr="%s", position=2) complex_in_file2 = File(exists=True, argstr="%s", position=3) real_in_file = File(exists=True, argstr="%s", position=2) imaginary_in_file = File(exists=True, argstr="%s", position=3) magnitude_in_file = File(exists=True, argstr="%s", position=2) phase_in_file = File(exists=True, argstr='%s', position=3) _ofs = ['complex_out_file', 'magnitude_out_file','phase_out_file', 'real_out_file','imaginary_out_file'] _conversion = ['real_polar','real_cartesian', 'complex_cartesian','complex_polar', 'complex_split','complex_merge',] complex_out_file = File(genfile=True, argstr="%s", position=-3, xor=_ofs+_conversion[:2]) magnitude_out_file = File(genfile=True, argstr="%s", position=-4, xor=_ofs[:1]+_ofs[3:]+_conversion[1:]) phase_out_file = File(genfile=True, argstr="%s", position=-3, xor=_ofs[:1]+_ofs[3:]+_conversion[1:]) real_out_file = File(genfile=True, argstr="%s", position=-4, xor=_ofs[:3]+_conversion[:1]+_conversion[2:]) imaginary_out_file = File(genfile=True, argstr="%s", position=-3, xor=_ofs[:3]+_conversion[:1]+_conversion[2:]) start_vol = traits.Int(position=-2, argstr='%d') end_vol = traits.Int(position=-1, argstr='%d') 
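    # Note: each conversion switch below lists the others in its xor metadata
    # (via _conversion), so only one conversion mode can be set per run;
    # complex_merge additionally excludes start_vol/end_vol.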
real_polar = traits.Bool( argstr = '-realpolar', xor = _conversion, position=1,) # requires=['complex_in_file','magnitude_out_file','phase_out_file']) real_cartesian = traits.Bool( argstr = '-realcartesian', xor = _conversion, position=1,) # requires=['complex_in_file','real_out_file','imaginary_out_file']) complex_cartesian = traits.Bool( argstr = '-complex', xor = _conversion, position=1,) # requires=['real_in_file','imaginary_in_file','complex_out_file']) complex_polar = traits.Bool( argstr = '-complexpolar', xor = _conversion, position=1,) # requires=['magnitude_in_file','phase_in_file', # 'magnitude_out_file','phase_out_file']) complex_split = traits.Bool( argstr = '-complexsplit', xor = _conversion, position=1,) # requires=['complex_in_file','complex_out_file']) complex_merge = traits.Bool( argstr = '-complexmerge', xor = _conversion + ['start_vol','end_vol'], position=1,) # requires=['complex_in_file','complex_in_file2','complex_out_file']) class ComplexOuputSpec(TraitedSpec): magnitude_out_file = File() phase_out_file = File() real_out_file = File() imaginary_out_file = File() complex_out_file = File() class Complex(FSLCommand): """fslcomplex is a tool for converting complex data Examples -------- >>> cplx = Complex() >>> cplx.inputs.complex_in_file = "complex.nii" >>> cplx.real_polar = True >>> res = cplx.run() # doctest: +SKIP """ _cmd = 'fslcomplex' input_spec = ComplexInputSpec output_spec = ComplexOuputSpec def _parse_inputs(self, skip=None): if skip == None: skip = [] if self.inputs.real_cartesian: skip += self.inputs._ofs[:3] elif self.inputs.real_polar: skip += self.inputs._ofs[:1]+self.inputs._ofs[3:] else: skip += self.inputs._ofs[1:] return super(Complex,self)._parse_inputs(skip) def _gen_filename(self, name): if name == 'complex_out_file': if self.inputs.complex_cartesian: in_file = self.inputs.real_in_file elif self.inputs.complex_polar: in_file = self.inputs.magnitude_in_file elif self.inputs.complex_split or self.inputs.complex_merge: in_file = self.inputs.complex_in_file else: return None return self._gen_fname(in_file, suffix="_cplx") elif name =='magnitude_out_file': return self._gen_fname(self.inputs.complex_in_file, suffix="_mag") elif name =='phase_out_file': return self._gen_fname(self.inputs.complex_in_file,suffix="_phase") elif name =='real_out_file': return self._gen_fname(self.inputs.complex_in_file, suffix="_real") elif name =='imaginary_out_file': return self._gen_fname(self.inputs.complex_in_file, suffix="_imag") return None def _get_output(self,name): output = getattr(self.inputs,name) if not isdefined(output): output = self._gen_filename(name) return os.path.abspath(output) def _list_outputs(self): outputs = self.output_spec().get() if self.inputs.complex_cartesian or self.inputs.complex_polar or \ self.inputs.complex_split or self.inputs.complex_merge: outputs['complex_out_file'] = self._get_output('complex_out_file') elif self.inputs.real_cartesian: outputs['real_out_file'] = self._get_output('real_out_file') outputs['imaginary_out_file'] = self._get_output('imaginary_out_file') elif self.inputs.real_polar: outputs['magnitude_out_file'] = self._get_output('magnitude_out_file') outputs['phase_out_file'] = self._get_output('phase_out_file') return outputs nipype-0.9.2/nipype/interfaces/io.py # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Set of interfaces that allow interaction with data.
Currently available interfaces are: DataSource: Generic nifti to named Nifti interface DataSink: Generic named output from interfaces to data store XNATSource: preliminary interface to XNAT To come : XNATSink Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ import glob import string import os import os.path as op import shutil import re import tempfile from warnings import warn import sqlite3 from nipype.utils.misc import human_order_sorted try: import pyxnat except: pass from nipype.interfaces.base import (TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, isdefined, OutputMultiPath, DynamicTraitedSpec, Undefined, BaseInterfaceInputSpec) from nipype.utils.filemanip import (copyfile, list_to_filename, filename_to_list) from .. import logging iflogger = logging.getLogger('interface') def copytree(src, dst): """Recursively copy a directory tree using nipype.utils.filemanip.copyfile() This is not a thread-safe routine. However, in the case of creating new directories, it checks to see if a particular directory has already been created by another process. """ names = os.listdir(src) try: os.makedirs(dst) except OSError, why: if 'File exists' in why: pass else: raise why errors = [] for name in names: srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if os.path.isdir(srcname): copytree(srcname, dstname) else: copyfile(srcname, dstname, True, hashmethod='content') except (IOError, os.error), why: errors.append((srcname, dstname, str(why))) # catch the Error from the recursive copytree so that we can # continue with other files except Exception, err: errors.extend(err.args[0]) if errors: raise Exception(errors) def add_traits(base, names, trait_type=None): """ Add traits to a traited class. All traits are set to Undefined by default """ if trait_type is None: trait_type = traits.Any undefined_traits = {} for key in names: base.add_trait(key, trait_type) undefined_traits[key] = Undefined base.trait_set(trait_change_notify=False, **undefined_traits) # access each trait for key in names: _ = getattr(base, key) return base class IOBase(BaseInterface): def _run_interface(self, runtime): return runtime def _list_outputs(self): raise NotImplementedError def _outputs(self): return self._add_output_traits(super(IOBase, self)._outputs()) def _add_output_traits(self, base): return base class DataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): base_directory = Directory( desc='Path to the base directory for storing data.') container = traits.Str( desc='Folder within base directory in which to store output') parameterization = traits.Bool(True, usedefault=True, desc='store output in parametrized structure') strip_dir = Directory(desc='path to strip out of filename') substitutions = InputMultiPath(traits.Tuple(traits.Str, traits.Str), desc=('List of 2-tuples reflecting string ' 'to substitute and string to replace ' 'it with')) regexp_substitutions = InputMultiPath(traits.Tuple(traits.Str, traits.Str), desc=('List of 2-tuples reflecting a pair ' 'of a Python regexp pattern and a ' 'replacement string. 
Invoked after ' 'string `substitutions`')) _outputs = traits.Dict(traits.Str, value={}, usedefault=True) remove_dest_dir = traits.Bool(False, usedefault=True, desc='remove dest directory when copying dirs') def __setattr__(self, key, value): if key not in self.copyable_trait_names(): if not isdefined(value): super(DataSinkInputSpec, self).__setattr__(key, value) self._outputs[key] = value else: if key in self._outputs: self._outputs[key] = value super(DataSinkInputSpec, self).__setattr__(key, value) class DataSinkOutputSpec(TraitedSpec): out_file = traits.Any(desc='datasink output') class DataSink(IOBase): """ Generic datasink module to store structured outputs Primarily for use within a workflow. This interface allows arbitrary creation of input attributes. The names of these attributes define the directory structure to create for storage of the files or directories. The attributes take the following form: string[[.[@]]string[[.[@]]string]] ... where parts between [] are optional. An attribute such as contrasts.@con will create a 'contrasts' directory to store the results linked to the attribute. If the @ is left out, such as in 'contrasts.con', a subdirectory 'con' will be created under 'contrasts'. the general form of the output is:: 'base_directory/container/parameterization/destloc/filename' destloc = string[[.[@]]string[[.[@]]string]] and filename comesfrom the input to the connect statement. .. warning:: This is not a thread-safe node because it can write to a common shared location. It will not complain when it overwrites a file. .. note:: If both substitutions and regexp_substitutions are used, then substitutions are applied first followed by regexp_substitutions. This interface **cannot** be used in a MapNode as the inputs are defined only when the connect statement is executed. Examples -------- >>> ds = DataSink() >>> ds.inputs.base_directory = 'results_dir' >>> ds.inputs.container = 'subject' >>> ds.inputs.structural = 'structural.nii' >>> setattr(ds.inputs, 'contrasts.@con', ['cont1.nii', 'cont2.nii']) >>> setattr(ds.inputs, 'contrasts.alt', ['cont1a.nii', 'cont2a.nii']) >>> ds.run() # doctest: +SKIP To use DataSink in a MapNode, its inputs have to be defined at the time the interface is created. 
>>> ds = DataSink(infields=['contasts.@con']) >>> ds.inputs.base_directory = 'results_dir' >>> ds.inputs.container = 'subject' >>> ds.inputs.structural = 'structural.nii' >>> setattr(ds.inputs, 'contrasts.@con', ['cont1.nii', 'cont2.nii']) >>> setattr(ds.inputs, 'contrasts.alt', ['cont1a.nii', 'cont2a.nii']) >>> ds.run() # doctest: +SKIP """ input_spec = DataSinkInputSpec output_spec = DataSinkOutputSpec def __init__(self, infields=None, force_run=True, **kwargs): """ Parameters ---------- infields : list of str Indicates the input fields to be dynamically created """ super(DataSink, self).__init__(**kwargs) undefined_traits = {} # used for mandatory inputs check self._infields = infields if infields: for key in infields: self.inputs.add_trait(key, traits.Any) self.inputs._outputs[key] = Undefined undefined_traits[key] = Undefined self.inputs.trait_set(trait_change_notify=False, **undefined_traits) if force_run: self._always_run = True def _get_dst(self, src): ## If path is directory with trailing os.path.sep, ## then remove that for a more robust behavior src = src.rstrip(os.path.sep) path, fname = os.path.split(src) if self.inputs.parameterization: dst = path if isdefined(self.inputs.strip_dir): dst = dst.replace(self.inputs.strip_dir, '') folders = [folder for folder in dst.split(os.path.sep) if folder.startswith('_')] dst = os.path.sep.join(folders) if fname: dst = os.path.join(dst, fname) else: if fname: dst = fname else: dst = path.split(os.path.sep)[-1] if dst[0] == os.path.sep: dst = dst[1:] return dst def _substitute(self, pathstr): pathstr_ = pathstr if isdefined(self.inputs.substitutions): for key, val in self.inputs.substitutions: oldpathstr = pathstr pathstr = pathstr.replace(key, val) if pathstr != oldpathstr: iflogger.debug('sub.str: %s -> %s using %r -> %r' % (oldpathstr, pathstr, key, val)) if isdefined(self.inputs.regexp_substitutions): for key, val in self.inputs.regexp_substitutions: oldpathstr = pathstr pathstr, _ = re.subn(key, val, pathstr) if pathstr != oldpathstr: iflogger.debug('sub.regexp: %s -> %s using %r -> %r' % (oldpathstr, pathstr, key, val)) if pathstr_ != pathstr: iflogger.info('sub: %s -> %s' % (pathstr_, pathstr)) return pathstr def _list_outputs(self): """Execute this module. """ outputs = self.output_spec().get() out_files = [] outdir = self.inputs.base_directory if not isdefined(outdir): outdir = '.' 
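        # If no base_directory was provided, fall back to the current working
        # directory before building the absolute output path below.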
outdir = os.path.abspath(outdir) if isdefined(self.inputs.container): outdir = os.path.join(outdir, self.inputs.container) if not os.path.exists(outdir): try: os.makedirs(outdir) except OSError, inst: if 'File exists' in inst: pass else: raise(inst) for key, files in self.inputs._outputs.items(): if not isdefined(files): continue iflogger.debug("key: %s files: %s" % (key, str(files))) files = filename_to_list(files) tempoutdir = outdir for d in key.split('.'): if d[0] == '@': continue tempoutdir = os.path.join(tempoutdir, d) # flattening list if isinstance(files, list): if isinstance(files[0], list): files = [item for sublist in files for item in sublist] for src in filename_to_list(files): src = os.path.abspath(src) if os.path.isfile(src): dst = self._get_dst(src) dst = os.path.join(tempoutdir, dst) dst = self._substitute(dst) path, _ = os.path.split(dst) if not os.path.exists(path): try: os.makedirs(path) except OSError, inst: if 'File exists' in inst: pass else: raise(inst) iflogger.debug("copyfile: %s %s" % (src, dst)) copyfile(src, dst, copy=True, hashmethod='content') out_files.append(dst) elif os.path.isdir(src): dst = self._get_dst(os.path.join(src, '')) dst = os.path.join(tempoutdir, dst) dst = self._substitute(dst) path, _ = os.path.split(dst) if not os.path.exists(path): try: os.makedirs(path) except OSError, inst: if 'File exists' in inst: pass else: raise(inst) if os.path.exists(dst) and self.inputs.remove_dest_dir: iflogger.debug("removing: %s" % dst) shutil.rmtree(dst) iflogger.debug("copydir: %s %s" % (src, dst)) copytree(src, dst) out_files.append(dst) outputs['out_file'] = out_files return outputs class DataGrabberInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): # InterfaceInputSpec): base_directory = Directory(exists=True, desc='Path to the base directory consisting of subject data.') raise_on_empty = traits.Bool(True, usedefault=True, desc='Generate exception if list is empty for a given field') sort_filelist = traits.Bool(mandatory=True, desc='Sort the filelist that matches the template') template = traits.Str(mandatory=True, desc='Layout used to get files. relative to base directory if defined') template_args = traits.Dict(key_trait=traits.Str, value_trait=traits.List(traits.List), desc='Information to plug into template') class DataGrabber(IOBase): """ Generic datagrabber module that wraps around glob in an intelligent way for neuroimaging tasks to grab files .. attention:: Doesn't support directories currently Examples -------- >>> from nipype.interfaces.io import DataGrabber Pick all files from current directory >>> dg = DataGrabber() >>> dg.inputs.template = '*' Pick file foo/foo.nii from current directory >>> dg.inputs.template = '%s/%s.dcm' >>> dg.inputs.template_args['outfiles']=[['dicomdir','123456-1-1.dcm']] Same thing but with dynamically created fields >>> dg = DataGrabber(infields=['arg1','arg2']) >>> dg.inputs.template = '%s/%s.nii' >>> dg.inputs.arg1 = 'foo' >>> dg.inputs.arg2 = 'foo' however this latter form can be used with iterables and iterfield in a pipeline. Dynamically created, user-defined input and output fields >>> dg = DataGrabber(infields=['sid'], outfields=['func','struct','ref']) >>> dg.inputs.base_directory = '.' >>> dg.inputs.template = '%s/%s.nii' >>> dg.inputs.template_args['func'] = [['sid',['f3','f5']]] >>> dg.inputs.template_args['struct'] = [['sid',['struct']]] >>> dg.inputs.template_args['ref'] = [['sid','ref']] >>> dg.inputs.sid = 's1' Change the template only for output field struct. 
The rest use the general template >>> dg.inputs.field_template = dict(struct='%s/struct.nii') >>> dg.inputs.template_args['struct'] = [['sid']] """ input_spec = DataGrabberInputSpec output_spec = DynamicTraitedSpec _always_run = True def __init__(self, infields=None, outfields=None, **kwargs): """ Parameters ---------- infields : list of str Indicates the input fields to be dynamically created outfields: list of str Indicates output fields to be dynamically created See class examples for usage """ if not outfields: outfields = ['outfiles'] super(DataGrabber, self).__init__(**kwargs) undefined_traits = {} # used for mandatory inputs check self._infields = infields self._outfields = outfields if infields: for key in infields: self.inputs.add_trait(key, traits.Any) undefined_traits[key] = Undefined # add ability to insert field specific templates self.inputs.add_trait('field_template', traits.Dict(traits.Enum(outfields), desc="arguments that fit into template")) undefined_traits['field_template'] = Undefined if not isdefined(self.inputs.template_args): self.inputs.template_args = {} for key in outfields: if not key in self.inputs.template_args: if infields: self.inputs.template_args[key] = [infields] else: self.inputs.template_args[key] = [] self.inputs.trait_set(trait_change_notify=False, **undefined_traits) def _add_output_traits(self, base): """ Using traits.Any instead out OutputMultiPath till add_trait bug is fixed. """ return add_traits(base, self.inputs.template_args.keys()) def _list_outputs(self): # infields are mandatory, however I could not figure out how to set 'mandatory' flag dynamically # hence manual check if self._infields: for key in self._infields: value = getattr(self.inputs, key) if not isdefined(value): msg = "%s requires a value for input '%s' because it was listed in 'infields'" % \ (self.__class__.__name__, key) raise ValueError(msg) outputs = {} for key, args in self.inputs.template_args.items(): outputs[key] = [] template = self.inputs.template if hasattr(self.inputs, 'field_template') and \ isdefined(self.inputs.field_template) and \ key in self.inputs.field_template: template = self.inputs.field_template[key] if isdefined(self.inputs.base_directory): template = os.path.join( os.path.abspath(self.inputs.base_directory), template) else: template = os.path.abspath(template) if not args: filelist = glob.glob(template) if len(filelist) == 0: msg = 'Output key: %s Template: %s returned no files' % ( key, template) if self.inputs.raise_on_empty: raise IOError(msg) else: warn(msg) else: if self.inputs.sort_filelist: filelist = human_order_sorted(filelist) outputs[key] = list_to_filename(filelist) for argnum, arglist in enumerate(args): maxlen = 1 for arg in arglist: if isinstance(arg, str) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): if (maxlen > 1) and (len(arg) != maxlen): raise ValueError('incompatible number of arguments for %s' % key) if len(arg) > maxlen: maxlen = len(arg) outfiles = [] for i in range(maxlen): argtuple = [] for arg in arglist: if isinstance(arg, str) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): argtuple.append(arg[i]) else: argtuple.append(arg) filledtemplate = template if argtuple: try: filledtemplate = template % tuple(argtuple) except TypeError as e: raise TypeError(e.message + ": Template %s failed to convert with args %s" % (template, str(tuple(argtuple)))) outfiles = glob.glob(filledtemplate) if len(outfiles) == 0: msg = 'Output key: %s Template: %s 
returned no files' % (key, filledtemplate) if self.inputs.raise_on_empty: raise IOError(msg) else: warn(msg) outputs[key].append(None) else: if self.inputs.sort_filelist: outfiles = human_order_sorted(outfiles) outputs[key].append(list_to_filename(outfiles)) if any([val is None for val in outputs[key]]): outputs[key] = [] if len(outputs[key]) == 0: outputs[key] = None elif len(outputs[key]) == 1: outputs[key] = outputs[key][0] return outputs class SelectFilesInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): base_directory = Directory(exists=True, desc="Root path common to templates.") sort_filelist = traits.Bool(True, usedefault=True, desc="When matching mutliple files, return them in sorted order.") raise_on_empty = traits.Bool(True, usedefault=True, desc="Raise an exception if a template pattern matches no files.") force_lists = traits.Either(traits.Bool(), traits.List(traits.Str()), default=False, usedefault=True, desc=("Whether to return outputs as a list even when only one file " "matches the template. Either a boolean that applies to all " "output fields or a list of output field names to coerce to " " a list")) class SelectFiles(IOBase): """Flexibly collect data from disk to feed into workflows. This interface uses the {}-based string formatting syntax to plug values (possibly known only at workflow execution time) into string templates and collect files from persistant storage. These templates can also be combined with glob wildcards. The field names in the formatting template (i.e. the terms in braces) will become inputs fields on the interface, and the keys in the templates dictionary will form the output fields. Examples -------- >>> from nipype import SelectFiles, Node >>> templates={"T1": "{subject_id}/struct/T1.nii", ... "epi": "{subject_id}/func/f[0, 1].nii"} >>> dg = Node(SelectFiles(templates), "selectfiles") >>> dg.inputs.subject_id = "subj1" >>> dg.outputs.get() {'T1': , 'epi': } The same thing with dynamic grabbing of specific files: >>> templates["epi"] = "{subject_id}/func/f{run!s}.nii" >>> dg = Node(SelectFiles(templates), "selectfiles") >>> dg.inputs.subject_id = "subj1" >>> dg.inputs.run = [2, 4] """ input_spec = SelectFilesInputSpec output_spec = DynamicTraitedSpec _always_run = True def __init__(self, templates, **kwargs): """Create an instance with specific input fields. Parameters ---------- templates : dictionary Mapping from string keys to string template values. The keys become output fields on the interface. The templates should use {}-formatting syntax, where the names in curly braces become inputs fields on the interface. Format strings can also use glob wildcards to match multiple files. At runtime, the values of the interface inputs will be plugged into these templates, and the resulting strings will be used to select files. 
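        For instance, a template value such as ``"{subject_id}/anat/T1*.nii.gz"``
        (path layout hypothetical) creates a ``subject_id`` input field on the
        interface; the ``*`` wildcard is expanded by glob only after the
        ``{subject_id}`` field has been filled in.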
""" super(SelectFiles, self).__init__(**kwargs) # Infer the infields and outfields from the template infields = [] for name, template in templates.iteritems(): for _, field_name, _, _ in string.Formatter().parse(template): if field_name is not None and field_name not in infields: infields.append(field_name) self._infields = infields self._outfields = list(templates) self._templates = templates # Add the dynamic input fields undefined_traits = {} for field in infields: self.inputs.add_trait(field, traits.Any) undefined_traits[field] = Undefined self.inputs.trait_set(trait_change_notify=False, **undefined_traits) def _add_output_traits(self, base): """Add the dynamic output fields""" return add_traits(base, self._templates.keys()) def _list_outputs(self): """Find the files and expose them as interface outputs.""" outputs = {} info = dict([(k, v) for k, v in self.inputs.__dict__.items() if k in self._infields]) force_lists = self.inputs.force_lists if isinstance(force_lists, bool): force_lists = self._outfields if force_lists else [] bad_fields = set(force_lists) - set(self._outfields) if bad_fields: bad_fields = ", ".join(list(bad_fields)) plural = "s" if len(bad_fields) > 1 else "" verb = "were" if len(bad_fields) > 1 else "was" msg = ("The field%s '%s' %s set in 'force_lists' and not in " "'templates'.") % (plural, bad_fields, verb) raise ValueError(msg) for field, template in self._templates.iteritems(): # Build the full template path if isdefined(self.inputs.base_directory): template = op.abspath(op.join( self.inputs.base_directory, template)) else: template = op.abspath(template) # Fill in the template and glob for files filled_template = template.format(**info) filelist = glob.glob(filled_template) # Handle the case where nothing matched if not filelist: msg = "No files were found matching %s template: %s" % ( field, template) if self.inputs.raise_on_empty: raise IOError(msg) else: warn(msg) # Possibly sort the list if self.inputs.sort_filelist: filelist = human_order_sorted(filelist) # Handle whether this must be a list or not if field not in force_lists: filelist = list_to_filename(filelist) outputs[field] = filelist return outputs class DataFinderInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): root_paths = traits.Either(traits.List(), traits.Str(), mandatory=True,) match_regex = traits.Str('(.+)', usedefault=True, desc=("Regular expression for matching " "paths.")) ignore_regexes = traits.List(desc=("List of regular expressions, " "if any match the path it will be " "ignored.") ) max_depth = traits.Int(desc="The maximum depth to search beneath " "the root_paths") min_depth = traits.Int(desc="The minimum depth to search beneath " "the root paths") unpack_single = traits.Bool(False, usedefault=True, desc="Unpack single results from list") class DataFinder(IOBase): """Search for paths that match a given regular expression. Allows a less proscriptive approach to gathering input files compared to DataGrabber. Will recursively search any subdirectories by default. This can be limited with the min/max depth options. Matched paths are available in the output 'out_paths'. Any named groups of captured text from the regular expression are also available as ouputs of the same name. Examples -------- >>> from nipype.interfaces.io import DataFinder >>> df = DataFinder() >>> df.inputs.root_paths = '.' 
>>> df.inputs.match_regex = '.+/(?P.+(qT1|ep2d_fid_T1).+)/(?P.+)\.nii.gz' >>> result = df.run() # doctest: +SKIP >>> print result.outputs.out_paths # doctest: +SKIP ['./027-ep2d_fid_T1_Gd4/acquisition.nii.gz', './018-ep2d_fid_T1_Gd2/acquisition.nii.gz', './016-ep2d_fid_T1_Gd1/acquisition.nii.gz', './013-ep2d_fid_T1_pre/acquisition.nii.gz'] >>> print result.outputs.series_dir # doctest: +SKIP ['027-ep2d_fid_T1_Gd4', '018-ep2d_fid_T1_Gd2', '016-ep2d_fid_T1_Gd1', '013-ep2d_fid_T1_pre'] >>> print result.outputs.basename # doctest: +SKIP ['acquisition', 'acquisition', 'acquisition', 'acquisition'] """ input_spec = DataFinderInputSpec output_spec = DynamicTraitedSpec _always_run = True def _match_path(self, target_path): #Check if we should ignore the path for ignore_re in self.ignore_regexes: if ignore_re.search(target_path): return #Check if we can match the path match = self.match_regex.search(target_path) if not match is None: match_dict = match.groupdict() if self.result is None: self.result = {'out_paths': []} for key in match_dict.keys(): self.result[key] = [] self.result['out_paths'].append(target_path) for key, val in match_dict.iteritems(): self.result[key].append(val) def _run_interface(self, runtime): #Prepare some of the inputs if isinstance(self.inputs.root_paths, str): self.inputs.root_paths = [self.inputs.root_paths] self.match_regex = re.compile(self.inputs.match_regex) if self.inputs.max_depth is Undefined: max_depth = None else: max_depth = self.inputs.max_depth if self.inputs.min_depth is Undefined: min_depth = 0 else: min_depth = self.inputs.min_depth if self.inputs.ignore_regexes is Undefined: self.ignore_regexes = [] else: self.ignore_regexes = \ [re.compile(regex) for regex in self.inputs.ignore_regexes] self.result = None for root_path in self.inputs.root_paths: #Handle tilda/env variables and remove extra seperators root_path = os.path.normpath(os.path.expandvars(os.path.expanduser(root_path))) #Check if the root_path is a file if os.path.isfile(root_path): if min_depth == 0: self._match_path(root_path) continue #Walk through directory structure checking paths for curr_dir, sub_dirs, files in os.walk(root_path): #Determine the current depth from the root_path curr_depth = (curr_dir.count(os.sep) - root_path.count(os.sep)) #If the max path depth has been reached, clear sub_dirs #and files if max_depth is not None and curr_depth >= max_depth: sub_dirs[:] = [] files = [] #Test the path for the curr_dir and all files if curr_depth >= min_depth: self._match_path(curr_dir) if curr_depth >= (min_depth - 1): for infile in files: full_path = os.path.join(curr_dir, infile) self._match_path(full_path) if (self.inputs.unpack_single and len(self.result['out_paths']) == 1 ): for key, vals in self.result.iteritems(): self.result[key] = vals[0] else: #sort all keys acording to out_paths for key in self.result.keys(): if key == "out_paths": continue sort_tuples = human_order_sorted(zip(self.result["out_paths"], self.result[key])) self.result[key] = [x for (_, x) in sort_tuples] self.result["out_paths"] = human_order_sorted(self.result["out_paths"]) if not self.result: raise RuntimeError("Regular expression did not match any files!") return runtime def _list_outputs(self): outputs = self._outputs().get() outputs.update(self.result) return outputs class FSSourceInputSpec(BaseInterfaceInputSpec): subjects_dir = Directory(mandatory=True, desc='Freesurfer subjects directory.') subject_id = traits.Str(mandatory=True, desc='Subject name for whom to retrieve data') hemi = traits.Enum('both', 
'lh', 'rh', usedefault=True, desc='Selects hemisphere specific outputs') class FSSourceOutputSpec(TraitedSpec): T1 = File( exists=True, desc='Intensity normalized whole-head volume', loc='mri') aseg = File( exists=True, desc='Volumetric map of regions from automatic segmentation', loc='mri') brain = File( exists=True, desc='Intensity normalized brain-only volume', loc='mri') brainmask = File( exists=True, desc='Skull-stripped (brain-only) volume', loc='mri') filled = File(exists=True, desc='Subcortical mass volume', loc='mri') norm = File( exists=True, desc='Normalized skull-stripped volume', loc='mri') nu = File(exists=True, desc='Non-uniformity corrected whole-head volume', loc='mri') orig = File(exists=True, desc='Base image conformed to Freesurfer space', loc='mri') rawavg = File(exists=True, desc='Volume formed by averaging input images', loc='mri') ribbon = OutputMultiPath( File(exists=True), desc='Volumetric maps of cortical ribbons', loc='mri', altkey='*ribbon') wm = File(exists=True, desc='Segmented white-matter volume', loc='mri') wmparc = File( exists=True, desc='Aparc parcellation projected into subcortical white matter', loc='mri') curv = OutputMultiPath(File(exists=True), desc='Maps of surface curvature', loc='surf') inflated = OutputMultiPath( File(exists=True), desc='Inflated surface meshes', loc='surf') pial = OutputMultiPath( File(exists=True), desc='Gray matter/pia mater surface meshes', loc='surf') smoothwm = OutputMultiPath(File(exists=True), loc='surf', desc='Smoothed original surface meshes') sphere = OutputMultiPath( File(exists=True), desc='Spherical surface meshes', loc='surf') sulc = OutputMultiPath( File(exists=True), desc='Surface maps of sulcal depth', loc='surf') thickness = OutputMultiPath(File(exists=True), loc='surf', desc='Surface maps of cortical thickness') volume = OutputMultiPath( File(exists=True), desc='Surface maps of cortical volume', loc='surf') white = OutputMultiPath( File(exists=True), desc='White/gray matter surface meshes', loc='surf') label = OutputMultiPath( File(exists=True), desc='Volume and surface label files', loc='label', altkey='*label') annot = OutputMultiPath(File(exists=True), desc='Surface annotation files', loc='label', altkey='*annot') aparc_aseg = OutputMultiPath( File(exists=True), loc='mri', altkey='aparc*aseg', desc='Aparc parcellation projected into aseg volume') sphere_reg = OutputMultiPath( File(exists=True), loc='surf', altkey='sphere.reg', desc='Spherical registration file') aseg_stats = OutputMultiPath(File(exists=True), loc='stats', altkey='aseg', desc='Automated segmentation statistics file') wmparc_stats = OutputMultiPath( File(exists=True), loc='stats', altkey='wmparc', desc='White matter parcellation statistics file') aparc_stats = OutputMultiPath( File(exists=True), loc='stats', altkey='aparc', desc='Aparc parcellation statistics files') BA_stats = OutputMultiPath(File(exists=True), loc='stats', altkey='BA', desc='Brodmann Area statistics files') aparc_a2009s_stats = OutputMultiPath( File(exists=True), loc='stats', altkey='aparc.a2009s', desc='Aparc a2009s parcellation statistics files') curv_stats = OutputMultiPath(File(exists=True), loc='stats', altkey='curv', desc='Curvature statistics files') entorhinal_exvivo_stats = OutputMultiPath( File(exists=True), loc='stats', altkey='entorhinal_exvivo', desc='Entorhinal exvivo statistics files') class FreeSurferSource(IOBase): """Generates freesurfer subject info from their directories Examples -------- >>> from nipype.interfaces.io import FreeSurferSource >>> fs = 
FreeSurferSource() >>> #fs.inputs.subjects_dir = '.' >>> fs.inputs.subject_id = 'PWS04' >>> res = fs.run() # doctest: +SKIP >>> fs.inputs.hemi = 'lh' >>> res = fs.run() # doctest: +SKIP """ input_spec = FSSourceInputSpec output_spec = FSSourceOutputSpec _always_run = True _additional_metadata = ['loc', 'altkey'] def _get_files(self, path, key, dirval, altkey=None): globsuffix = '' if dirval == 'mri': globsuffix = '.mgz' elif dirval == 'stats': globsuffix = '.stats' globprefix = '' if key == 'ribbon' or dirval in ['surf', 'label', 'stats']: if self.inputs.hemi != 'both': globprefix = self.inputs.hemi + '.' else: globprefix = '*' if key == 'aseg_stats' or key == 'wmparc_stats': globprefix = '' keydir = os.path.join(path, dirval) if altkey: key = altkey globpattern = os.path.join( keydir, ''.join((globprefix, key, globsuffix))) return glob.glob(globpattern) def _list_outputs(self): subjects_dir = self.inputs.subjects_dir subject_path = os.path.join(subjects_dir, self.inputs.subject_id) output_traits = self._outputs() outputs = output_traits.get() for k in outputs.keys(): val = self._get_files(subject_path, k, output_traits.traits()[k].loc, output_traits.traits()[k].altkey) if val: outputs[k] = list_to_filename(val) return outputs class XNATSourceInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): query_template = traits.Str( mandatory=True, desc=('Layout used to get files. Relative to base ' 'directory if defined') ) query_template_args = traits.Dict( traits.Str, traits.List(traits.List), value=dict(outfiles=[]), usedefault=True, desc='Information to plug into template' ) server = traits.Str( mandatory=True, requires=['user', 'pwd'], xor=['config'] ) user = traits.Str() pwd = traits.Password() config = File(mandatory=True, xor=['server']) cache_dir = Directory(desc='Cache directory') class XNATSource(IOBase): """ Generic XNATSource module that wraps around the pyxnat module in an intelligent way for neuroimaging tasks to grab files and data from an XNAT server. 
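Connection details can be given either directly through the ``server``, ``user`` and ``pwd`` inputs, or through a pyxnat configuration file passed in the ``config`` input; the two options are mutually exclusive. A minimal sketch (the configuration file name is only illustrative):

>>> dg = XNATSource(infields=['sid']) # doctest: +SKIP
>>> dg.inputs.config = 'xnat.cfg' # doctest: +SKIP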
Examples -------- >>> from nipype.interfaces.io import XNATSource Pick all files from current directory >>> dg = XNATSource() >>> dg.inputs.template = '*' >>> dg = XNATSource(infields=['project','subject','experiment','assessor','inout']) >>> dg.inputs.query_template = '/projects/%s/subjects/%s/experiments/%s' \ '/assessors/%s/%s_resources/files' >>> dg.inputs.project = 'IMAGEN' >>> dg.inputs.subject = 'IMAGEN_000000001274' >>> dg.inputs.experiment = '*SessionA*' >>> dg.inputs.assessor = '*ADNI_MPRAGE_nii' >>> dg.inputs.inout = 'out' >>> dg = XNATSource(infields=['sid'],outfields=['struct','func']) >>> dg.inputs.query_template = '/projects/IMAGEN/subjects/%s/experiments/*SessionA*' \ '/assessors/*%s_nii/out_resources/files' >>> dg.inputs.query_template_args['struct'] = [['sid','ADNI_MPRAGE']] >>> dg.inputs.query_template_args['func'] = [['sid','EPI_faces']] >>> dg.inputs.sid = 'IMAGEN_000000001274' """ input_spec = XNATSourceInputSpec output_spec = DynamicTraitedSpec def __init__(self, infields=None, outfields=None, **kwargs): """ Parameters ---------- infields : list of str Indicates the input fields to be dynamically created outfields: list of str Indicates output fields to be dynamically created See class examples for usage """ super(XNATSource, self).__init__(**kwargs) undefined_traits = {} # used for mandatory inputs check self._infields = infields if infields: for key in infields: self.inputs.add_trait(key, traits.Any) undefined_traits[key] = Undefined self.inputs.query_template_args['outfiles'] = [infields] if outfields: # add ability to insert field specific templates self.inputs.add_trait( 'field_template', traits.Dict(traits.Enum(outfields), desc="arguments that fit into query_template") ) undefined_traits['field_template'] = Undefined #self.inputs.remove_trait('query_template_args') outdict = {} for key in outfields: outdict[key] = [] self.inputs.query_template_args = outdict self.inputs.trait_set(trait_change_notify=False, **undefined_traits) def _add_output_traits(self, base): """ Using traits.Any instead out OutputMultiPath till add_trait bug is fixed. 
""" return add_traits(base, self.inputs.query_template_args.keys()) def _list_outputs(self): # infields are mandatory, however I could not figure out # how to set 'mandatory' flag dynamically, hence manual check cache_dir = self.inputs.cache_dir or tempfile.gettempdir() if self.inputs.config: xnat = pyxnat.Interface(config=self.inputs.config) else: xnat = pyxnat.Interface(self.inputs.server, self.inputs.user, self.inputs.pwd, cache_dir ) if self._infields: for key in self._infields: value = getattr(self.inputs, key) if not isdefined(value): msg = ("%s requires a value for input '%s' " "because it was listed in 'infields'" % (self.__class__.__name__, key) ) raise ValueError(msg) outputs = {} for key, args in self.inputs.query_template_args.items(): outputs[key] = [] template = self.inputs.query_template if hasattr(self.inputs, 'field_template') and \ isdefined(self.inputs.field_template) and \ key in self.inputs.field_template: template = self.inputs.field_template[key] if not args: file_objects = xnat.select(template).get('obj') if file_objects == []: raise IOError('Template %s returned no files' % template ) outputs[key] = list_to_filename( [str(file_object.get()) for file_object in file_objects if file_object.exists() ]) for argnum, arglist in enumerate(args): maxlen = 1 for arg in arglist: if isinstance(arg, str) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): if (maxlen > 1) and (len(arg) != maxlen): raise ValueError('incompatible number ' 'of arguments for %s' % key ) if len(arg) > maxlen: maxlen = len(arg) outfiles = [] for i in range(maxlen): argtuple = [] for arg in arglist: if isinstance(arg, str) and \ hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): argtuple.append(arg[i]) else: argtuple.append(arg) if argtuple: target = template % tuple(argtuple) file_objects = xnat.select(target).get('obj') if file_objects == []: raise IOError('Template %s ' 'returned no files' % target ) outfiles = list_to_filename( [str(file_object.get()) for file_object in file_objects if file_object.exists() ] ) else: file_objects = xnat.select(template).get('obj') if file_objects == []: raise IOError('Template %s ' 'returned no files' % template ) outfiles = list_to_filename( [str(file_object.get()) for file_object in file_objects if file_object.exists() ] ) outputs[key].insert(i, outfiles) if len(outputs[key]) == 0: outputs[key] = None elif len(outputs[key]) == 1: outputs[key] = outputs[key][0] return outputs class XNATSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): _outputs = traits.Dict(traits.Str, value={}, usedefault=True) server = traits.Str(mandatory=True, requires=['user', 'pwd'], xor=['config'] ) user = traits.Str() pwd = traits.Password() config = File(mandatory=True, xor=['server']) cache_dir = Directory(desc='') project_id = traits.Str( desc='Project in which to store the outputs', mandatory=True) subject_id = traits.Str( desc='Set to subject id', mandatory=True) experiment_id = traits.Str( desc='Set to workflow name', mandatory=True) assessor_id = traits.Str( desc=('Option to customize ouputs representation in XNAT - ' 'assessor level will be used with specified id'), xor=['reconstruction_id'] ) reconstruction_id = traits.Str( desc=('Option to customize ouputs representation in XNAT - ' 'reconstruction level will be used with specified id'), xor=['assessor_id'] ) share = traits.Bool(False, desc=('Option to share the subjects from the original project' 'instead of creating new ones when possible - the 
created ' 'experiments are then shared back to the original project' ), usedefault=True) def __setattr__(self, key, value): if key not in self.copyable_trait_names(): self._outputs[key] = value else: super(XNATSinkInputSpec, self).__setattr__(key, value) class XNATSink(IOBase): """ Generic datasink module that takes a directory containing a list of nifti files and provides a set of structured output fields. """ input_spec = XNATSinkInputSpec def _list_outputs(self): """Execute this module. """ # setup XNAT connection cache_dir = self.inputs.cache_dir or tempfile.gettempdir() if self.inputs.config: xnat = pyxnat.Interface(config=self.inputs.config) else: xnat = pyxnat.Interface(self.inputs.server, self.inputs.user, self.inputs.pwd, cache_dir ) # if possible share the subject from the original project if self.inputs.share: subject_id = self.inputs.subject_id result = xnat.select( 'xnat:subjectData', ['xnat:subjectData/PROJECT', 'xnat:subjectData/SUBJECT_ID'] ).where('xnat:subjectData/SUBJECT_ID = %s AND' % subject_id) # subject containing raw data exists on the server if (result.data and isinstance(result.data[0], dict)): result = result.data[0] shared = xnat.select('/project/%s/subject/%s' % (self.inputs.project_id, self.inputs.subject_id ) ) if not shared.exists(): # subject not in share project share_project = xnat.select( '/project/%s' % self.inputs.project_id) if not share_project.exists(): # check project exists share_project.insert() subject = xnat.select('/project/%(project)s' '/subject/%(subject_id)s' % result ) subject.share(str(self.inputs.project_id)) # setup XNAT resource uri_template_args = dict( project_id=quote_id(self.inputs.project_id), subject_id=self.inputs.subject_id, experiment_id=quote_id(self.inputs.experiment_id)) if self.inputs.share: uri_template_args['original_project'] = result['project'] if self.inputs.assessor_id: uri_template_args['assessor_id'] = quote_id(self.inputs.assessor_id) elif self.inputs.reconstruction_id: uri_template_args['reconstruction_id'] = quote_id(self.inputs.reconstruction_id) # gather outputs and upload them for key, files in self.inputs._outputs.items(): for name in filename_to_list(files): if isinstance(name, list): for i, file_name in enumerate(name): push_file(self, xnat, file_name, '%s_' % i + key, uri_template_args ) else: push_file(self, xnat, name, key, uri_template_args) def quote_id(string): return str(string).replace('_', '---') def unquote_id(string): return str(string).replace('---', '_') def push_file(self, xnat, file_name, out_key, uri_template_args): # grab info from output file names val_list = [unquote_id(val) for part in os.path.split(file_name)[0].split(os.sep) for val in part.split('_')[1:] if part.startswith('_') and len(part.split('_')) % 2 ] keymap = dict(zip(val_list[1::2], val_list[2::2])) _label = [] for key, val in sorted(keymap.items()): if str(self.inputs.subject_id) not in val: _label.extend([key, val]) # select and define container level uri_template_args['container_type'] = None for container in ['assessor_id', 'reconstruction_id']: if getattr(self.inputs, container): uri_template_args['container_type'] = container.split('_id')[0] uri_template_args['container_id'] = uri_template_args[container] if uri_template_args['container_type'] is None: uri_template_args['container_type'] = 'reconstruction' uri_template_args['container_id'] = unquote_id( uri_template_args['experiment_id'] ) if _label: uri_template_args['container_id'] += ( '_results_%s' % '_'.join(_label) ) else: uri_template_args['container_id'] += 
'_results' # define resource level uri_template_args['resource_label'] = ( '%s_%s' % (uri_template_args['container_id'], out_key.split('.')[0] ) ) # define file level uri_template_args['file_name'] = os.path.split( os.path.abspath(unquote_id(file_name)))[1] uri_template = ( '/project/%(project_id)s/subject/%(subject_id)s' '/experiment/%(experiment_id)s/%(container_type)s/%(container_id)s' '/out/resource/%(resource_label)s/file/%(file_name)s' ) # unquote values before uploading for key in uri_template_args.keys(): uri_template_args[key] = unquote_id(uri_template_args[key]) # upload file remote_file = xnat.select(uri_template % uri_template_args) remote_file.insert(file_name, experiments='xnat:imageSessionData', use_label=True ) # shares the experiment back to the original project if relevant if 'original_project' in uri_template_args: experiment_template = ( '/project/%(original_project)s' '/subject/%(subject_id)s/experiment/%(experiment_id)s' ) xnat.select(experiment_template % uri_template_args ).share(uri_template_args['original_project']) def capture_provenance(): pass def push_provenance(): pass class SQLiteSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): database_file = File(exists=True, mandatory=True) table_name = traits.Str(mandatory=True) class SQLiteSink(IOBase): """ Very simple frontend for storing values into SQLite database. .. warning:: This is not a thread-safe node because it can write to a common shared location. It will not complain when it overwrites a file. Examples -------- >>> sql = SQLiteSink(input_names=['subject_id', 'some_measurement']) >>> sql.inputs.database_file = 'my_database.db' >>> sql.inputs.table_name = 'experiment_results' >>> sql.inputs.subject_id = 's1' >>> sql.inputs.some_measurement = 11.4 >>> sql.run() # doctest: +SKIP """ input_spec = SQLiteSinkInputSpec def __init__(self, input_names, **inputs): super(SQLiteSink, self).__init__(**inputs) self._input_names = filename_to_list(input_names) add_traits(self.inputs, [name for name in self._input_names]) def _list_outputs(self): """Execute this module. """ conn = sqlite3.connect(self.inputs.database_file, check_same_thread=False) c = conn.cursor() c.execute("INSERT OR REPLACE INTO %s (" % self.inputs.table_name + ",".join(self._input_names) + ") VALUES (" + ",".join(["?"] * len(self._input_names)) + ")", [getattr(self.inputs, name) for name in self._input_names]) conn.commit() c.close() return None class MySQLSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): host = traits.Str('localhost', mandatory=True, requires=['username', 'password'], xor=['config'], usedefault=True) config = File(mandatory=True, xor=['host'], desc="MySQL Options File (same format as my.cnf)") database_name = traits.Str( mandatory=True, desc='Otherwise known as the schema name') table_name = traits.Str(mandatory=True) username = traits.Str() password = traits.Str() class MySQLSink(IOBase): """ Very simple frontend for storing values into MySQL database. 
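Credentials are given either through the ``host``, ``username`` and ``password`` inputs or through a MySQL options file (same format as my.cnf) passed in the ``config`` input; the two options are mutually exclusive. A minimal sketch (the options-file path is only illustrative):

>>> sql = MySQLSink(input_names=['subject_id', 'some_measurement']) # doctest: +SKIP
>>> sql.inputs.config = '/home/user/.my.cnf' # doctest: +SKIP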
Examples -------- >>> sql = MySQLSink(input_names=['subject_id', 'some_measurement']) >>> sql.inputs.database_name = 'my_database' >>> sql.inputs.table_name = 'experiment_results' >>> sql.inputs.username = 'root' >>> sql.inputs.password = 'secret' >>> sql.inputs.subject_id = 's1' >>> sql.inputs.some_measurement = 11.4 >>> sql.run() # doctest: +SKIP """ input_spec = MySQLSinkInputSpec def __init__(self, input_names, **inputs): super(MySQLSink, self).__init__(**inputs) self._input_names = filename_to_list(input_names) add_traits(self.inputs, [name for name in self._input_names]) def _list_outputs(self): """Execute this module. """ import MySQLdb if isdefined(self.inputs.config): conn = MySQLdb.connect(db=self.inputs.database_name, read_default_file=self.inputs.config) else: conn = MySQLdb.connect(host=self.inputs.host, user=self.inputs.username, passwd=self.inputs.password, db=self.inputs.database_name) c = conn.cursor() c.execute("REPLACE INTO %s (" % self.inputs.table_name + ",".join(self._input_names) + ") VALUES (" + ",".join(["%s"] * len(self._input_names)) + ")", [getattr(self.inputs, name) for name in self._input_names]) conn.commit() c.close() return None nipype-0.9.2/nipype/interfaces/matlab.py000066400000000000000000000171741227300005300202710ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ General matlab interface code """ import os from nipype.interfaces.base import (CommandLineInputSpec, InputMultiPath, isdefined, CommandLine, traits, File, Directory) from .. import config def get_matlab_command(): if 'NIPYPE_NO_MATLAB' in os.environ: return None try: matlab_cmd = os.environ['MATLABCMD'] except: matlab_cmd = 'matlab' try: res = CommandLine(command='which', args=matlab_cmd, terminal_output='allatonce').run() matlab_path = res.runtime.stdout.strip() except Exception, e: return None return matlab_cmd no_matlab = get_matlab_command() is None class MatlabInputSpec(CommandLineInputSpec): """ Basic expected inputs to Matlab interface """ script = traits.Str(argstr='-r \"%s;exit\"', desc='m-code to run', mandatory=True, position=-1) uses_mcr = traits.Bool(desc='use MCR interface', xor=['nodesktop', 'nosplash', 'single_comp_thread'], nohash=True) nodesktop = traits.Bool(True, argstr='-nodesktop', usedefault=True, desc='Switch off desktop mode on unix platforms', nohash=True) nosplash = traits.Bool(True, argstr='-nosplash', usedefault=True, desc='Switch of splash screen', nohash=True) logfile = File(argstr='-logfile %s', desc='Save matlab output to log') single_comp_thread = traits.Bool(argstr="-singleCompThread", desc="force single threaded operation", nohash=True) # non-commandline options mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True) script_file = File('pyscript.m', usedefault=True, desc='Name of file to write m-code to') paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath') prescript = traits.List(["ver,","try,"], usedefault=True, desc='prescript to be added before code') postscript = traits.List(["\n,catch ME,", "fprintf(2,'MATLAB code threw an exception:\\n');", "fprintf(2,'%s\\n',ME.message);", "if length(ME.stack) ~= 0, fprintf(2,'File:%s\\nName:%s\\nLine:%d\\n',ME.stack.file,ME.stack.name,ME.stack.line);, end;", "end;"], desc='script added after code', usedefault = True) class MatlabCommand(CommandLine): """Interface that runs matlab code >>> import nipype.interfaces.matlab as matlab >>> mlab = matlab.MatlabCommand() >>> 
mlab.inputs.script = "which('who')" >>> out = mlab.run() # doctest: +SKIP """ _cmd = 'matlab' _default_matlab_cmd = None _default_mfile = None _default_paths = None input_spec = MatlabInputSpec def __init__(self, matlab_cmd = None, **inputs): """initializes interface to matlab (default 'matlab -nodesktop -nosplash') """ super(MatlabCommand,self).__init__(**inputs) if matlab_cmd and isdefined(matlab_cmd): self._cmd = matlab_cmd elif self._default_matlab_cmd: self._cmd = self._default_matlab_cmd if self._default_mfile and not isdefined(self.inputs.mfile): self.inputs.mfile = self._default_mfile if self._default_paths and not isdefined(self.inputs.paths): self.inputs.paths = self._default_paths if not isdefined(self.inputs.single_comp_thread) and \ not isdefined(self.inputs.uses_mcr): if config.getboolean('execution','single_thread_matlab'): self.inputs.single_comp_thread = True # For matlab commands force all output to be returned since matlab # does not have a clean way of notifying an error self.inputs.terminal_output = 'allatonce' @classmethod def set_default_matlab_cmd(cls, matlab_cmd): """Set the default MATLAB command line for MATLAB classes. This method is used to set values for all MATLAB subclasses. However, setting this will not update the output type for any existing instances. For these, assign the .inputs.matlab_cmd. """ cls._default_matlab_cmd = matlab_cmd @classmethod def set_default_mfile(cls, mfile): """Set the default MATLAB script file format for MATLAB classes. This method is used to set values for all MATLAB subclasses. However, setting this will not update the output type for any existing instances. For these, assign the .inputs.mfile. """ cls._default_mfile = mfile @classmethod def set_default_paths(cls, paths): """Set the default MATLAB paths for MATLAB classes. This method is used to set values for all MATLAB subclasses. However, setting this will not update the output type for any existing instances. For these, assign the .inputs.paths. 
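A short sketch of intended usage (the toolbox locations are assumptions, not defaults shipped with nipype):

>>> MatlabCommand.set_default_paths(['/opt/spm8', '/opt/matlab_toolboxes']) # doctest: +SKIP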
""" cls._default_paths = paths def _run_interface(self,runtime): self.inputs.terminal_output = 'allatonce' runtime = super(MatlabCommand, self)._run_interface(runtime) try: # Matlab can leave the terminal in a barbbled state os.system('stty sane') except: # We might be on a system where stty doesn't exist pass if 'MATLAB code threw an exception' in runtime.stderr: self.raise_exception(runtime) return runtime def _format_arg(self, name, trait_spec, value): if name in ['script']: argstr = trait_spec.argstr if self.inputs.uses_mcr: argstr='%s' return self._gen_matlab_command(argstr, value) return super(MatlabCommand, self)._format_arg(name, trait_spec, value) def _gen_matlab_command(self, argstr, script_lines): cwd = os.getcwd() mfile = self.inputs.mfile or self.inputs.uses_mcr paths = [] if isdefined(self.inputs.paths): paths = self.inputs.paths # prescript prescript = self.inputs.prescript postscript = self.inputs.postscript #postcript takes different default value depending on the mfile argument if mfile: prescript.insert(0,"fprintf(1,'Executing %s at %s:\\n',mfilename,datestr(now));") else: prescript.insert(0,"fprintf(1,'Executing code at %s:\\n',datestr(now));") for path in paths: prescript.append("addpath('%s');\n" % path) if not mfile: #clean up the code of comments and replace newlines with commas script_lines = ','.join([line for line in script_lines.split("\n") if not line.strip().startswith("%")]) script_lines = '\n'.join(prescript)+script_lines+'\n'.join(postscript) if mfile: mfile = file(os.path.join(cwd,self.inputs.script_file), 'wt') mfile.write(script_lines) mfile.close() if self.inputs.uses_mcr: script = '%s' % (os.path.join(cwd,self.inputs.script_file)) else: script = "addpath('%s');%s" % (cwd, self.inputs.script_file.split('.')[0]) else: script = ''.join(script_lines.split('\n')) return argstr % script nipype-0.9.2/nipype/interfaces/meshfix.py000066400000000000000000000166651227300005300205000ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Fixes meshes: Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import (CommandLine, CommandLineInputSpec, traits, TraitedSpec, isdefined, File) import os, os.path as op from nipype.utils.filemanip import split_filename class MeshFixInputSpec(CommandLineInputSpec): number_of_biggest_shells = traits.Int(argstr='--shells %d', desc="Only the N biggest shells are kept") epsilon_angle = traits.Range(argstr='-a %f', low=0.0, high=2.0, desc="Epsilon angle in degrees (must be between 0 and 2)") join_overlapping_largest_components = traits.Bool(argstr='-j', xor=['join_closest_components'], desc='Join 2 biggest components if they overlap, remove the rest.') join_closest_components = traits.Bool(argstr='-jc', xor=['join_closest_components'], desc='Join the closest pair of components.') quiet_mode = traits.Bool(argstr='-q', desc="Quiet mode, don't write much to stdout.") dont_clean = traits.Bool(argstr='--no-clean', desc="Don't Clean") save_as_stl = traits.Bool(xor= ['save_as_vmrl', 'save_as_freesurfer_mesh'], argstr='--stl', desc="Result is saved in stereolithographic format (.stl)") save_as_vmrl = traits.Bool(argstr='--wrl', xor= ['save_as_stl', 'save_as_freesurfer_mesh'], desc="Result is saved in VRML1.0 format (.wrl)") save_as_freesurfer_mesh 
= traits.Bool(argstr='--fsmesh', xor= ['save_as_vrml', 'save_as_stl'], desc="Result is saved in freesurfer mesh format") remove_handles = traits.Bool(argstr='--remove-handles', desc="Remove handles") uniform_remeshing_steps = traits.Int(argstr='-u %d', requires=['uniform_remeshing_vertices'], desc="Number of steps for uniform remeshing of the whole mesh") uniform_remeshing_vertices = traits.Int(argstr='--vertices %d', requires=['uniform_remeshing_steps'], desc="Constrains the number of vertices." \ "Must be used with uniform_remeshing_steps") laplacian_smoothing_steps = traits.Int(argstr='--smooth %d', desc="The number of laplacian smoothing steps to apply") x_shift = traits.Int(argstr='--smooth %d', desc="Shifts the coordinates of the vertices when saving. Output must be in FreeSurfer format") # Cutting, decoupling, dilation cut_outer = traits.Int(argstr='--cut-outer %d', desc="Remove triangles of 1st that are outside of the 2nd shell.") cut_inner = traits.Int(argstr='--cut-inner %d', desc="Remove triangles of 1st that are inside of the 2nd shell. Dilate 2nd by N; Fill holes and keep only 1st afterwards.") decouple_inin = traits.Int(argstr='--decouple-inin %d', desc="Treat 1st file as inner, 2nd file as outer component." \ "Resolve overlaps by moving inners triangles inwards. Constrain the min distance between the components > d.") decouple_outin = traits.Int(argstr='--decouple-outin %d', desc="Treat 1st file as outer, 2nd file as inner component." \ "Resolve overlaps by moving outers triangles inwards. Constrain the min distance between the components > d.") decouple_outout = traits.Int(argstr='--decouple-outout %d', desc="Treat 1st file as outer, 2nd file as inner component." \ "Resolve overlaps by moving outers triangles outwards. Constrain the min distance between the components > d.") finetuning_inwards = traits.Bool(argstr='--fineTuneIn ', requires=['finetuning_distance', 'finetuning_substeps']) finetuning_outwards = traits.Bool(argstr='--fineTuneIn ', requires=['finetuning_distance', 'finetuning_substeps'], xor=['finetuning_inwards'], desc = 'Similar to finetuning_inwards, but ensures minimal distance in the other direction') finetuning_distance = traits.Float(argstr='%f', requires=['finetuning_substeps'], desc="Used to fine-tune the minimal distance between surfaces." \ "A minimal distance d is ensured, and reached in n substeps. When using the surfaces for subsequent volume meshing by gmsh, this step prevent too flat tetrahedra2)") finetuning_substeps = traits.Int(argstr='%d', requires=['finetuning_distance'], desc="Used to fine-tune the minimal distance between surfaces." \ "A minimal distance d is ensured, and reached in n substeps. When using the surfaces for subsequent volume meshing by gmsh, this step prevent too flat tetrahedra2)") dilation = traits.Int(argstr='--dilate %d', desc="Dilate the surface by d. d < 0 means shrinking.") set_intersections_to_one = traits.Bool(argstr='--intersect', desc="If the mesh contains intersections, return value = 1." 
\ "If saved in gmsh format, intersections will be highlighted.") in_file1 = File(exists=True, argstr="%s", position=1, mandatory=True) in_file2 = File(exists=True, argstr="%s", position=2) output_type = traits.Enum('off', ['stl', 'msh', 'wrl', 'vrml', 'fs', 'off'], usedefault=True, desc='The output type to save the file as.') out_filename = File(genfile=True, argstr="-o %s", desc='The output filename for the fixed mesh file') class MeshFixOutputSpec(TraitedSpec): mesh_file = File(exists=True, desc='The output mesh file') class MeshFix(CommandLine): """ MeshFix v1.2-alpha - by Marco Attene, Mirko Windhoff, Axel Thielscher. .. seealso:: http://jmeshlib.sourceforge.net Sourceforge page http://simnibs.de/installation/meshfixandgetfem Ubuntu installation instructions If MeshFix is used for research purposes, please cite the following paper: M. Attene - A lightweight approach to repairing digitized polygon meshes. The Visual Computer, 2010. (c) Springer. Accepted input formats are OFF, PLY and STL. Other formats (like .msh for gmsh) are supported only partially. Example ------- >>> import nipype.interfaces.meshfix as mf >>> fix = mf.MeshFix() >>> fix.inputs.in_file1 = 'lh-pial.stl' >>> fix.inputs.in_file2 = 'rh-pial.stl' >>> fix.run() # doctest: +SKIP """ _cmd = 'meshfix' input_spec=MeshFixInputSpec output_spec=MeshFixOutputSpec def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_filename): path, name, ext = split_filename(self.inputs.out_filename) ext = ext.replace('.', '') out_types = ['stl', 'msh', 'wrl', 'vrml', 'fs', 'off'] # Make sure that the output filename uses one of the possible file types if any(ext == out_type.lower() for out_type in out_types): outputs['mesh_file'] = op.abspath(self.inputs.out_filename) else: outputs['mesh_file'] = op.abspath(name + '.' + self.inputs.output_type) else: outputs['mesh_file'] = op.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file1) if self.inputs.save_as_freesurfer_mesh or self.inputs.output_type == 'fs': self.inputs.output_type = 'fs' self.inputs.save_as_freesurfer_mesh = True if self.inputs.save_as_stl or self.inputs.output_type == 'stl': self.inputs.output_type = 'stl' self.inputs.save_as_stl = True if self.inputs.save_as_vmrl or self.inputs.output_type == 'vmrl': self.inputs.output_type = 'vmrl' self.inputs.save_as_vmrl = True return name + '_fixed.' 
+ self.inputs.output_type nipype-0.9.2/nipype/interfaces/mne/000077500000000000000000000000001227300005300172245ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/mne/__init__.py000066400000000000000000000000641227300005300213350ustar00rootroot00000000000000from nipype.interfaces.mne.base import WatershedBEM nipype-0.9.2/nipype/interfaces/mne/base.py000066400000000000000000000105101227300005300205050ustar00rootroot00000000000000from nipype.interfaces.base import (traits, File, Directory, TraitedSpec, OutputMultiPath) import os.path as op import glob from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec from nipype.utils.filemanip import list_to_filename import logging logging.basicConfig() iflogger = logging.getLogger('interface') class WatershedBEMInputSpec(FSTraitedSpec): subject_id = traits.Str(argstr='--subject %s', mandatory=True, desc='Subject ID (must have a complete Freesurfer directory)') subjects_dir = Directory(exists=True, mandatory=True, usedefault=True, desc='Path to Freesurfer subjects directory') volume = traits.Enum('T1', 'aparc+aseg', 'aseg', 'brain', 'orig', 'brainmask', 'ribbon', argstr='--volume %s', usedefault=True, desc='The volume from the "mri" directory to use (defaults to T1)') overwrite = traits.Bool(True, usedefault=True, argstr='--overwrite', desc='Overwrites the existing files') atlas_mode = traits.Bool(argstr='--atlas', desc='Use atlas mode for registration (default: no rigid alignment)') class WatershedBEMOutputSpec(TraitedSpec): mesh_files = OutputMultiPath(File(exists=True), desc=('Paths to the output meshes (brain, inner ' 'skull, outer skull, outer skin)')) brain_surface = File(exists=True, loc='bem/watershed', desc='Brain surface (in Freesurfer format)') inner_skull_surface = File(exists=True, loc='bem/watershed', desc='Inner skull surface (in Freesurfer format)') outer_skull_surface = File(exists=True, loc='bem/watershed', desc='Outer skull surface (in Freesurfer format)') outer_skin_surface = File(exists=True, loc='bem/watershed', desc='Outer skin surface (in Freesurfer format)') fif_file = File(exists=True, loc='bem', altkey='fif', desc='"fif" format file for EEG processing in MNE') cor_files = OutputMultiPath(File(exists=True), loc='bem/watershed/ws', altkey='COR', desc='"COR" format files') class WatershedBEM(FSCommand): """Uses mne_watershed_bem to get information from dicom directories Examples -------- >>> from nipype.interfaces.mne import WatershedBEM >>> bem = WatershedBEM() >>> bem.inputs.subject_id = 'subj1' >>> bem.inputs.subjects_dir = '.' 
>>> bem.cmdline 'mne_watershed_bem --overwrite --subject subj1 --volume T1' >>> bem.run() # doctest: +SKIP """ _cmd = 'mne_watershed_bem' input_spec = WatershedBEMInputSpec output_spec = WatershedBEMOutputSpec _additional_metadata = ['loc', 'altkey'] def _get_files(self, path, key, dirval, altkey=None): globsuffix = '*' globprefix = '*' keydir = op.join(path, dirval) if altkey: key = altkey globpattern = op.join(keydir, ''.join((globprefix, key, globsuffix))) return glob.glob(globpattern) def _list_outputs(self): outputs = self.output_spec().get() subjects_dir = self.inputs.subjects_dir subject_path = op.join(subjects_dir, self.inputs.subject_id) output_traits = self._outputs() mesh_paths = [] for k in outputs.keys(): if k != 'mesh_files': val = self._get_files(subject_path, k, output_traits.traits()[k].loc, output_traits.traits()[k].altkey) if val: value_list = list_to_filename(val) if isinstance(value_list, list): out_files = [] for value in value_list: out_files.append(op.abspath(value)) elif isinstance(value_list, str): out_files = op.abspath(value_list) else: raise TypeError outputs[k] = out_files if not k.rfind('surface') == -1: mesh_paths.append(out_files) outputs['mesh_files'] = mesh_paths return outputs nipype-0.9.2/nipype/interfaces/mne/setup.py000066400000000000000000000007101227300005300207340ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('mne', parent_package, top_path) #config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/mne/tests/000077500000000000000000000000001227300005300203665ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py000066400000000000000000000030671227300005300253070ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mne.base import WatershedBEM def test_WatershedBEM_inputs(): input_map = dict(args=dict(argstr='%s', ), atlas_mode=dict(argstr='--atlas', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), overwrite=dict(argstr='--overwrite', usedefault=True, ), subject_id=dict(argstr='--subject %s', mandatory=True, ), subjects_dir=dict(mandatory=True, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), volume=dict(argstr='--volume %s', usedefault=True, ), ) inputs = WatershedBEM.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_WatershedBEM_outputs(): output_map = dict(brain_surface=dict(loc='bem/watershed', ), cor_files=dict(altkey='COR', loc='bem/watershed/ws', ), fif_file=dict(altkey='fif', loc='bem', ), inner_skull_surface=dict(loc='bem/watershed', ), mesh_files=dict(), outer_skin_surface=dict(loc='bem/watershed', ), outer_skull_surface=dict(loc='bem/watershed', ), ) outputs = WatershedBEM.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
nipype-0.9.2/nipype/interfaces/mrtrix/000077500000000000000000000000001227300005300177725ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/mrtrix/__init__.py000066400000000000000000000016131227300005300221040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .tracking import (Tracks2Prob, StreamlineTrack, DiffusionTensorStreamlineTrack, SphericallyDeconvolutedStreamlineTrack, ProbabilisticSphericallyDeconvolutedStreamlineTrack) from .tensors import (FSL2MRTrix, ConstrainedSphericalDeconvolution, DWI2SphericalHarmonicsImage, EstimateResponseForSH, GenerateDirections, FindShPeaks, Directions2Amplitude) from .preprocess import (MRConvert, MRMultiply, MRTrixViewer, MRTrixInfo, GenerateWhiteMatterMask, DWI2Tensor, Tensor2ApparentDiffusion, Tensor2FractionalAnisotropy, Tensor2Vector, MedianFilter3D, Erode, Threshold) from .convert import MRTrix2TrackVisnipype-0.9.2/nipype/interfaces/mrtrix/convert.py000066400000000000000000000234651227300005300220360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ # -*- coding: utf-8 -*- import os.path as op import nibabel as nb, nibabel.trackvis as trk import numpy as np from nibabel.trackvis import HeaderError from nibabel.volumeutils import native_code from ..base import (TraitedSpec, BaseInterface, BaseInterfaceInputSpec, File, isdefined, traits) from ...utils.filemanip import split_filename from ...utils.misc import package_check from ...workflows.misc.utils import get_data_dims, get_vox_dims import warnings have_dipy = True try: package_check('dipy') except Exception, e: False else: from dipy.tracking.utils import move_streamlines, affine_from_fsl_mat_file from nibabel.orientations import aff2axcodes from ... 
import logging iflogger = logging.getLogger('interface') def transform_to_affine(streams, header, affine): rotation, scale = np.linalg.qr(affine) streams = move_streamlines(streams, rotation) scale[0:3,0:3] = np.dot(scale[0:3,0:3], np.diag(1./header['voxel_size'])) scale[0:3,3] = abs(scale[0:3,3]) streams = move_streamlines(streams, scale) return streams def read_mrtrix_tracks(in_file, as_generator=True): header = read_mrtrix_header(in_file) streamlines = read_mrtrix_streamlines(in_file, header, as_generator) return header, streamlines def read_mrtrix_header(in_file): fileobj = open(in_file,'r') header = {} iflogger.info('Reading header data...') for line in fileobj: if line == 'END\n': iflogger.info('Reached the end of the header!') break elif ': ' in line: line = line.replace('\n','') line = line.replace("'","") key = line.split(': ')[0] value = line.split(': ')[1] header[key] = value iflogger.info('...adding "{v}" to header for key "{k}"'.format(v=value,k=key)) fileobj.close() header['count'] = int(header['count'].replace('\n','')) header['offset'] = int(header['file'].replace('.','')) return header def read_mrtrix_streamlines(in_file, header, as_generator=True): offset = header['offset'] stream_count = header['count'] fileobj = open(in_file,'r') fileobj.seek(offset) endianness = native_code f4dt = np.dtype(endianness + 'f4') pt_cols = 3 bytesize = pt_cols*4 def points_per_track(offset): n_streams = 0 n_points = 0 track_points = [] iflogger.info('Identifying the number of points per tract...') all_str = fileobj.read() num_triplets = len(all_str)/bytesize pts = np.ndarray(shape=(num_triplets,pt_cols), dtype='f4',buffer=all_str) nonfinite_list = np.where(np.isfinite(pts[:,2]) == False) nonfinite_list = list(nonfinite_list[0])[0:-1] # Converts numpy array to list, removes the last value nonfinite_list_bytes = [offset+x*bytesize for x in nonfinite_list] for idx, value in enumerate(nonfinite_list): if idx == 0: track_points.append(nonfinite_list[idx]) else: track_points.append(nonfinite_list[idx]-nonfinite_list[idx-1]-1) return track_points, nonfinite_list def track_gen(track_points): n_streams = 0 iflogger.info('Reading tracks...') while True: n_pts = track_points[n_streams] pts_str = fileobj.read(n_pts * bytesize) nan_str = fileobj.read(bytesize) if len(pts_str) < (n_pts * bytesize): if not n_streams == stream_count: raise HeaderError( 'Expecting %s points, found only %s' % ( stream_count, n_streams)) iflogger.error('Expecting %s points, found only %s' % ( stream_count, n_streams)) break pts = np.ndarray( shape = (n_pts, pt_cols), dtype = f4dt, buffer = pts_str) nan_pt = np.ndarray( shape = (1, pt_cols), dtype = f4dt, buffer = nan_str) if np.isfinite(nan_pt[0][0]): raise ValueError break xyz = pts[:,:3] yield xyz n_streams += 1 if n_streams == stream_count: iflogger.info('100% : {n} tracks read'.format(n=n_streams)) raise StopIteration if n_streams % (float(stream_count)/100) == 0: percent = int(float(n_streams)/float(stream_count)*100) iflogger.info('{p}% : {n} tracks read'.format(p=percent, n=n_streams)) track_points, nonfinite_list = points_per_track(offset) fileobj.seek(offset) streamlines = track_gen(track_points) if not as_generator: streamlines = list(streamlines) return streamlines class MRTrix2TrackVisInputSpec(TraitedSpec): in_file = File(exists=True, mandatory=True, desc='The input file for the tracks in MRTrix (.tck) format') image_file = File(exists=True, desc='The image the tracks were generated from') matrix_file = File(exists=True, desc='A transformation matrix to apply to 
the tracts after they have been generated (from FLIRT - affine transformation from image_file to registration_image_file)') registration_image_file = File(exists=True, desc='The final image the tracks should be registered to.') out_filename = File('converted.trk', genfile=True, usedefault=True, desc='The output filename for the tracks in TrackVis (.trk) format') class MRTrix2TrackVisOutputSpec(TraitedSpec): out_file = File(exists=True) class MRTrix2TrackVis(BaseInterface): """ Converts MRtrix (.tck) tract files into TrackVis (.trk) format using functions from dipy Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> tck2trk = mrt.MRTrix2TrackVis() >>> tck2trk.inputs.in_file = 'dwi_CSD_tracked.tck' >>> tck2trk.inputs.image_file = 'diffusion.nii' >>> tck2trk.run() # doctest: +SKIP """ input_spec = MRTrix2TrackVisInputSpec output_spec = MRTrix2TrackVisOutputSpec def _run_interface(self, runtime): dx, dy, dz = get_data_dims(self.inputs.image_file) vx, vy, vz = get_vox_dims(self.inputs.image_file) image_file = nb.load(self.inputs.image_file) affine = image_file.get_affine() out_filename = op.abspath(self.inputs.out_filename) #Reads MRTrix tracks header, streamlines = read_mrtrix_tracks(self.inputs.in_file, as_generator=True) iflogger.info('MRTrix Header:') iflogger.info(header) # Writes to Trackvis trk_header = nb.trackvis.empty_header() trk_header['dim'] = [dx,dy,dz] trk_header['voxel_size'] = [vx,vy,vz] trk_header['n_count'] = header['count'] if isdefined(self.inputs.matrix_file) and isdefined(self.inputs.registration_image_file): iflogger.info('Applying transformation from matrix file {m}'.format(m=self.inputs.matrix_file)) xfm = np.genfromtxt(self.inputs.matrix_file) iflogger.info(xfm) registration_image_file = nb.load(self.inputs.registration_image_file) reg_affine = registration_image_file.get_affine() r_dx, r_dy, r_dz = get_data_dims(self.inputs.registration_image_file) r_vx, r_vy, r_vz = get_vox_dims(self.inputs.registration_image_file) iflogger.info('Using affine from registration image file {r}'.format(r=self.inputs.registration_image_file)) iflogger.info(reg_affine) trk_header['vox_to_ras'] = reg_affine trk_header['dim'] = [r_dx,r_dy,r_dz] trk_header['voxel_size'] = [r_vx,r_vy,r_vz] affine = np.dot(affine,np.diag(1./np.array([vx, vy, vz, 1]))) transformed_streamlines = transform_to_affine(streamlines, trk_header, affine) aff = affine_from_fsl_mat_file(xfm, [vx,vy,vz], [r_vx,r_vy,r_vz]) iflogger.info(aff) axcode = aff2axcodes(reg_affine) trk_header['voxel_order'] = axcode[0]+axcode[1]+axcode[2] final_streamlines = move_streamlines(transformed_streamlines, aff) trk_tracks = ((ii,None,None) for ii in final_streamlines) trk.write(out_filename, trk_tracks, trk_header) iflogger.info('Saving transformed Trackvis file as {out}'.format(out=out_filename)) iflogger.info('New TrackVis Header:') iflogger.info(trk_header) else: iflogger.info('Applying transformation from scanner coordinates to {img}'.format(img=self.inputs.image_file)) axcode = aff2axcodes(affine) trk_header['voxel_order'] = axcode[0]+axcode[1]+axcode[2] trk_header['vox_to_ras'] = affine transformed_streamlines = transform_to_affine(streamlines, trk_header, affine) trk_tracks = ((ii,None,None) for ii in transformed_streamlines) trk.write(out_filename, trk_tracks, trk_header) iflogger.info('Saving Trackvis file as {out}'.format(out=out_filename)) iflogger.info('TrackVis Header:') iflogger.info(trk_header) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = 
op.abspath(self.inputs.out_filename) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '.trk' nipype-0.9.2/nipype/interfaces/mrtrix/defhdr.mat000066400000000000000000000010251227300005300217270ustar00rootroot00000000000000MATLAB 5.0 MAT-file, Platform: GLNXA64, Created on: Mon Jul 18 07:53:57 2011 IMx햽N0m*RՁ&V0bj-vdvx q[׹P'˗M; UmTJħ?!Gw *= j\21S9Ne1V ]_uHZiYI+a?RP J'DuJE2bb QR"Wj9aȬ62FܽA A)f7wP}U 0.( zFV2p_ZH7W | w06cE8%|A<#۹5.[u5r}h;޷-{}w݈[ = 'ߗ-{|~a} Gknipype-0.9.2/nipype/interfaces/mrtrix/preprocess.py000066400000000000000000000644021227300005300225370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File, InputMultiPath, isdefined from nipype.utils.filemanip import split_filename import os.path as op class MRConvertInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='voxel-order data filename') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output filename') extract_at_axis = traits.Enum(1,2,3, argstr='-coord %s', position=1, desc='"Extract data only at the coordinates specified. This option specifies the Axis. Must be used in conjunction with extract_at_coordinate.') extract_at_coordinate = traits.List(traits.Float, argstr='%s', sep=',', position=2, minlen=1, maxlen=3, desc='"Extract data only at the coordinates specified. This option specifies the coordinates. Must be used in conjunction with extract_at_axis. Three comma-separated numbers giving the size of each voxel in mm.') voxel_dims = traits.List(traits.Float, argstr='-vox %s', sep=',', position=3, minlen=3, maxlen=3, desc='Three comma-separated numbers giving the size of each voxel in mm.') output_datatype = traits.Enum("nii", "float", "char", "short", "int", "long", "double", argstr='-output %s', position=2, desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"') #, usedefault=True) extension = traits.Enum("mif","nii", "float", "char", "short", "int", "long", "double", position=2, desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', usedefault=True) layout = traits.Enum("nii", "float", "char", "short", "int", "long", "double", argstr='-output %s', position=2, desc='specify the layout of the data in memory. 
The actual layout produced will depend on whether the output image format can support it.') resample = traits.Float(argstr='-scale %d', position=3, units='mm', desc='Apply scaling to the intensity values.') offset_bias = traits.Float(argstr='-scale %d', position=3, units='mm', desc='Apply offset to the intensity values.') replace_NaN_with_zero = traits.Bool(argstr='-zero', position=3, desc="Replace all NaN values with zero.") prs = traits.Bool(argstr='-prs', position=3, desc="Assume that the DW gradients are specified in the PRS frame (Siemens DICOM only).") class MRConvertOutputSpec(TraitedSpec): converted = File(exists=True, desc='path/name of 4D volume in voxel order') class MRConvert(CommandLine): """ Perform conversion between different file types and optionally extract a subset of the input image. If used correctly, this program can be a very useful workhorse. In addition to converting images between different formats, it can be used to extract specific studies from a data set, extract a specific region of interest, flip the images, or to scale the intensity of the images. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> mrconvert = mrt.MRConvert() >>> mrconvert.inputs.in_file = 'dwi_FA.mif' >>> mrconvert.inputs.out_filename = 'dwi_FA.nii' >>> mrconvert.run() # doctest: +SKIP """ _cmd = 'mrconvert' input_spec=MRConvertInputSpec output_spec=MRConvertOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['converted'] = self.inputs.out_filename if not isdefined(outputs['converted']): outputs['converted'] = op.abspath(self._gen_outfilename()) else: outputs['converted'] = op.abspath(outputs['converted']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) if isdefined(self.inputs.out_filename): outname = self.inputs.out_filename else: outname = name + '_mrconvert.' + self.inputs.extension return outname class DWI2TensorInputSpec(CommandLineInputSpec): in_file = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=-2, desc='Diffusion-weighted images') out_filename = File(name_template="%s_tensor.mif", name_source="in_file", output_name="tensor", argstr='%s', desc='Output tensor filename', position=-1) encoding_file = File(argstr='-grad %s', position=2, desc=('Encoding file supplied as a 4xN text file with ' 'each line is in the format [ X Y Z b ], where ' '[ X Y Z ] describe the direction of the applied ' 'gradient, and b gives the b-value in units ' '(1000 s/mm^2). See FSL2MRTrix()')) ignore_slice_by_volume = traits.List(traits.Int, argstr='-ignoreslices %s', sep=' ', position=2, minlen=2, maxlen=2, desc=('Requires two values (i.e. [34 ' '1] for [Slice Volume] Ignores ' 'the image slices specified ' 'when computing the tensor. ' 'Slice here means the z ' 'coordinate of the slice to be ' 'ignored.')) ignore_volumes = traits.List(traits.Int, argstr='-ignorevolumes %s', sep=' ', position=2, minlen=1, desc=('Requires two values (i.e. 
[2 5 6] for ' '[Volumes] Ignores the image volumes ' 'specified when computing the tensor.')) quiet = traits.Bool(argstr='-quiet', position=1, desc=("Do not display information messages or progress " "status.")) debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class DWI2TensorOutputSpec(TraitedSpec): tensor = File(exists=True, desc='path/name of output diffusion tensor image') class DWI2Tensor(CommandLine): """ Converts diffusion-weighted images to tensor images. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> dwi2tensor = mrt.DWI2Tensor() >>> dwi2tensor.inputs.in_file = 'dwi.mif' >>> dwi2tensor.inputs.encoding_file = 'encoding.txt' >>> dwi2tensor.cmdline 'dwi2tensor -grad encoding.txt dwi.mif dwi_tensor.mif' >>> dwi2tensor.run() # doctest: +SKIP """ _cmd = 'dwi2tensor' input_spec=DWI2TensorInputSpec output_spec=DWI2TensorOutputSpec class Tensor2VectorInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Diffusion tensor image') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output vector filename') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class Tensor2VectorOutputSpec(TraitedSpec): vector = File(exists=True, desc='the output image of the major eigenvectors of the diffusion tensor image.') class Tensor2Vector(CommandLine): """ Generates a map of the major eigenvectors of the tensors in each voxel. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> tensor2vector = mrt.Tensor2Vector() >>> tensor2vector.inputs.in_file = 'dwi_tensor.mif' >>> tensor2vector.run() # doctest: +SKIP """ _cmd = 'tensor2vector' input_spec=Tensor2VectorInputSpec output_spec=Tensor2VectorOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['vector'] = self.inputs.out_filename if not isdefined(outputs['vector']): outputs['vector'] = op.abspath(self._gen_outfilename()) else: outputs['vector'] = op.abspath(outputs['vector']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_vector.mif' class Tensor2FractionalAnisotropyInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Diffusion tensor image') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output Fractional Anisotropy filename') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class Tensor2FractionalAnisotropyOutputSpec(TraitedSpec): FA = File(exists=True, desc='the output image of the major eigenvectors of the diffusion tensor image.') class Tensor2FractionalAnisotropy(CommandLine): """ Generates a map of the fractional anisotropy in each voxel. 
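If ``out_filename`` is not set, the result is written to the working directory as ``<input name>_FA.mif``; for example, an input of ``dwi_tensor.mif`` produces ``dwi_tensor_FA.mif``.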
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> tensor2FA = mrt.Tensor2FractionalAnisotropy() >>> tensor2FA.inputs.in_file = 'dwi_tensor.mif' >>> tensor2FA.run() # doctest: +SKIP """ _cmd = 'tensor2FA' input_spec=Tensor2FractionalAnisotropyInputSpec output_spec=Tensor2FractionalAnisotropyOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['FA'] = self.inputs.out_filename if not isdefined(outputs['FA']): outputs['FA'] = op.abspath(self._gen_outfilename()) else: outputs['FA'] = op.abspath(outputs['FA']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_FA.mif' class Tensor2ApparentDiffusionInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Diffusion tensor image') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output Fractional Anisotropy filename') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class Tensor2ApparentDiffusionOutputSpec(TraitedSpec): ADC = File(exists=True, desc='the output image of the major eigenvectors of the diffusion tensor image.') class Tensor2ApparentDiffusion(CommandLine): """ Generates a map of the apparent diffusion coefficient (ADC) in each voxel Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> tensor2ADC = mrt.Tensor2ApparentDiffusion() >>> tensor2ADC.inputs.in_file = 'dwi_tensor.mif' >>> tensor2ADC.run() # doctest: +SKIP """ _cmd = 'tensor2ADC' input_spec=Tensor2ApparentDiffusionInputSpec output_spec=Tensor2ApparentDiffusionOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['ADC'] = self.inputs.out_filename if not isdefined(outputs['ADC']): outputs['ADC'] = op.abspath(self._gen_outfilename()) else: outputs['ADC'] = op.abspath(outputs['ADC']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_ADC.mif' class MRMultiplyInputSpec(CommandLineInputSpec): in_files = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=-2, desc='Input images to be multiplied') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output image filename') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class MRMultiplyOutputSpec(TraitedSpec): out_file = File(exists=True, desc='the output image of the multiplication') class MRMultiply(CommandLine): """ Multiplies two images. 
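The images to be multiplied are passed together in ``in_files``; if ``out_filename`` is not set, the product is written to the working directory as ``<first input name>_MRMult.mif``.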
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> MRmult = mrt.MRMultiply() >>> MRmult.inputs.in_files = ['dwi.mif', 'dwi_WMProb.mif'] >>> MRmult.run() # doctest: +SKIP """ _cmd = 'mrmult' input_spec=MRMultiplyInputSpec output_spec=MRMultiplyOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_filename if not isdefined(outputs['out_file']): outputs['out_file'] = op.abspath(self._gen_outfilename()) else: outputs['out_file'] = op.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_files[0]) return name + '_MRMult.mif' class MRTrixViewerInputSpec(CommandLineInputSpec): in_files = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=-2, desc='Input images to be viewed') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class MRTrixViewerOutputSpec(TraitedSpec): pass class MRTrixViewer(CommandLine): """ Loads the input images in the MRTrix Viewer. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> MRview = mrt.MRTrixViewer() >>> MRview.inputs.in_files = 'dwi.mif' >>> MRview.run() # doctest: +SKIP """ _cmd = 'mrview' input_spec=MRTrixViewerInputSpec output_spec=MRTrixViewerOutputSpec def _list_outputs(self): return class MRTrixInfoInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Input images to be read') class MRTrixInfoOutputSpec(TraitedSpec): pass class MRTrixInfo(CommandLine): """ Prints out relevant header information found in the image specified. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> MRinfo = mrt.MRTrixInfo() >>> MRinfo.inputs.in_file = 'dwi.mif' >>> MRinfo.run() # doctest: +SKIP """ _cmd = 'mrinfo' input_spec=MRTrixInfoInputSpec output_spec=MRTrixInfoOutputSpec def _list_outputs(self): return class GenerateWhiteMatterMaskInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-3, desc='Diffusion-weighted images') binary_mask = File(exists=True, argstr='%s', mandatory=True, position = -2, desc='Binary brain mask') out_WMProb_filename = File(genfile=True, argstr='%s', position = -1, desc='Output WM probability image filename') encoding_file = File(exists=True, argstr='-grad %s', mandatory=True, position=1, desc='Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix') noise_level_margin = traits.Float(argstr='-margin %s', desc='Specify the width of the margin on either side of the image to be used to estimate the noise level (default = 10)') class GenerateWhiteMatterMaskOutputSpec(TraitedSpec): WMprobabilitymap = File(exists=True, desc='WMprobabilitymap') class GenerateWhiteMatterMask(CommandLine): """ Generates a white matter probability mask from the DW images. 
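Note that a binary brain mask (``binary_mask``) is a mandatory input alongside the DWI and the gradient encoding file; in the example below an additional line such as the following (the mask file name is only illustrative) is also required:

>>> genWM.inputs.binary_mask = 'brainmask.mif' # doctest: +SKIP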
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> genWM = mrt.GenerateWhiteMatterMask() >>> genWM.inputs.in_file = 'dwi.mif' >>> genWM.inputs.encoding_file = 'encoding.txt' >>> genWM.run() # doctest: +SKIP """ _cmd = 'gen_WM_mask' input_spec=GenerateWhiteMatterMaskInputSpec output_spec=GenerateWhiteMatterMaskOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['WMprobabilitymap'] = op.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name is 'out_WMProb_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_WMProb.mif' class ErodeInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Input mask image to be eroded') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output image filename') number_of_passes = traits.Int(argstr='-npass %s', desc='the number of passes (default: 1)') dilate = traits.Bool(argstr='-dilate', position=1, desc="Perform dilation rather than erosion") quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class ErodeOutputSpec(TraitedSpec): out_file = File(exists=True, desc='the output image') class Erode(CommandLine): """ Erode (or dilates) a mask (i.e. binary) image Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> erode = mrt.Erode() >>> erode.inputs.in_file = 'mask.mif' >>> erode.run() # doctest: +SKIP """ _cmd = 'erode' input_spec=ErodeInputSpec output_spec=ErodeOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_filename if not isdefined(outputs['out_file']): outputs['out_file'] = op.abspath(self._gen_outfilename()) else: outputs['out_file'] = op.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_erode.mif' class ThresholdInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='The input image to be thresholded') out_filename = File(genfile=True, argstr='%s', position=-1, desc='The output binary image mask.') absolute_threshold_value = traits.Float(argstr='-abs %s', desc='Specify threshold value as absolute intensity.') percentage_threshold_value = traits.Float(argstr='-percent %s', desc='Specify threshold value as a percentage of the peak intensity in the input image.') invert = traits.Bool(argstr='-invert', position=1, desc="Invert output binary mask") replace_zeros_with_NaN = traits.Bool(argstr='-nan', position=1, desc="Replace all zero values with NaN") quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class ThresholdOutputSpec(TraitedSpec): out_file = File(exists=True, desc='The output binary image mask.') class Threshold(CommandLine): """ Create bitwise image by thresholding image intensity. By default, the threshold level is determined using a histogram analysis to cut out the background. Otherwise, the threshold intensity can be specified using command line options. Note that only the first study is used for thresholding. 
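    If the automatic, histogram-based threshold is not appropriate, an
    explicit cut-off can be given through ``absolute_threshold_value`` or
    ``percentage_threshold_value``. A short sketch (the threshold value is
    chosen purely for illustration):

    >>> import nipype.interfaces.mrtrix as mrt
    >>> thresh = mrt.Threshold()
    >>> thresh.inputs.in_file = 'wm_mask.mif'
    >>> thresh.inputs.absolute_threshold_value = 0.5
    >>> thresh.run()                                 # doctest: +SKIP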
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> thresh = mrt.Threshold() >>> thresh.inputs.in_file = 'wm_mask.mif' >>> thresh.run() # doctest: +SKIP """ _cmd = 'threshold' input_spec=ThresholdInputSpec output_spec=ThresholdOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_filename if not isdefined(outputs['out_file']): outputs['out_file'] = op.abspath(self._gen_outfilename()) else: outputs['out_file'] = op.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_thresh.mif' class MedianFilter3DInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Input images to be smoothed') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output image filename') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class MedianFilter3DOutputSpec(TraitedSpec): out_file = File(exists=True, desc='the output image') class MedianFilter3D(CommandLine): """ Smooth images using a 3x3x3 median filter. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> median3d = mrt.MedianFilter3D() >>> median3d.inputs.in_file = 'mask.mif' >>> median3d.run() # doctest: +SKIP """ _cmd = 'median3D' input_spec=MedianFilter3DInputSpec output_spec=MedianFilter3DOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_filename if not isdefined(outputs['out_file']): outputs['out_file'] = op.abspath(self._gen_outfilename()) else: outputs['out_file'] = op.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_median3D.mif' class MRTransformInputSpec(CommandLineInputSpec): in_files = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=-2, desc='Input images to be transformed') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output image') invert = traits.Bool(argstr='-inverse', position=1, desc="Invert the specified transform before using it") replace_transform = traits.Bool(argstr='-replace', position=1, desc="replace the current transform by that specified, rather than applying it to the current transform") transformation_file = File(exists=True, argstr='-transform %s', position=1, desc='The transform to apply, in the form of a 4x4 ascii file.') template_image = File(exists=True, argstr='-template %s', position=1, desc='Reslice the input image to match the specified template image.') reference_image = File(exists=True, argstr='-reference %s', position=1, desc='in case the transform supplied maps from the input image onto a reference image, use this option to specify the reference. Note that this implicitly sets the -replace option.') flip_x = traits.Bool(argstr='-flipx', position=1, desc="assume the transform is supplied assuming a coordinate system with the x-axis reversed relative to the MRtrix convention (i.e. x increases from right to left). This is required to handle transform matrices produced by FSL's FLIRT command. 
This is only used in conjunction with the -reference option.") quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class MRTransformOutputSpec(TraitedSpec): out_file = File(exists=True, desc='the output image of the transformation') class MRTransform(CommandLine): """ Apply spatial transformations or reslice images Example ------- >>> MRxform = MRTransform() >>> MRxform.inputs.in_files = 'anat_coreg.mif' >>> MRxform.run() # doctest: +SKIP """ _cmd = 'mrtransform' input_spec=MRTransformInputSpec output_spec=MRTransformOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_filename if not isdefined(outputs['out_file']): outputs['out_file'] = op.abspath(self._gen_outfilename()) else: outputs['out_file'] = op.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_files[0]) return name + '_MRTransform.mif' nipype-0.9.2/nipype/interfaces/mrtrix/setup.py000066400000000000000000000006511227300005300215060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('mrtrix', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/mrtrix/tensors.py000066400000000000000000000511271227300005300220470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import (CommandLineInputSpec, CommandLine, BaseInterface, BaseInterfaceInputSpec, traits, File, TraitedSpec, Directory, InputMultiPath, OutputMultiPath, isdefined) from nipype.utils.filemanip import split_filename import os.path as op import numpy as np from ... import logging iflogger = logging.getLogger('interface') class DWI2SphericalHarmonicsImageInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Diffusion-weighted images') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output filename') encoding_file = File(exists=True, argstr='-grad %s', mandatory=True, position=1, desc='Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix') maximum_harmonic_order = traits.Float(argstr='-lmax %s', desc='set the maximum harmonic order for the output series. 
By default, the program will use the highest possible lmax given the number of diffusion-weighted images.') normalise = traits.Bool(argstr='-normalise', position=3, desc="normalise the DW signal to the b=0 image") class DWI2SphericalHarmonicsImageOutputSpec(TraitedSpec): spherical_harmonics_image = File(exists=True, desc='Spherical harmonics image') class DWI2SphericalHarmonicsImage(CommandLine): """ Convert base diffusion-weighted images to their spherical harmonic representation. This program outputs the spherical harmonic decomposition for the set measured signal attenuations. The signal attenuations are calculated by identifying the b-zero images from the diffusion encoding supplied (i.e. those with zero as the b-value), and dividing the remaining signals by the mean b-zero signal intensity. The spherical harmonic decomposition is then calculated by least-squares linear fitting. Note that this program makes use of implied symmetries in the diffusion profile. First, the fact the signal attenuation profile is real implies that it has conjugate symmetry, i.e. Y(l,-m) = Y(l,m)* (where * denotes the complex conjugate). Second, the diffusion profile should be antipodally symmetric (i.e. S(x) = S(-x)), implying that all odd l components should be zero. Therefore, this program only computes the even elements. Note that the spherical harmonics equations used here differ slightly from those conventionally used, in that the (-1)^m factor has been omitted. This should be taken into account in all subsequent calculations. Each volume in the output image corresponds to a different spherical harmonic component, according to the following convention: * [0] Y(0,0) * [1] Im {Y(2,2)} * [2] Im {Y(2,1)} * [3] Y(2,0) * [4] Re {Y(2,1)} * [5] Re {Y(2,2)} * [6] Im {Y(4,4)} * [7] Im {Y(4,3)} Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> dwi2SH = mrt.DWI2SphericalHarmonicsImage() >>> dwi2SH.inputs.in_file = 'diffusion.nii' >>> dwi2SH.inputs.encoding_file = 'encoding.txt' >>> dwi2SH.run() # doctest: +SKIP """ _cmd = 'dwi2SH' input_spec=DWI2SphericalHarmonicsImageInputSpec output_spec=DWI2SphericalHarmonicsImageOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['spherical_harmonics_image'] = self.inputs.out_filename if not isdefined(outputs['spherical_harmonics_image']): outputs['spherical_harmonics_image'] = op.abspath(self._gen_outfilename()) else: outputs['spherical_harmonics_image'] = op.abspath(outputs['spherical_harmonics_image']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_SH.mif' class ConstrainedSphericalDeconvolutionInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-3, desc='diffusion-weighted image') response_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='the diffusion-weighted signal response function for a single fibre population (see EstimateResponse)') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output filename') mask_image = File(exists=True, argstr='-mask %s', position=2, desc='only perform computation within the specified binary brain mask image') encoding_file = File(exists=True, argstr='-grad %s', position=1, desc='Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the 
b-value in units (1000 s/mm^2). See FSL2MRTrix') filter_file = File(exists=True, argstr='-filter %s', position=-2, desc='a text file containing the filtering coefficients for each even harmonic order.' \ 'the linear frequency filtering parameters used for the initial linear spherical deconvolution step (default = [ 1 1 1 0 0 ]).') lambda_value = traits.Float(argstr='-lambda %s', desc='the regularisation parameter lambda that controls the strength of the constraint (default = 1.0).') maximum_harmonic_order = traits.Int(argstr='-lmax %s', desc='set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.') threshold_value = traits.Float(argstr='-threshold %s', desc='the threshold below which the amplitude of the FOD is assumed to be zero, expressed as a fraction of the mean value of the initial FOD (default = 0.1)') iterations = traits.Int(argstr='-niter %s', desc='the maximum number of iterations to perform for each voxel (default = 50)') debug = traits.Bool(argstr='-debug', desc='Display debugging messages.') directions_file = File(exists=True, argstr='-directions %s', position=-2, desc='a text file containing the [ el az ] pairs for the directions: Specify the directions over which to apply the non-negativity constraint (by default, the built-in 300 direction set is used)') normalise = traits.Bool(argstr='-normalise', position=3, desc="normalise the DW signal to the b=0 image") class ConstrainedSphericalDeconvolutionOutputSpec(TraitedSpec): spherical_harmonics_image = File(exists=True, desc='Spherical harmonics image') class ConstrainedSphericalDeconvolution(CommandLine): """ Perform non-negativity constrained spherical deconvolution. Note that this program makes use of implied symmetries in the diffusion profile. First, the fact the signal attenuation profile is real implies that it has conjugate symmetry, i.e. Y(l,-m) = Y(l,m)* (where * denotes the complex conjugate). Second, the diffusion profile should be antipodally symmetric (i.e. S(x) = S(-x)), implying that all odd l components should be zero. Therefore, this program only computes the even elements. Note that the spherical harmonics equations used here differ slightly from those conventionally used, in that the (-1)^m factor has been omitted. This should be taken into account in all subsequent calculations. 
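    In a typical deconvolution pipeline the single-fibre response estimated
    by EstimateResponseForSH is passed in through ``response_file``, and the
    resulting FOD image is then used for tractography (for example with
    SphericallyDeconvolutedStreamlineTrack). A minimal sketch, assuming a
    response file has already been computed (``response.txt`` is an
    illustrative name, hence the skipped lines):

    >>> import nipype.interfaces.mrtrix as mrt
    >>> csd = mrt.ConstrainedSphericalDeconvolution()
    >>> csd.inputs.in_file = 'dwi.mif'
    >>> csd.inputs.response_file = 'response.txt'    # doctest: +SKIP
    >>> csd.run()                                    # doctest: +SKIP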
Each volume in the output image corresponds to a different spherical harmonic component, according to the following convention: * [0] Y(0,0) * [1] Im {Y(2,2)} * [2] Im {Y(2,1)} * [3] Y(2,0) * [4] Re {Y(2,1)} * [5] Re {Y(2,2)} * [6] Im {Y(4,4)} * [7] Im {Y(4,3)} Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> csdeconv = mrt.ConstrainedSphericalDeconvolution() >>> csdeconv.inputs.in_file = 'dwi.mif' >>> csdeconv.inputs.encoding_file = 'encoding.txt' >>> csdeconv.run() # doctest: +SKIP """ _cmd = 'csdeconv' input_spec=ConstrainedSphericalDeconvolutionInputSpec output_spec=ConstrainedSphericalDeconvolutionOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['spherical_harmonics_image'] = self.inputs.out_filename if not isdefined(outputs['spherical_harmonics_image']): outputs['spherical_harmonics_image'] = op.abspath(self._gen_outfilename()) else: outputs['spherical_harmonics_image'] = op.abspath(outputs['spherical_harmonics_image']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_CSD.mif' class EstimateResponseForSHInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-3, desc='Diffusion-weighted images') mask_image = File(exists=True, mandatory=True, argstr='%s', position=-2, desc='only perform computation within the specified binary brain mask image') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output filename') encoding_file = File(exists=True, argstr='-grad %s', mandatory=True, position=1, desc='Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix') maximum_harmonic_order = traits.Int(argstr='-lmax %s', desc='set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.') normalise = traits.Bool(argstr='-normalise', desc='normalise the DW signal to the b=0 image') quiet = traits.Bool(argstr='-quiet', desc='Do not display information messages or progress status.') debug = traits.Bool(argstr='-debug', desc='Display debugging messages.') class EstimateResponseForSHOutputSpec(TraitedSpec): response = File(exists=True, desc='Spherical harmonics image') class EstimateResponseForSH(CommandLine): """ Estimates the fibre response function for use in spherical deconvolution. 
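    The estimated response is written to a small text file
    (``<in_file>_ER.txt`` unless ``out_filename`` is set), which is exactly
    what ConstrainedSphericalDeconvolution expects as its ``response_file``
    input. The output name can be chosen explicitly (the name below is only
    illustrative):

    >>> import nipype.interfaces.mrtrix as mrt
    >>> estresp = mrt.EstimateResponseForSH()
    >>> estresp.inputs.out_filename = 'single_fibre_response.txt'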
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> estresp = mrt.EstimateResponseForSH() >>> estresp.inputs.in_file = 'dwi.mif' >>> estresp.inputs.mask_image = 'dwi_WMProb.mif' >>> estresp.inputs.encoding_file = 'encoding.txt' >>> estresp.run() # doctest: +SKIP """ _cmd = 'estimate_response' input_spec=EstimateResponseForSHInputSpec output_spec=EstimateResponseForSHOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['response'] = self.inputs.out_filename if not isdefined(outputs['response']): outputs['response'] = op.abspath(self._gen_outfilename()) else: outputs['response'] = op.abspath(outputs['response']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_ER.txt' def concat_files(bvec_file, bval_file, invert_x, invert_y, invert_z): bvecs = np.loadtxt(bvec_file) bvals = np.loadtxt(bval_file) flip = False if np.shape(bvecs)[0] > np.shape(bvecs)[1]: flip = True bvecs = np.transpose(bvecs) if invert_x: bvecs[0,:] = -bvecs[0,:] iflogger.info('Inverting b-vectors in the x direction') if invert_y: bvecs[1,:] = -bvecs[1,:] iflogger.info('Inverting b-vectors in the y direction') if invert_z: bvecs[2,:] = -bvecs[2,:] iflogger.info('Inverting b-vectors in the z direction') iflogger.info(np.shape(bvecs)) iflogger.info(np.shape(bvals)) encoding = np.transpose(np.vstack((bvecs,bvals))) _, bvec , _ = split_filename(bvec_file) _, bval , _ = split_filename(bval_file) out_encoding_file = bvec + '_' + bval + '.txt' np.savetxt(out_encoding_file, encoding) return out_encoding_file class FSL2MRTrixInputSpec(TraitedSpec): bvec_file = File(exists=True, mandatory=True, desc='FSL b-vectors file (3xN text file)') bval_file = File(exists=True, mandatory=True, desc='FSL b-values file (1xN text file)') invert_x = traits.Bool(False, usedefault=True, desc='Inverts the b-vectors along the x-axis') invert_y = traits.Bool(False, usedefault=True, desc='Inverts the b-vectors along the y-axis') invert_z = traits.Bool(False, usedefault=True, desc='Inverts the b-vectors along the z-axis') out_encoding_file = File(genfile=True, desc='Output encoding filename') class FSL2MRTrixOutputSpec(TraitedSpec): encoding_file = File(desc='The gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient' \ 'and b gives the b-value in units (1000 s/mm^2).') class FSL2MRTrix(BaseInterface): """ Converts separate b-values and b-vectors from text files (FSL style) into a 4xN text file in which each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). 
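    Conceptually the conversion stacks the (optionally flipped) b-vectors on
    top of the b-values and transposes, so that each row of the encoding
    file reads ``X Y Z b``; the default output name is ``<bvec>_<bval>.txt``.
    A rough numpy sketch of the underlying operation (not the interface
    itself; lines that need real data are skipped):

    >>> import numpy as np
    >>> bvecs = np.loadtxt('bvecs')                  # doctest: +SKIP
    >>> bvals = np.loadtxt('bvals')                  # doctest: +SKIP
    >>> encoding = np.vstack((bvecs, bvals)).T       # doctest: +SKIP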
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> fsl2mrtrix = mrt.FSL2MRTrix() >>> fsl2mrtrix.inputs.bvec_file = 'bvecs' >>> fsl2mrtrix.inputs.bval_file = 'bvals' >>> fsl2mrtrix.inputs.invert_y = True >>> fsl2mrtrix.run() # doctest: +SKIP """ input_spec = FSL2MRTrixInputSpec output_spec = FSL2MRTrixOutputSpec def _run_interface(self, runtime): encoding = concat_files(self.inputs.bvec_file, self.inputs.bval_file, self.inputs.invert_x, self.inputs.invert_y, self.inputs.invert_z) return runtime def _list_outputs(self): outputs = self.output_spec().get() outputs['encoding_file'] = op.abspath(self._gen_filename('out_encoding_file')) return outputs def _gen_filename(self, name): if name is 'out_encoding_file': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, bvec , _ = split_filename(self.inputs.bvec_file) _, bval , _ = split_filename(self.inputs.bval_file) return bvec + '_' + bval + '.txt' class GenerateDirectionsInputSpec(CommandLineInputSpec): num_dirs = traits.Int(mandatory=True, argstr='%s', position=-2 , desc='the number of directions to generate.') power = traits.Float(argstr='-power %s', desc='specify exponent to use for repulsion power law.') niter = traits.Int(argstr='-niter %s', desc='specify the maximum number of iterations to perform.') display_info = traits.Bool(argstr='-info', desc='Display information messages.') quiet_display = traits.Bool(argstr='-quiet', desc='do not display information messages or progress status.') display_debug = traits.Bool(argstr='-debug', desc='Display debugging messages.') out_file = File("directions.txt", argstr='%s', hash_files=False, position= -1, desc='the text file to write the directions to, as [ az el ] pairs.', usedefault=True) class GenerateDirectionsOutputSpec(TraitedSpec): out_file = File(exists=True, desc='directions file') class GenerateDirections(CommandLine): """ generate a set of directions evenly distributed over a hemisphere. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> gendir = mrt.GenerateDirections() >>> gendir.inputs.num_dirs = 300 >>> gendir.run() # doctest: +SKIP """ _cmd = 'gendir' input_spec=GenerateDirectionsInputSpec output_spec=GenerateDirectionsOutputSpec class FindShPeaksInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-3, desc='the input image of SH coefficients.') directions_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='the set of directions to use as seeds for the peak finding') peaks_image = File(exists=True, argstr='-peaks %s', desc='the program will try to find the peaks that most closely match those in the image provided') num_peaks = traits.Int(argstr='-num %s', desc='the number of peaks to extract (default is 3)') peak_directions = traits.List(traits.Float, argstr='-direction %s', sep=' ', minlen=2, maxlen=2, desc='phi theta. the direction of a peak to estimate. The algorithm will attempt to find the same number of peaks as have been specified using this option ' \ ' phi: the azimuthal angle of the direction (in degrees). 
theta: the elevation angle of the direction (in degrees, from the vertical z-axis)') peak_threshold = traits.Float(argstr='-threshold %s', desc='only peak amplitudes greater than the threshold will be considered') display_info = traits.Bool(argstr='-info', desc='Display information messages.') quiet_display = traits.Bool(argstr='-quiet', desc='do not display information messages or progress status.') display_debug = traits.Bool(argstr='-debug', desc='Display debugging messages.') out_file = File(name_template="%s_peak_dirs.mif", keep_extension=False, argstr='%s', hash_files=False, position= -1, desc='the output image. Each volume corresponds to the x, y & z component of each peak direction vector in turn', name_source=["in_file"]) class FindShPeaksOutputSpec(TraitedSpec): out_file = File(exists=True, desc='Peak directions image') class FindShPeaks(CommandLine): """ identify the orientations of the N largest peaks of a SH profile Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> shpeaks = mrt.FindShPeaks() >>> shpeaks.inputs.in_file = 'csd.mif' >>> shpeaks.inputs.directions_file = 'dirs.txt' >>> shpeaks.inputs.num_peaks = 2 >>> shpeaks.run() # doctest: +SKIP """ _cmd = 'find_SH_peaks' input_spec=FindShPeaksInputSpec output_spec=FindShPeaksOutputSpec class Directions2AmplitudeInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='the input directions image. Each volume corresponds to the x, y & z component of each direction vector in turn.') peaks_image = File(exists=True, argstr='-peaks %s', desc='the program will try to find the peaks that most closely match those in the image provided') num_peaks = traits.Int(argstr='-num %s', desc='the number of peaks to extract (default is 3)') peak_directions = traits.List(traits.Float, argstr='-direction %s', sep=' ', minlen=2, maxlen=2, desc='phi theta. the direction of a peak to estimate. The algorithm will attempt to find the same number of peaks as have been specified using this option ' \ ' phi: the azimuthal angle of the direction (in degrees). 
theta: the elevation angle of the direction (in degrees, from the vertical z-axis)') display_info = traits.Bool(argstr='-info', desc='Display information messages.') quiet_display = traits.Bool(argstr='-quiet', desc='do not display information messages or progress status.') display_debug = traits.Bool(argstr='-debug', desc='Display debugging messages.') out_file = File(name_template="%s_amplitudes.mif", keep_extension=False, argstr='%s', hash_files=False, position= -1, desc='the output amplitudes image', name_source=["in_file"]) class Directions2AmplitudeOutputSpec(TraitedSpec): out_file = File(exists=True, desc='amplitudes image') class Directions2Amplitude(CommandLine): """ convert directions image to amplitudes Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> amplitudes = mrt.Directions2Amplitude() >>> amplitudes.inputs.in_file = 'peak_directions.mif' >>> amplitudes.run() # doctest: +SKIP """ _cmd = 'dir2amp' input_spec=Directions2AmplitudeInputSpec output_spec=Directions2AmplitudeOutputSpec nipype-0.9.2/nipype/interfaces/mrtrix/tests/000077500000000000000000000000001227300005300211345ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py000066400000000000000000000035151227300005300324560ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tensors import ConstrainedSphericalDeconvolution def test_ConstrainedSphericalDeconvolution_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', ), directions_file=dict(argstr='-directions %s', position=-2, ), encoding_file=dict(argstr='-grad %s', position=1, ), environ=dict(nohash=True, usedefault=True, ), filter_file=dict(argstr='-filter %s', position=-2, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-3, ), iterations=dict(argstr='-niter %s', ), lambda_value=dict(argstr='-lambda %s', ), mask_image=dict(argstr='-mask %s', position=2, ), maximum_harmonic_order=dict(argstr='-lmax %s', ), normalise=dict(argstr='-normalise', position=3, ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), response_file=dict(argstr='%s', mandatory=True, position=-2, ), terminal_output=dict(mandatory=True, nohash=True, ), threshold_value=dict(argstr='-threshold %s', ), ) inputs = ConstrainedSphericalDeconvolution.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ConstrainedSphericalDeconvolution_outputs(): output_map = dict(spherical_harmonics_image=dict(), ) outputs = ConstrainedSphericalDeconvolution.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py000066400000000000000000000025511227300005300307470ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tensors import DWI2SphericalHarmonicsImage def test_DWI2SphericalHarmonicsImage_inputs(): input_map = dict(args=dict(argstr='%s', ), encoding_file=dict(argstr='-grad %s', mandatory=True, position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, 
position=-2, ), maximum_harmonic_order=dict(argstr='-lmax %s', ), normalise=dict(argstr='-normalise', position=3, ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DWI2SphericalHarmonicsImage.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DWI2SphericalHarmonicsImage_outputs(): output_map = dict(spherical_harmonics_image=dict(), ) outputs = DWI2SphericalHarmonicsImage.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py000066400000000000000000000027421227300005300255020ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import DWI2Tensor def test_DWI2Tensor_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', position=1, ), encoding_file=dict(argstr='-grad %s', position=2, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), ignore_slice_by_volume=dict(argstr='-ignoreslices %s', position=2, sep=' ', ), ignore_volumes=dict(argstr='-ignorevolumes %s', position=2, sep=' ', ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), out_filename=dict(argstr='%s', name_source='in_file', name_template='%s_tensor.mif', output_name='tensor', position=-1, ), quiet=dict(argstr='-quiet', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DWI2Tensor.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DWI2Tensor_outputs(): output_map = dict(tensor=dict(), ) outputs = DWI2Tensor.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py000066400000000000000000000054201227300005300317300ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tracking import DiffusionTensorStreamlineTrack def test_DiffusionTensorStreamlineTrack_inputs(): input_map = dict(args=dict(argstr='%s', ), cutoff_value=dict(argstr='-cutoff %s', units='NA', ), desired_number_of_tracks=dict(argstr='-number %d', ), do_not_precompute=dict(argstr='-noprecomputed', ), environ=dict(nohash=True, usedefault=True, ), exclude_file=dict(argstr='-exclude %s', position=2, ), exclude_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), gradient_encoding_file=dict(argstr='-grad %s', mandatory=True, position=-2, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), include_file=dict(argstr='-include %s', position=2, ), include_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), initial_cutoff_value=dict(argstr='-initcutoff %s', units='NA', ), initial_direction=dict(argstr='-initdirection %s', units='voxels', ), inputmodel=dict(argstr='%s', position=-3, usedefault=True, ), mask_file=dict(argstr='-exclude %s', position=2, ), mask_spec=dict(argstr='-seed %s', position=2, sep=',', 
units='mm', ), maximum_number_of_tracks=dict(argstr='-maxnum %d', ), maximum_tract_length=dict(argstr='-length %s', units='mm', ), minimum_radius_of_curvature=dict(argstr='-curvature %s', units='mm', ), minimum_tract_length=dict(argstr='-minlength %s', units='mm', ), no_mask_interpolation=dict(argstr='-nomaskinterp', ), out_file=dict(argstr='%s', genfile=True, position=-1, ), seed_file=dict(argstr='-seed %s', position=2, ), seed_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), step_size=dict(argstr='-step %s', units='mm', ), stop=dict(argstr='-gzip', ), terminal_output=dict(mandatory=True, nohash=True, ), unidirectional=dict(argstr='-unidirectional', ), ) inputs = DiffusionTensorStreamlineTrack.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DiffusionTensorStreamlineTrack_outputs(): output_map = dict(tracked=dict(), ) outputs = DiffusionTensorStreamlineTrack.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py000066400000000000000000000027611227300005300276350ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tensors import Directions2Amplitude def test_Directions2Amplitude_inputs(): input_map = dict(args=dict(argstr='%s', ), display_debug=dict(argstr='-debug', ), display_info=dict(argstr='-info', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), num_peaks=dict(argstr='-num %s', ), out_file=dict(argstr='%s', hash_files=False, keep_extension=False, name_source=['in_file'], name_template='%s_amplitudes.mif', position=-1, ), peak_directions=dict(argstr='-direction %s', sep=' ', ), peaks_image=dict(argstr='-peaks %s', ), quiet_display=dict(argstr='-quiet', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Directions2Amplitude.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Directions2Amplitude_outputs(): output_map = dict(out_file=dict(), ) outputs = Directions2Amplitude.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_Erode.py000066400000000000000000000023731227300005300246400ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import Erode def test_Erode_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', position=1, ), dilate=dict(argstr='-dilate', position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), number_of_passes=dict(argstr='-npass %s', ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), quiet=dict(argstr='-quiet', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Erode.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): 
yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Erode_outputs(): output_map = dict(out_file=dict(), ) outputs = Erode.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py000066400000000000000000000027051227300005300277750ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tensors import EstimateResponseForSH def test_EstimateResponseForSH_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', ), encoding_file=dict(argstr='-grad %s', mandatory=True, position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-3, ), mask_image=dict(argstr='%s', mandatory=True, position=-2, ), maximum_harmonic_order=dict(argstr='-lmax %s', ), normalise=dict(argstr='-normalise', ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), quiet=dict(argstr='-quiet', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = EstimateResponseForSH.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_EstimateResponseForSH_outputs(): output_map = dict(response=dict(), ) outputs = EstimateResponseForSH.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py000066400000000000000000000017051227300005300254140ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tensors import FSL2MRTrix def test_FSL2MRTrix_inputs(): input_map = dict(bval_file=dict(mandatory=True, ), bvec_file=dict(mandatory=True, ), invert_x=dict(usedefault=True, ), invert_y=dict(usedefault=True, ), invert_z=dict(usedefault=True, ), out_encoding_file=dict(genfile=True, ), ) inputs = FSL2MRTrix.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FSL2MRTrix_outputs(): output_map = dict(encoding_file=dict(), ) outputs = FSL2MRTrix.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py000066400000000000000000000031141227300005300257330ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tensors import FindShPeaks def test_FindShPeaks_inputs(): input_map = dict(args=dict(argstr='%s', ), directions_file=dict(argstr='%s', mandatory=True, position=-2, ), display_debug=dict(argstr='-debug', ), display_info=dict(argstr='-info', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-3, ), num_peaks=dict(argstr='-num %s', ), out_file=dict(argstr='%s', hash_files=False, keep_extension=False, name_source=['in_file'], 
name_template='%s_peak_dirs.mif', position=-1, ), peak_directions=dict(argstr='-direction %s', sep=' ', ), peak_threshold=dict(argstr='-threshold %s', ), peaks_image=dict(argstr='-peaks %s', ), quiet_display=dict(argstr='-quiet', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = FindShPeaks.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FindShPeaks_outputs(): output_map = dict(out_file=dict(), ) outputs = FindShPeaks.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py000066400000000000000000000025221227300005300273540ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tensors import GenerateDirections def test_GenerateDirections_inputs(): input_map = dict(args=dict(argstr='%s', ), display_debug=dict(argstr='-debug', ), display_info=dict(argstr='-info', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), niter=dict(argstr='-niter %s', ), num_dirs=dict(argstr='%s', mandatory=True, position=-2, ), out_file=dict(argstr='%s', hash_files=False, position=-1, usedefault=True, ), power=dict(argstr='-power %s', ), quiet_display=dict(argstr='-quiet', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = GenerateDirections.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_GenerateDirections_outputs(): output_map = dict(out_file=dict(), ) outputs = GenerateDirections.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py000066400000000000000000000025431227300005300303250ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import GenerateWhiteMatterMask def test_GenerateWhiteMatterMask_inputs(): input_map = dict(args=dict(argstr='%s', ), binary_mask=dict(argstr='%s', mandatory=True, position=-2, ), encoding_file=dict(argstr='-grad %s', mandatory=True, position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-3, ), noise_level_margin=dict(argstr='-margin %s', ), out_WMProb_filename=dict(argstr='%s', genfile=True, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = GenerateWhiteMatterMask.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_GenerateWhiteMatterMask_outputs(): output_map = dict(WMprobabilitymap=dict(), ) outputs = GenerateWhiteMatterMask.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py000066400000000000000000000033441227300005300254600ustar00rootroot00000000000000# 
AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import MRConvert def test_MRConvert_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), extension=dict(position=2, usedefault=True, ), extract_at_axis=dict(argstr='-coord %s', position=1, ), extract_at_coordinate=dict(argstr='%s', position=2, sep=',', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), layout=dict(argstr='-output %s', position=2, ), offset_bias=dict(argstr='-scale %d', position=3, units='mm', ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), output_datatype=dict(argstr='-output %s', position=2, ), prs=dict(argstr='-prs', position=3, ), replace_NaN_with_zero=dict(argstr='-zero', position=3, ), resample=dict(argstr='-scale %d', position=3, units='mm', ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_dims=dict(argstr='-vox %s', position=3, sep=',', ), ) inputs = MRConvert.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRConvert_outputs(): output_map = dict(converted=dict(), ) outputs = MRConvert.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py000066400000000000000000000022471227300005300256600ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import MRMultiply def test_MRMultiply_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr='%s', mandatory=True, position=-2, ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), quiet=dict(argstr='-quiet', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MRMultiply.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRMultiply_outputs(): output_map = dict(out_file=dict(), ) outputs = MRMultiply.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py000066400000000000000000000030751227300005300260140ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import MRTransform def test_MRTransform_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', position=1, ), environ=dict(nohash=True, usedefault=True, ), flip_x=dict(argstr='-flipx', position=1, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr='%s', mandatory=True, position=-2, ), invert=dict(argstr='-inverse', position=1, ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), quiet=dict(argstr='-quiet', position=1, ), reference_image=dict(argstr='-reference %s', position=1, ), replace_transform=dict(argstr='-replace', position=1, ), 
template_image=dict(argstr='-template %s', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), transformation_file=dict(argstr='-transform %s', position=1, ), ) inputs = MRTransform.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRTransform_outputs(): output_map = dict(out_file=dict(), ) outputs = MRTransform.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py000066400000000000000000000016221227300005300265140ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.convert import MRTrix2TrackVis def test_MRTrix2TrackVis_inputs(): input_map = dict(image_file=dict(), in_file=dict(mandatory=True, ), matrix_file=dict(), out_filename=dict(genfile=True, usedefault=True, ), registration_image_file=dict(), ) inputs = MRTrix2TrackVis.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRTrix2TrackVis_outputs(): output_map = dict(out_file=dict(), ) outputs = MRTrix2TrackVis.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py000066400000000000000000000017261227300005300256040ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import MRTrixInfo def test_MRTrixInfo_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MRTrixInfo.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRTrixInfo_outputs(): output_map = dict() outputs = MRTrixInfo.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py000066400000000000000000000021171227300005300261450ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import MRTrixViewer def test_MRTrixViewer_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(argstr='%s', mandatory=True, position=-2, ), quiet=dict(argstr='-quiet', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MRTrixViewer.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MRTrixViewer_outputs(): output_map = dict() outputs = MRTrixViewer.output_spec() for key, metadata in 
output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py000066400000000000000000000022721227300005300263320ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import MedianFilter3D def test_MedianFilter3D_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), quiet=dict(argstr='-quiet', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MedianFilter3D.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MedianFilter3D_outputs(): output_map = dict(out_file=dict(), ) outputs = MedianFilter3D.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py000066400000000000000000000055301227300005300360740ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/mrtrix/tests# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tracking import ProbabilisticSphericallyDeconvolutedStreamlineTrack def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): input_map = dict(args=dict(argstr='%s', ), cutoff_value=dict(argstr='-cutoff %s', units='NA', ), desired_number_of_tracks=dict(argstr='-number %d', ), do_not_precompute=dict(argstr='-noprecomputed', ), environ=dict(nohash=True, usedefault=True, ), exclude_file=dict(argstr='-exclude %s', position=2, ), exclude_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), include_file=dict(argstr='-include %s', position=2, ), include_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), initial_cutoff_value=dict(argstr='-initcutoff %s', units='NA', ), initial_direction=dict(argstr='-initdirection %s', units='voxels', ), inputmodel=dict(argstr='%s', position=-3, usedefault=True, ), mask_file=dict(argstr='-exclude %s', position=2, ), mask_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), maximum_number_of_tracks=dict(argstr='-maxnum %d', ), maximum_number_of_trials=dict(argstr='-trials %s', ), maximum_tract_length=dict(argstr='-length %s', units='mm', ), minimum_radius_of_curvature=dict(argstr='-curvature %s', units='mm', ), minimum_tract_length=dict(argstr='-minlength %s', units='mm', ), no_mask_interpolation=dict(argstr='-nomaskinterp', ), out_file=dict(argstr='%s', genfile=True, position=-1, ), seed_file=dict(argstr='-seed %s', position=2, ), seed_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), step_size=dict(argstr='-step %s', units='mm', ), stop=dict(argstr='-gzip', ), terminal_output=dict(mandatory=True, nohash=True, ), unidirectional=dict(argstr='-unidirectional', ), ) inputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.input_spec() for key, metadata in input_map.items(): for metakey, value 
in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_outputs(): output_map = dict(tracked=dict(), ) outputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py000066400000000000000000000053311227300005300334430ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tracking import SphericallyDeconvolutedStreamlineTrack def test_SphericallyDeconvolutedStreamlineTrack_inputs(): input_map = dict(args=dict(argstr='%s', ), cutoff_value=dict(argstr='-cutoff %s', units='NA', ), desired_number_of_tracks=dict(argstr='-number %d', ), do_not_precompute=dict(argstr='-noprecomputed', ), environ=dict(nohash=True, usedefault=True, ), exclude_file=dict(argstr='-exclude %s', position=2, ), exclude_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), include_file=dict(argstr='-include %s', position=2, ), include_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), initial_cutoff_value=dict(argstr='-initcutoff %s', units='NA', ), initial_direction=dict(argstr='-initdirection %s', units='voxels', ), inputmodel=dict(argstr='%s', position=-3, usedefault=True, ), mask_file=dict(argstr='-exclude %s', position=2, ), mask_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), maximum_number_of_tracks=dict(argstr='-maxnum %d', ), maximum_tract_length=dict(argstr='-length %s', units='mm', ), minimum_radius_of_curvature=dict(argstr='-curvature %s', units='mm', ), minimum_tract_length=dict(argstr='-minlength %s', units='mm', ), no_mask_interpolation=dict(argstr='-nomaskinterp', ), out_file=dict(argstr='%s', genfile=True, position=-1, ), seed_file=dict(argstr='-seed %s', position=2, ), seed_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), step_size=dict(argstr='-step %s', units='mm', ), stop=dict(argstr='-gzip', ), terminal_output=dict(mandatory=True, nohash=True, ), unidirectional=dict(argstr='-unidirectional', ), ) inputs = SphericallyDeconvolutedStreamlineTrack.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SphericallyDeconvolutedStreamlineTrack_outputs(): output_map = dict(tracked=dict(), ) outputs = SphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py000066400000000000000000000051461227300005300266730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.tracking import StreamlineTrack def test_StreamlineTrack_inputs(): input_map = dict(args=dict(argstr='%s', ), cutoff_value=dict(argstr='-cutoff %s', units='NA', ), desired_number_of_tracks=dict(argstr='-number %d', ), do_not_precompute=dict(argstr='-noprecomputed', ), environ=dict(nohash=True, usedefault=True, ), 
exclude_file=dict(argstr='-exclude %s', position=2, ), exclude_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), include_file=dict(argstr='-include %s', position=2, ), include_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), initial_cutoff_value=dict(argstr='-initcutoff %s', units='NA', ), initial_direction=dict(argstr='-initdirection %s', units='voxels', ), inputmodel=dict(argstr='%s', position=-3, usedefault=True, ), mask_file=dict(argstr='-exclude %s', position=2, ), mask_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), maximum_number_of_tracks=dict(argstr='-maxnum %d', ), maximum_tract_length=dict(argstr='-length %s', units='mm', ), minimum_radius_of_curvature=dict(argstr='-curvature %s', units='mm', ), minimum_tract_length=dict(argstr='-minlength %s', units='mm', ), no_mask_interpolation=dict(argstr='-nomaskinterp', ), out_file=dict(argstr='%s', genfile=True, position=-1, ), seed_file=dict(argstr='-seed %s', position=2, ), seed_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', ), step_size=dict(argstr='-step %s', units='mm', ), stop=dict(argstr='-gzip', ), terminal_output=dict(mandatory=True, nohash=True, ), unidirectional=dict(argstr='-unidirectional', ), ) inputs = StreamlineTrack.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_StreamlineTrack_outputs(): output_map = dict(tracked=dict(), ) outputs = StreamlineTrack.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py000066400000000000000000000023471227300005300305010ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import Tensor2ApparentDiffusion def test_Tensor2ApparentDiffusion_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), quiet=dict(argstr='-quiet', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Tensor2ApparentDiffusion.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Tensor2ApparentDiffusion_outputs(): output_map = dict(ADC=dict(), ) outputs = Tensor2ApparentDiffusion.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py000066400000000000000000000023651227300005300312320ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import Tensor2FractionalAnisotropy def test_Tensor2FractionalAnisotropy_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', position=1, ), environ=dict(nohash=True, usedefault=True, ), 
ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), quiet=dict(argstr='-quiet', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Tensor2FractionalAnisotropy.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Tensor2FractionalAnisotropy_outputs(): output_map = dict(FA=dict(), ) outputs = Tensor2FractionalAnisotropy.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py000066400000000000000000000022631227300005300263170ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import Tensor2Vector def test_Tensor2Vector_inputs(): input_map = dict(args=dict(argstr='%s', ), debug=dict(argstr='-debug', position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), quiet=dict(argstr='-quiet', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Tensor2Vector.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Tensor2Vector_outputs(): output_map = dict(vector=dict(), ) outputs = Tensor2Vector.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py000066400000000000000000000026341227300005300255360ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.mrtrix.preprocess import Threshold def test_Threshold_inputs(): input_map = dict(absolute_threshold_value=dict(argstr='-abs %s', ), args=dict(argstr='%s', ), debug=dict(argstr='-debug', position=1, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), invert=dict(argstr='-invert', position=1, ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), percentage_threshold_value=dict(argstr='-percent %s', ), quiet=dict(argstr='-quiet', position=1, ), replace_zeros_with_NaN=dict(argstr='-nan', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Threshold.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Threshold_outputs(): output_map = dict(out_file=dict(), ) outputs = Threshold.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py000066400000000000000000000027321227300005300257350ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from 
nipype.interfaces.mrtrix.tracking import Tracks2Prob def test_Tracks2Prob_inputs(): input_map = dict(args=dict(argstr='%s', ), colour=dict(argstr='-colour', position=3, ), environ=dict(nohash=True, usedefault=True, ), fraction=dict(argstr='-fraction', position=3, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), out_filename=dict(argstr='%s', genfile=True, position=-1, ), output_datatype=dict(argstr='-datatype %s', position=2, ), resample=dict(argstr='-resample %d', position=3, units='mm', ), template_file=dict(argstr='-template %s', position=1, ), terminal_output=dict(mandatory=True, nohash=True, ), voxel_dims=dict(argstr='-vox %s', position=2, sep=',', ), ) inputs = Tracks2Prob.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Tracks2Prob_outputs(): output_map = dict(tract_image=dict(), ) outputs = Tracks2Prob.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/mrtrix/tracking.py000066400000000000000000000301411227300005300221450ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File from nipype.utils.filemanip import split_filename import os, os.path as op from nipype.interfaces.traits_extension import isdefined class Tracks2ProbInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='tract file') template_file = File(exists=True, argstr='-template %s', position=1, desc='an image file to be used as a template for the output (the output image will have the same transform and field of view)') voxel_dims = traits.List(traits.Float, argstr='-vox %s', sep=',', position=2, minlen=3, maxlen=3, desc='Three comma-separated numbers giving the size of each voxel in mm.') colour = traits.Bool(argstr='-colour', position=3, desc="add colour to the output image according to the direction of the tracks.") fraction = traits.Bool(argstr='-fraction', position=3, desc="produce an image of the fraction of fibres through each voxel (as a proportion of the total number in the file), rather than the count.") output_datatype = traits.Enum("Bit","Int8", "UInt8","Int16", "UInt16","Int32", "UInt32", "float32", "float64", argstr='-datatype %s', position=2, desc='output image data type, one of: "Bit", "Int8", "UInt8", "Int16", "UInt16", "Int32", "UInt32", "float32" or "float64"') #, usedefault=True) resample = traits.Float(argstr='-resample %d', position=3, units='mm', desc='resample the tracks at regular intervals using Hermite interpolation. 
If omitted, the program will select an appropriate interpolation factor automatically.') out_filename = File(genfile=True, argstr='%s', position= -1, desc='output data file') class Tracks2ProbOutputSpec(TraitedSpec): tract_image = File(exists=True, desc='Output tract count or track density image') class Tracks2Prob(CommandLine): """ Convert a tract file into a map of the fraction of tracks to enter each voxel - also known as a tract density image (TDI) - in MRtrix's image format (.mif). This can be viewed using MRview or converted to Nifti using MRconvert. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> tdi = mrt.Tracks2Prob() >>> tdi.inputs.in_file = 'dwi_CSD_tracked.tck' >>> tdi.inputs.colour = True >>> tdi.run() # doctest: +SKIP """ _cmd = 'tracks2prob' input_spec=Tracks2ProbInputSpec output_spec=Tracks2ProbOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['tract_image'] = self.inputs.out_filename if not isdefined(outputs['tract_image']): outputs['tract_image'] = op.abspath(self._gen_outfilename()) else: outputs['tract_image'] = os.path.abspath(outputs['tract_image']) return outputs def _gen_filename(self, name): if name is 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_TDI.mif' class StreamlineTrackInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='the image containing the source data.' \ 'The type of data required depends on the type of tracking as set in the preceding argument. For DT methods, ' \ 'the base DWI are needed. For SD methods, the SH harmonic coefficients of the FOD are needed.') seed_file = File(exists=True, argstr='-seed %s', position=2, desc='seed file') seed_spec = traits.List(traits.Float, desc='seed specification in mm and radius (x y z r)', position=2, argstr='-seed %s', minlen=4, maxlen=4, sep=',', units='mm') include_file = File(exists=True, argstr='-include %s', position=2, desc='inclusion file') include_spec = traits.List(traits.Float, desc='inclusion specification in mm and radius (x y z r)', position=2, argstr='-seed %s', minlen=4, maxlen=4, sep=',', units='mm') exclude_file = File(exists=True, argstr='-exclude %s', position=2, desc='exclusion file') exclude_spec = traits.List(traits.Float, desc='exclusion specification in mm and radius (x y z r)', position=2, argstr='-seed %s', minlen=4, maxlen=4, sep=',', units='mm') mask_file = File(exists=True, argstr='-exclude %s', position=2, desc='mask file. Only tracks within mask.') mask_spec = traits.List(traits.Float, desc='Mask specification in mm and radius (x y z r). Tracks will be terminated when they leave the ROI.', position=2, argstr='-seed %s', minlen=4, maxlen=4, sep=',', units='mm') inputmodel = traits.Enum('DT_STREAM', 'SD_PROB', 'SD_STREAM', argstr='%s', desc='input model type', usedefault=True, position=-3) stop = traits.Bool(argstr='-gzip', desc="stop track as soon as it enters any of the include regions.") do_not_precompute = traits.Bool(argstr='-noprecomputed', desc="Turns off precomputation of the legendre polynomial values. 
Warning: this will slow down the algorithm by a factor of approximately 4.") unidirectional = traits.Bool(argstr='-unidirectional', desc="Track from the seed point in one direction only (default is to track in both directions).") no_mask_interpolation = traits.Bool(argstr='-nomaskinterp', desc="Turns off trilinear interpolation of mask images.") step_size = traits.Float(argstr='-step %s', units='mm', desc="Set the step size of the algorithm in mm (default is 0.2).") minimum_radius_of_curvature = traits.Float(argstr='-curvature %s', units='mm', desc="Set the minimum radius of curvature (default is 2 mm for DT_STREAM, 0 for SD_STREAM, 1 mm for SD_PROB and DT_PROB)") desired_number_of_tracks = traits.Int(argstr='-number %d', desc='Sets the desired number of tracks.' \ 'The program will continue to generate tracks until this number of tracks have been selected and written to the output file' \ '(default is 100 for *_STREAM methods, 1000 for *_PROB methods).') maximum_number_of_tracks = traits.Int(argstr='-maxnum %d', desc='Sets the maximum number of tracks to generate.' \ "The program will not generate more tracks than this number, even if the desired number of tracks hasn't yet been reached" \ '(default is 100 x number).') minimum_tract_length = traits.Float(argstr='-minlength %s', units='mm', desc="Sets the minimum length of any track in millimeters (default is 10 mm).") maximum_tract_length = traits.Float(argstr='-length %s', units='mm', desc="Sets the maximum length of any track in millimeters (default is 200 mm).") cutoff_value = traits.Float(argstr='-cutoff %s', units='NA', desc="Set the FA or FOD amplitude cutoff for terminating tracks (default is 0.1).") initial_cutoff_value = traits.Float(argstr='-initcutoff %s', units='NA', desc="Sets the minimum FA or FOD amplitude for initiating tracks (default is twice the normal cutoff).") initial_direction = traits.List(traits.Int, desc='Specify the initial tracking direction as a vector', argstr='-initdirection %s', minlen=2, maxlen=2, units='voxels') out_file = File(argstr='%s', position= -1, genfile=True, desc='output data file') class StreamlineTrackOutputSpec(TraitedSpec): tracked = File(exists=True, desc='output file containing reconstructed tracts') class StreamlineTrack(CommandLine): """ Performs tractography using one of the following models: 'dt_prob', 'dt_stream', 'sd_prob', 'sd_stream', Where 'dt' stands for diffusion tensor, 'sd' stands for spherical deconvolution, and 'prob' stands for probabilistic. 
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> strack = mrt.StreamlineTrack() >>> strack.inputs.inputmodel = 'SD_PROB' >>> strack.inputs.in_file = 'data.Bfloat' >>> strack.inputs.seed_file = 'seed_mask.nii' >>> strack.run() # doctest: +SKIP """ _cmd = 'streamtrack' input_spec = StreamlineTrackInputSpec output_spec = StreamlineTrackOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['tracked'] = op.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name is 'out_file': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name , _ = split_filename(self.inputs.in_file) return name + '_tracked.tck' class DiffusionTensorStreamlineTrackInputSpec(StreamlineTrackInputSpec): gradient_encoding_file = File(exists=True, argstr='-grad %s', mandatory=True, position=-2, desc='Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix') class DiffusionTensorStreamlineTrack(StreamlineTrack): """ Specialized interface to StreamlineTrack. This interface is used for streamline tracking from diffusion tensor data, and calls the MRtrix function 'streamtrack' with the option 'DT_STREAM' Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> dtstrack = mrt.DiffusionTensorStreamlineTrack() >>> dtstrack.inputs.in_file = 'data.Bfloat' >>> dtstrack.inputs.seed_file = 'seed_mask.nii' >>> dtstrack.run() # doctest: +SKIP """ input_spec = DiffusionTensorStreamlineTrackInputSpec def __init__(self, command=None, **inputs): inputs["inputmodel"] = "DT_STREAM" return super(DiffusionTensorStreamlineTrack, self).__init__(command, **inputs) class ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec(StreamlineTrackInputSpec): maximum_number_of_trials = traits.Int(argstr='-trials %s', desc="Set the maximum number of sampling trials at each point (only used for probabilistic tracking).") class ProbabilisticSphericallyDeconvolutedStreamlineTrack(StreamlineTrack): """ Performs probabilistic tracking using spherically deconvolved data Specialized interface to StreamlineTrack. This interface is used for probabilistic tracking from spherically deconvolved data, and calls the MRtrix function 'streamtrack' with the option 'SD_PROB' Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> sdprobtrack = mrt.ProbabilisticSphericallyDeconvolutedStreamlineTrack() >>> sdprobtrack.inputs.in_file = 'data.Bfloat' >>> sdprobtrack.inputs.seed_file = 'seed_mask.nii' >>> sdprobtrack.run() # doctest: +SKIP """ input_spec = ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec def __init__(self, command=None, **inputs): inputs["inputmodel"] = "SD_PROB" return super(ProbabilisticSphericallyDeconvolutedStreamlineTrack, self).__init__(command, **inputs) class SphericallyDeconvolutedStreamlineTrack(StreamlineTrack): """ Performs streamline tracking using spherically deconvolved data Specialized interface to StreamlineTrack. 
This interface is used for streamline tracking from spherically deconvolved data, and calls the MRtrix function 'streamtrack' with the option 'SD_STREAM' Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> sdtrack = mrt.SphericallyDeconvolutedStreamlineTrack() >>> sdtrack.inputs.in_file = 'data.Bfloat' >>> sdtrack.inputs.seed_file = 'seed_mask.nii' >>> sdtrack.run() # doctest: +SKIP """ input_spec = StreamlineTrackInputSpec def __init__(self, command=None, **inputs): inputs["inputmodel"] = "SD_STREAM" return super(SphericallyDeconvolutedStreamlineTrack, self).__init__(command, **inputs) nipype-0.9.2/nipype/interfaces/nipy/000077500000000000000000000000001227300005300174245ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/nipy/__init__.py000066400000000000000000000002211227300005300215300ustar00rootroot00000000000000from .model import FitGLM, EstimateContrast from .preprocess import ComputeMask, FmriRealign4d, SpaceTimeRealigner from .utils import Similarity nipype-0.9.2/nipype/interfaces/nipy/model.py000066400000000000000000000306331227300005300211030ustar00rootroot00000000000000import warnings import os import nibabel as nb import numpy as np from ...utils.misc import package_check have_nipy = True try: package_check('nipy') except Exception, e: have_nipy = False else: import nipy.modalities.fmri.design_matrix as dm import nipy.labs.glm.glm as GLM if have_nipy: try: BlockParadigm = dm.BlockParadigm except AttributeError: from nipy.modalities.fmri.experimental_paradigm import BlockParadigm from ..base import (BaseInterface, TraitedSpec, traits, File, OutputMultiPath, BaseInterfaceInputSpec, isdefined) class FitGLMInputSpec(BaseInterfaceInputSpec): session_info = traits.List(minlen=1, maxlen=1, mandatory=True, desc=('Session specific information generated by' ' ``modelgen.SpecifyModel``, FitGLM does ' 'not support multiple runs unless they are ' 'concatenated (see SpecifyModel options)')) hrf_model = traits.Enum('Canonical', 'Canonical With Derivative', 'FIR', desc=("that specifies the hemodynamic response " "function; it can be 'Canonical', 'Canonical " "With Derivative' or 'FIR'"), usedefault=True) drift_model = traits.Enum("Cosine", "Polynomial", "Blank", desc = ("string that specifies the desired drift " "model, to be chosen among 'Polynomial', " "'Cosine', 'Blank'"), usedefault=True) TR = traits.Float(mandatory=True) model = traits.Enum("ar1", "spherical", desc=("autoregressive mode is available only for the " "kalman method"), usedefault=True) method = traits.Enum("kalman", "ols", desc=("method to fit the model, ols or kalman; kalman " "is more time consuming but it supports " "autoregressive model"), usedefault=True) mask = traits.File(exists=True, desc=("restrict the fitting only to the region defined " "by this mask")) normalize_design_matrix = traits.Bool(False, desc=("normalize (zscore) the " "regressors before fitting"), usedefault=True) save_residuals = traits.Bool(False, usedefault=True) plot_design_matrix = traits.Bool(False, usedefault=True) class FitGLMOutputSpec(TraitedSpec): beta = File(exists=True) nvbeta = traits.Any() s2 = File(exists=True) dof = traits.Any() constants = traits.Any() axis = traits.Any() reg_names = traits.List() residuals = traits.File() a = File(exists=True) class FitGLM(BaseInterface): ''' Fit GLM model based on the specified design. Supports only single or concatenated runs. 
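Example ------- A minimal illustrative sketch; ``subject_session_info`` is a hypothetical placeholder for the ``session_info`` list produced beforehand by ``nipype.algorithms.modelgen.SpecifyModel`` and is not defined here. >>> from nipype.interfaces.nipy.model import FitGLM >>> fit = FitGLM() >>> fit.inputs.TR = 3.0 >>> fit.inputs.model = 'ar1' >>> fit.inputs.method = 'kalman' >>> fit.inputs.session_info = subject_session_info # doctest: +SKIP >>> res = fit.run() # doctest: +SKIP 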
''' input_spec = FitGLMInputSpec output_spec = FitGLMOutputSpec def _run_interface(self, runtime): session_info = self.inputs.session_info functional_runs = self.inputs.session_info[0]['scans'] if isinstance(functional_runs, str): functional_runs = [functional_runs] nii = nb.load(functional_runs[0]) data = nii.get_data() if isdefined(self.inputs.mask): mask = nb.load(self.inputs.mask).get_data() > 0 else: mask = np.ones(nii.shape[:3]) == 1 timeseries = data.copy()[mask,:] del data for functional_run in functional_runs[1:]: nii = nb.load(functional_run) data = nii.get_data() npdata = data.copy() del data timeseries = np.concatenate((timeseries,npdata[mask,:]), axis=1) del npdata nscans = timeseries.shape[1] if 'hpf' in session_info[0].keys(): hpf = session_info[0]['hpf'] drift_model=self.inputs.drift_model else: hpf=0 drift_model = "Blank" reg_names = [] for reg in session_info[0]['regress']: reg_names.append(reg['name']) reg_vals = np.zeros((nscans,len(reg_names))) for i in range(len(reg_names)): reg_vals[:,i] = np.array(session_info[0]['regress'][i]['val']).reshape(1,-1) frametimes= np.linspace(0, (nscans-1)*self.inputs.TR, nscans) conditions = [] onsets = [] duration = [] for i,cond in enumerate(session_info[0]['cond']): onsets += cond['onset'] conditions += [cond['name']]*len(cond['onset']) if len(cond['duration']) == 1: duration += cond['duration']*len(cond['onset']) else: duration += cond['duration'] if conditions: paradigm = BlockParadigm(con_id=conditions, onset=onsets, duration=duration) else: paradigm = None design_matrix, self._reg_names = dm.dmtx_light(frametimes, paradigm, drift_model=drift_model, hfcut=hpf, hrf_model=self.inputs.hrf_model, add_regs=reg_vals, add_reg_names=reg_names ) if self.inputs.normalize_design_matrix: for i in range(len(self._reg_names)-1): design_matrix[:,i] = (design_matrix[:,i]-design_matrix[:,i].mean())/design_matrix[:,i].std() if self.inputs.plot_design_matrix: import pylab pylab.pcolor(design_matrix) pylab.savefig("design_matrix.pdf") pylab.close() pylab.clf() glm = GLM.glm() glm.fit(timeseries.T, design_matrix, method=self.inputs.method, model=self.inputs.model) self._beta_file = os.path.abspath("beta.nii") beta = np.zeros(mask.shape + (glm.beta.shape[0],)) beta[mask,:] = glm.beta.T nb.save(nb.Nifti1Image(beta, nii.get_affine()), self._beta_file) self._s2_file = os.path.abspath("s2.nii") s2 = np.zeros(mask.shape) s2[mask] = glm.s2 nb.save(nb.Nifti1Image(s2, nii.get_affine()), self._s2_file) if self.inputs.save_residuals: explained = np.dot(design_matrix,glm.beta) residuals = np.zeros(mask.shape + (nscans,)) residuals[mask,:] = timeseries - explained.T self._residuals_file = os.path.abspath("residuals.nii") nb.save(nb.Nifti1Image(residuals, nii.get_affine()), self._residuals_file) self._nvbeta = glm.nvbeta self._dof = glm.dof self._constants = glm._constants self._axis = glm._axis if self.inputs.model == "ar1": self._a_file = os.path.abspath("a.nii") a = np.zeros(mask.shape) a[mask] = glm.a.squeeze() nb.save(nb.Nifti1Image(a, nii.get_affine()), self._a_file) self._model = glm.model self._method = glm.method return runtime def _list_outputs(self): outputs = self._outputs().get() outputs["beta"] = self._beta_file outputs["nvbeta"] = self._nvbeta outputs["s2"] = self._s2_file outputs["dof"] = self._dof outputs["constants"] = self._constants outputs["axis"] = self._axis outputs["reg_names"] = self._reg_names if self.inputs.model == "ar1": outputs["a"] = self._a_file if self.inputs.save_residuals: outputs["residuals"] = self._residuals_file return 
outputs class EstimateContrastInputSpec(BaseInterfaceInputSpec): contrasts = traits.List( traits.Either(traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('F'), traits.List(traits.Either(traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float), traits.List(traits.Float)))))), desc="""List of contrasts with each contrast being a list of the form: [('name', 'stat', [condition list], [weight list], [session list])]. if session list is None or not provided, all sessions are used. For F contrasts, the condition list should contain previously defined T-contrasts.""", mandatory=True) beta = File(exists=True, desc="beta coefficients of the fitted model",mandatory=True) nvbeta = traits.Any(mandatory=True) s2 = File(exists=True, desc="squared variance of the residuals",mandatory=True) dof = traits.Any(desc="degrees of freedom", mandatory=True) constants = traits.Any(mandatory=True) axis = traits.Any(mandatory=True) reg_names = traits.List(mandatory=True) mask = traits.File(exists=True) class EstimateContrastOutputSpec(TraitedSpec): stat_maps = OutputMultiPath(File(exists=True)) z_maps = OutputMultiPath(File(exists=True)) p_maps = OutputMultiPath(File(exists=True)) class EstimateContrast(BaseInterface): ''' Estimate contrast of a fitted model. ''' input_spec = EstimateContrastInputSpec output_spec = EstimateContrastOutputSpec def _run_interface(self, runtime): beta_nii = nb.load(self.inputs.beta) if isdefined(self.inputs.mask): mask = nb.load(self.inputs.mask).get_data() > 0 else: mask = np.ones(beta_nii.shape[:3]) == 1 glm = GLM.glm() nii = nb.load(self.inputs.beta) glm.beta = beta_nii.get_data().copy()[mask,:].T glm.nvbeta = self.inputs.nvbeta glm.s2 = nb.load(self.inputs.s2).get_data().copy()[mask] glm.dof = self.inputs.dof glm._axis = self.inputs.axis glm._constants = self.inputs.constants reg_names = self.inputs.reg_names self._stat_maps = [] self._p_maps = [] self._z_maps = [] for contrast_def in self.inputs.contrasts: name = contrast_def[0] _ = contrast_def[1] contrast = np.zeros(len(reg_names)) for i, reg_name in enumerate(reg_names): if reg_name in contrast_def[2]: idx = contrast_def[2].index(reg_name) contrast[i] = contrast_def[3][idx] est_contrast = glm.contrast(contrast) stat_map = np.zeros(mask.shape) stat_map[mask] = est_contrast.stat().T stat_map_file = os.path.abspath(name + "_stat_map.nii") nb.save(nb.Nifti1Image(stat_map, nii.get_affine()), stat_map_file) self._stat_maps.append(stat_map_file) p_map = np.zeros(mask.shape) p_map[mask] = est_contrast.pvalue().T p_map_file = os.path.abspath(name + "_p_map.nii") nb.save(nb.Nifti1Image(p_map, nii.get_affine()), p_map_file) self._p_maps.append(p_map_file) z_map = np.zeros(mask.shape) z_map[mask] = est_contrast.zscore().T z_map_file = os.path.abspath(name + "_z_map.nii") nb.save(nb.Nifti1Image(z_map, nii.get_affine()), z_map_file) self._z_maps.append(z_map_file) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs["stat_maps"] = self._stat_maps outputs["p_maps"] = self._p_maps outputs["z_maps"] = self._z_maps return outputs nipype-0.9.2/nipype/interfaces/nipy/preprocess.py000066400000000000000000000356611227300005300221760ustar00rootroot00000000000000""" Change directory to provide 
relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os import warnings import nibabel as nb import numpy as np from ...utils.misc import package_check from ...utils.filemanip import split_filename, fname_presuffix have_nipy = True try: package_check('nipy') except Exception, e: have_nipy = False else: import nipy from nipy import save_image, load_image nipy_version = nipy.__version__ from ..base import (TraitedSpec, BaseInterface, traits, BaseInterfaceInputSpec, isdefined, File, InputMultiPath, OutputMultiPath) class ComputeMaskInputSpec(BaseInterfaceInputSpec): mean_volume = File(exists=True, mandatory=True, desc="mean EPI image, used to compute the threshold for the mask") reference_volume = File(exists=True, desc=("reference volume used to compute the mask. " "If none is given, the mean volume is used.")) m = traits.Float(desc="lower fraction of the histogram to be discarded") M = traits.Float(desc="upper fraction of the histogram to be discarded") cc = traits.Bool(desc="Keep only the largest connected component") class ComputeMaskOutputSpec(TraitedSpec): brain_mask = File(exists=True) class ComputeMask(BaseInterface): input_spec = ComputeMaskInputSpec output_spec = ComputeMaskOutputSpec def _run_interface(self, runtime): from nipy.labs.mask import compute_mask args = {} for key in [k for k, _ in self.inputs.items() if k not in BaseInterfaceInputSpec().trait_names()]: value = getattr(self.inputs, key) if isdefined(value): if key in ['mean_volume', 'reference_volume']: nii = nb.load(value) value = nii.get_data() args[key] = value brain_mask = compute_mask(**args) _, name, ext = split_filename(self.inputs.mean_volume) self._brain_mask_path = os.path.abspath("%s_mask.%s" % (name, ext)) nb.save(nb.Nifti1Image(brain_mask.astype(np.uint8), nii.get_affine()), self._brain_mask_path) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs["brain_mask"] = self._brain_mask_path return outputs class FmriRealign4dInputSpec(BaseInterfaceInputSpec): in_file = InputMultiPath(File(exists=True), mandatory=True, desc="File to realign") tr = traits.Float(desc="TR in seconds", mandatory=True) slice_order = traits.List(traits.Int(), desc=('0 based slice order. This would be equivalent to entering ' 'np.argsort(spm_slice_order) for this field. This affects ' 'interleaved acquisition. This field will be deprecated in ' 'future Nipy releases and be replaced by actual slice ' 'acquisition times.'), requires=["time_interp"]) tr_slices = traits.Float(desc="TR slices", requires=['time_interp']) start = traits.Float(0.0, usedefault=True, desc="time offset into TR to align slices to") time_interp = traits.Enum(True, requires=["slice_order"], desc="Assume smooth changes across time e.g.,\ fmri series. 
If you don't want slice timing \ correction set this to undefined") loops = InputMultiPath([5], traits.Int, usedefault=True, desc="loops within each run") between_loops = InputMultiPath([5], traits.Int, usedefault=True, desc="loops used to \ realign different \ runs") speedup = InputMultiPath([5], traits.Int, usedefault=True, desc="successive image \ sub-sampling factors \ for acceleration") class FmriRealign4dOutputSpec(TraitedSpec): out_file = OutputMultiPath(File(exists=True), desc="Realigned files") par_file = OutputMultiPath(File(exists=True), desc="Motion parameter files") class FmriRealign4d(BaseInterface): """Simultaneous motion and slice timing correction algorithm This interface wraps nipy's FmriRealign4d algorithm [1]_. Examples -------- >>> from nipype.interfaces.nipy.preprocess import FmriRealign4d >>> realigner = FmriRealign4d() >>> realigner.inputs.in_file = ['functional.nii'] >>> realigner.inputs.tr = 2 >>> realigner.inputs.slice_order = range(0,67) >>> res = realigner.run() # doctest: +SKIP References ---------- .. [1] Roche A. A four-dimensional registration algorithm with \ application to joint correction of motion and slice timing \ in fMRI. IEEE Trans Med Imaging. 2011 Aug;30(8):1546-54. DOI_. .. _DOI: http://dx.doi.org/10.1109/TMI.2011.2131152 """ input_spec = FmriRealign4dInputSpec output_spec = FmriRealign4dOutputSpec keywords = ['slice timing', 'motion correction'] def _run_interface(self, runtime): from nipy.algorithms.registration import FmriRealign4d as FR4d all_ims = [load_image(fname) for fname in self.inputs.in_file] if not isdefined(self.inputs.tr_slices): TR_slices = None else: TR_slices = self.inputs.tr_slices R = FR4d(all_ims, tr=self.inputs.tr, slice_order=self.inputs.slice_order, tr_slices=TR_slices, time_interp=self.inputs.time_interp, start=self.inputs.start) R.estimate(loops=list(self.inputs.loops), between_loops=list(self.inputs.between_loops), speedup=list(self.inputs.speedup)) corr_run = R.resample() self._out_file_path = [] self._par_file_path = [] for j, corr in enumerate(corr_run): self._out_file_path.append(os.path.abspath('corr_%s.nii.gz' % (split_filename(self.inputs.in_file[j])[1]))) save_image(corr, self._out_file_path[j]) self._par_file_path.append(os.path.abspath('%s.par' % (os.path.split(self.inputs.in_file[j])[1]))) mfile = open(self._par_file_path[j], 'w') motion = R._transforms[j] # nipy does not encode euler angles. return in original form of # translation followed by rotation vector see: # http://en.wikipedia.org/wiki/Rodrigues'_rotation_formula for i, mo in enumerate(motion): params = ['%.10f' % item for item in np.hstack((mo.translation, mo.rotation))] string = ' '.join(params) + '\n' mfile.write(string) mfile.close() return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = self._out_file_path outputs['par_file'] = self._par_file_path return outputs class SpaceTimeRealignerInputSpec(BaseInterfaceInputSpec): in_file = InputMultiPath(File(exists=True), mandatory=True, min_ver='0.4.0.dev', desc="File to realign") tr = traits.Float(desc="TR in seconds", requires=['slice_times']) slice_times = traits.Either(traits.List(traits.Float()), traits.Enum('asc_alt_2', 'asc_alt_2_1', 'asc_alt_half', 'asc_alt_siemens', 'ascending', 'desc_alt_2', 'desc_alt_half', 'descending'), desc=('Actual slice acquisition times.')) slice_info = traits.Either(traits.Int, traits.List(min_len=2, max_len=2), desc=('Single integer or length 2 sequence ' 'If int, the axis in `images` that is the ' 'slice axis. 
In a 4D image, this will ' 'often be axis = 2. If a 2 sequence, then' ' elements are ``(slice_axis, ' 'slice_direction)``, where ``slice_axis`` ' 'is the slice axis in the image as above, ' 'and ``slice_direction`` is 1 if the ' 'slices were acquired slice 0 first, slice' ' -1 last, or -1 if acquired slice -1 ' 'first, slice 0 last. If `slice_info` is ' 'an int, assume ' '``slice_direction`` == 1.'), requires=['slice_times'], ) class SpaceTimeRealignerOutputSpec(TraitedSpec): out_file = OutputMultiPath(File(exists=True), desc="Realigned files") par_file = OutputMultiPath(File(exists=True), desc=("Motion parameter files. Angles are not " "euler angles")) class SpaceTimeRealigner(BaseInterface): """Simultaneous motion and slice timing correction algorithm If slice_times is not specified, this algorithm performs spatial motion correction This interface wraps nipy's SpaceTimeRealign algorithm [1]_ or simply the SpatialRealign algorithm when timing info is not provided. Examples -------- >>> from nipype.interfaces.nipy import SpaceTimeRealigner >>> #Run spatial realignment only >>> realigner = SpaceTimeRealigner() >>> realigner.inputs.in_file = ['functional.nii'] >>> res = realigner.run() # doctest: +SKIP >>> realigner = SpaceTimeRealigner() >>> realigner.inputs.in_file = ['functional.nii'] >>> realigner.inputs.tr = 2 >>> realigner.inputs.slice_times = range(0, 3, 67) >>> realigner.inputs.slice_info = 2 >>> res = realigner.run() # doctest: +SKIP References ---------- .. [1] Roche A. A four-dimensional registration algorithm with \ application to joint correction of motion and slice timing \ in fMRI. IEEE Trans Med Imaging. 2011 Aug;30(8):1546-54. DOI_. .. _DOI: http://dx.doi.org/10.1109/TMI.2011.2131152 """ input_spec = SpaceTimeRealignerInputSpec output_spec = SpaceTimeRealignerOutputSpec keywords = ['slice timing', 'motion correction'] @property def version(self): return nipy_version def _run_interface(self, runtime): all_ims = [load_image(fname) for fname in self.inputs.in_file] if not isdefined(self.inputs.slice_times): from nipy.algorithms.registration.groupwise_registration import \ SpaceRealign R = SpaceRealign(all_ims) else: from nipy.algorithms.registration import SpaceTimeRealign R = SpaceTimeRealign(all_ims, tr=self.inputs.tr, slice_times=self.inputs.slice_times, slice_info=self.inputs.slice_info, ) R.estimate(refscan=None) corr_run = R.resample() self._out_file_path = [] self._par_file_path = [] for j, corr in enumerate(corr_run): self._out_file_path.append(os.path.abspath('corr_%s.nii.gz' % (split_filename(self.inputs.in_file[j])[1]))) save_image(corr, self._out_file_path[j]) self._par_file_path.append(os.path.abspath('%s.par' % (os.path.split(self.inputs.in_file[j])[1]))) mfile = open(self._par_file_path[j], 'w') motion = R._transforms[j] # nipy does not encode euler angles. 
return in original form of # translation followed by rotation vector see: # http://en.wikipedia.org/wiki/Rodrigues'_rotation_formula for i, mo in enumerate(motion): params = ['%.10f' % item for item in np.hstack((mo.translation, mo.rotation))] string = ' '.join(params) + '\n' mfile.write(string) mfile.close() return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = self._out_file_path outputs['par_file'] = self._par_file_path return outputs class TrimInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, desc="EPI image to trim") begin_index = traits.Int( 0, usedefault=True, desc='first volume') end_index = traits.Int( 0, usedefault=True, desc='last volume indexed as in python (and 0 for last)') out_file = File(desc='output filename') suffix = traits.Str( '_trim', usedefault=True, desc='suffix for out_file to use if no out_file provided') class TrimOutputSpec(TraitedSpec): out_file = File(exists=True) class Trim(BaseInterface): """ Simple interface to trim a few volumes from a 4d fmri nifti file Examples -------- >>> from nipype.interfaces.nipy.preprocess import Trim >>> trim = Trim() >>> trim.inputs.in_file = 'functional.nii' >>> trim.inputs.begin_index = 3 # remove 3 first volumes >>> res = trim.run() # doctest: +SKIP """ input_spec = TrimInputSpec output_spec = TrimOutputSpec def _run_interface(self, runtime): out_file = self._list_outputs()['out_file'] nii = nb.load(self.inputs.in_file) if self.inputs.end_index == 0: s = slice(self.inputs.begin_index, nii.shape[3]) else: s = slice(self.inputs.begin_index, self.inputs.end_index) nii2 = nb.Nifti1Image( nii.get_data()[..., s], nii.get_affine(), nii.get_header()) nb.save(nii2, out_file) return runtime def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_file if not isdefined(outputs['out_file']): outputs['out_file'] = fname_presuffix( self.inputs.in_file, newpath=os.getcwd(), suffix=self.inputs.suffix) outputs['out_file'] = os.path.abspath(outputs['out_file']) return outputs nipype-0.9.2/nipype/interfaces/nipy/tests/000077500000000000000000000000001227300005300205665ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py000066400000000000000000000015751227300005300254670ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.nipy.preprocess import ComputeMask def test_ComputeMask_inputs(): input_map = dict(M=dict(), cc=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), m=dict(), mean_volume=dict(mandatory=True, ), reference_volume=dict(), ) inputs = ComputeMask.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ComputeMask_outputs(): output_map = dict(brain_mask=dict(), ) outputs = ComputeMask.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py000066400000000000000000000022111227300005300265140ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.nipy.model import EstimateContrast def test_EstimateContrast_inputs(): input_map = dict(axis=dict(mandatory=True, ), beta=dict(mandatory=True, ), 
constants=dict(mandatory=True, ), contrasts=dict(mandatory=True, ), dof=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), mask=dict(), nvbeta=dict(mandatory=True, ), reg_names=dict(mandatory=True, ), s2=dict(mandatory=True, ), ) inputs = EstimateContrast.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_EstimateContrast_outputs(): output_map = dict(p_maps=dict(), stat_maps=dict(), z_maps=dict(), ) outputs = EstimateContrast.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/nipy/tests/test_auto_FitGLM.py000066400000000000000000000024351227300005300243150ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.nipy.model import FitGLM def test_FitGLM_inputs(): input_map = dict(TR=dict(mandatory=True, ), drift_model=dict(usedefault=True, ), hrf_model=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), mask=dict(), method=dict(usedefault=True, ), model=dict(usedefault=True, ), normalize_design_matrix=dict(usedefault=True, ), plot_design_matrix=dict(usedefault=True, ), save_residuals=dict(usedefault=True, ), session_info=dict(mandatory=True, ), ) inputs = FitGLM.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FitGLM_outputs(): output_map = dict(a=dict(), axis=dict(), beta=dict(), constants=dict(), dof=dict(), nvbeta=dict(), reg_names=dict(), residuals=dict(), s2=dict(), ) outputs = FitGLM.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/nipy/tests/test_auto_FmriRealign4d.py000066400000000000000000000022671227300005300256650ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.nipy.preprocess import FmriRealign4d def test_FmriRealign4d_inputs(): input_map = dict(between_loops=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(mandatory=True, ), loops=dict(usedefault=True, ), slice_order=dict(requires=['time_interp'], ), speedup=dict(usedefault=True, ), start=dict(usedefault=True, ), time_interp=dict(requires=['slice_order'], ), tr=dict(mandatory=True, ), tr_slices=dict(requires=['time_interp'], ), ) inputs = FmriRealign4d.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FmriRealign4d_outputs(): output_map = dict(out_file=dict(), par_file=dict(), ) outputs = FmriRealign4d.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/nipy/tests/test_auto_Similarity.py000066400000000000000000000016331227300005300253600ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.nipy.utils import Similarity def test_Similarity_inputs(): input_map = 
dict(ignore_exception=dict(nohash=True, usedefault=True, ), mask1=dict(), mask2=dict(), metric=dict(usedefault=True, ), volume1=dict(mandatory=True, ), volume2=dict(mandatory=True, ), ) inputs = Similarity.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Similarity_outputs(): output_map = dict(similarity=dict(), ) outputs = Similarity.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py000066400000000000000000000017721227300005300267410ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.nipy.preprocess import SpaceTimeRealigner def test_SpaceTimeRealigner_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(mandatory=True, min_ver='0.4.0.dev', ), slice_info=dict(requires=['slice_times'], ), slice_times=dict(), tr=dict(requires=['slice_times'], ), ) inputs = SpaceTimeRealigner.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SpaceTimeRealigner_outputs(): output_map = dict(out_file=dict(), par_file=dict(), ) outputs = SpaceTimeRealigner.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/nipy/tests/test_auto_Trim.py000066400000000000000000000016411227300005300241440ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.nipy.preprocess import Trim def test_Trim_inputs(): input_map = dict(begin_index=dict(usedefault=True, ), end_index=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(mandatory=True, ), out_file=dict(), suffix=dict(usedefault=True, ), ) inputs = Trim.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Trim_outputs(): output_map = dict(out_file=dict(), ) outputs = Trim.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/nipy/utils.py000066400000000000000000000064211227300005300211410ustar00rootroot00000000000000""" Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import warnings import nibabel as nb from ...utils.misc import package_check have_nipy = True try: package_check('nipy') except Exception, e: have_nipy = False else: from nipy.algorithms.registration.histogram_registration import HistogramRegistration from nipy.algorithms.registration.affine import Affine from ..base import (TraitedSpec, BaseInterface, traits, BaseInterfaceInputSpec, File, isdefined) class SimilarityInputSpec(BaseInterfaceInputSpec): volume1 = File(exists=True, desc="3D volume", mandatory=True) volume2 = File(exists=True, desc="3D volume", 
mandatory=True) mask1 = File(exists=True, desc="3D volume") mask2 = File(exists=True, desc="3D volume") metric = traits.Either(traits.Enum('cc', 'cr', 'crl1', 'mi', 'nmi', 'slr'), traits.Callable(), desc="""str or callable Cost-function for assessing image similarity. If a string, one of 'cc': correlation coefficient, 'cr': correlation ratio, 'crl1': L1-norm based correlation ratio, 'mi': mutual information, 'nmi': normalized mutual information, 'slr': supervised log-likelihood ratio. If a callable, it should take a two-dimensional array representing the image joint histogram as an input and return a float.""", usedefault=True) class SimilarityOutputSpec(TraitedSpec): similarity = traits.Float(desc="Similarity between volume 1 and 2") class Similarity(BaseInterface): """Calculates similarity between two 3D volumes. Both volumes have to be in the same coordinate system, same space within that coordinate system and with the same voxel dimensions. Example ------- >>> from nipype.interfaces.nipy.utils import Similarity >>> similarity = Similarity() >>> similarity.inputs.volume1 = 'rc1s1.nii' >>> similarity.inputs.volume2 = 'rc1s2.nii' >>> similarity.inputs.mask1 = 'mask.nii' >>> similarity.inputs.mask2 = 'mask.nii' >>> similarity.inputs.metric = 'cr' >>> res = similarity.run() # doctest: +SKIP """ input_spec = SimilarityInputSpec output_spec = SimilarityOutputSpec def _run_interface(self, runtime): vol1_nii = nb.load(self.inputs.volume1) vol2_nii = nb.load(self.inputs.volume2) if isdefined(self.inputs.mask1): mask1 = nb.load(self.inputs.mask1).get_data() == 1 else: mask1 = None if isdefined(self.inputs.mask2): mask2 = nb.load(self.inputs.mask2).get_data() == 1 else: mask2 = None histreg = HistogramRegistration(from_img = vol1_nii, to_img = vol2_nii, similarity=self.inputs.metric, from_mask = mask1, to_mask = mask2) self._similarity = histreg.eval(Affine()) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['similarity'] = self._similarity return outputs nipype-0.9.2/nipype/interfaces/nitime/000077500000000000000000000000001227300005300177325ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/nitime/__init__.py000066400000000000000000000003551227300005300220460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .analysis import (CoherenceAnalyzerInputSpec, CoherenceAnalyzerOutputSpec, CoherenceAnalyzer) nipype-0.9.2/nipype/interfaces/nitime/analysis.py000066400000000000000000000234241227300005300221340ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Interfaces to functionality from nitime for time-series analysis of fmri data - nitime.analysis.CoherenceAnalyzer: Coherence/y - nitime.fmri.io: - nitime.viz.drawmatrix_channels """ import warnings import numpy as np import tempfile from ...utils.misc import package_check from ..base import (TraitedSpec, File, Undefined, traits, BaseInterface, isdefined, BaseInterfaceInputSpec) from ...utils.filemanip import fname_presuffix have_nitime = True try: package_check('nitime') except Exception, e: have_nitime = False else: import nitime.analysis as nta from nitime.timeseries import TimeSeries import nitime.viz as viz class CoherenceAnalyzerInputSpec(BaseInterfaceInputSpec): #Input either csv file, or time-series object and use _xor_inputs to #discriminate _xor_inputs = ('in_file', 'in_TS') in_file = File(desc=('csv file with 
ROIs on the columns and ' 'time-points on the rows. ROI names at the top row'), exists=True, requires=('TR',)) #If you gave just a file name, you need to specify the sampling_rate: TR = traits.Float(desc=('The TR used to collect the data' 'in your csv file ')) in_TS = traits.Any(desc='a nitime TimeSeries object') NFFT = traits.Range(low=32, value=64, usedefault=True, desc=('This is the size of the window used for ' 'the spectral estimation. Use values between ' '32 and the number of samples in your time-series.' '(Defaults to 64.)')) n_overlap = traits.Range(low=0, value=0, usedefault=True, desc=('The number of samples which overlap' 'between subsequent windows.(Defaults to 0)')) frequency_range = traits.List(value=[0.02, 0.15], usedefault=True, minlen=2, maxlen=2, desc=('The range of frequencies over' 'which the analysis will average.' '[low,high] (Default [0.02,0.15]')) output_csv_file = File(desc='File to write outputs (coherence,time-delay) with file-names: file_name_ {coherence,timedelay}') output_figure_file = File(desc='File to write output figures (coherence,time-delay) with file-names: file_name_{coherence,timedelay}. Possible formats: .png,.svg,.pdf,.jpg,...') figure_type = traits.Enum('matrix', 'network', usedefault=True, desc=("The type of plot to generate, where " "'matrix' denotes a matrix image and" "'network' denotes a graph representation." " Default: 'matrix'")) class CoherenceAnalyzerOutputSpec(TraitedSpec): coherence_array = traits.Array(desc=('The pairwise coherence values' 'between the ROIs')) timedelay_array = traits.Array(desc=('The pairwise time delays between the' 'ROIs (in seconds)')) coherence_csv = File(desc=('A csv file containing the pairwise ' 'coherence values')) timedelay_csv = File(desc=('A csv file containing the pairwise ' 'time delay values')) coherence_fig = File(desc=('Figure representing coherence values')) timedelay_fig = File(desc=('Figure representing coherence values')) class CoherenceAnalyzer(BaseInterface): input_spec = CoherenceAnalyzerInputSpec output_spec = CoherenceAnalyzerOutputSpec def _read_csv(self): """ Read from csv in_file and return an array and ROI names The input file should have a first row containing the names of the ROIs (strings) the rest of the data will be read in and transposed so that the rows (TRs) will becomes the second (and last) dimension of the array """ #Check that input conforms to expectations: first_row = open(self.inputs.in_file).readline() if not first_row[1].isalpha(): raise ValueError("First row of in_file should contain ROI names as strings of characters") roi_names = open(self.inputs.in_file).readline().replace('\"', '').strip('\n').split(',') #Transpose, so that the time is the last dimension: data = np.loadtxt(self.inputs.in_file, skiprows=1, delimiter=',').T return data, roi_names def _csv2ts(self): """ Read data from the in_file and generate a nitime TimeSeries object""" data, roi_names = self._read_csv() TS = TimeSeries(data=data, sampling_interval=self.inputs.TR, time_unit='s') TS.metadata = dict(ROIs=roi_names) return TS #Rewrite _run_interface, but not run def _run_interface(self, runtime): lb, ub = self.inputs.frequency_range if self.inputs.in_TS is Undefined: # get TS form csv and inputs.TR TS = self._csv2ts() else: # get TS from inputs.in_TS TS = self.inputs.in_TS # deal with creating or storing ROI names: if not TS.metadata.has_key('ROIs'): self.ROIs = ['roi_%d' % x for x, _ in enumerate(TS.data)] else: self.ROIs = TS.metadata['ROIs'] A = nta.CoherenceAnalyzer(TS, 
method=dict(this_method='welch', NFFT=self.inputs.NFFT, n_overlap=self.inputs.n_overlap)) freq_idx = np.where((A.frequencies > self.inputs.frequency_range[0]) * (A.frequencies < self.inputs.frequency_range[1]))[0] #Get the coherence matrix from the analyzer, averaging on the last #(frequency) dimension: (roi X roi array) self.coherence = np.mean(A.coherence[:, :, freq_idx], -1) # Get the time delay from analyzer, (roi X roi array) self.delay = np.mean(A.delay[:, :, freq_idx], -1) return runtime #Rewrite _list_outputs (look at BET) def _list_outputs(self): outputs = self.output_spec().get() #if isdefined(self.inputs.output_csv_file): #write to a csv file and assign a value to self.coherence_file (a #file name + path) #Always defined (the arrays): outputs['coherence_array'] = self.coherence outputs['timedelay_array'] = self.delay #Conditional if isdefined(self.inputs.output_csv_file) and hasattr(self, 'coherence'): # we need to make a function that we call here that writes the # coherence values to this file "coherence_csv" and makes the # time_delay csv file?? self._make_output_files() outputs['coherence_csv'] = fname_presuffix(self.inputs.output_csv_file, suffix='_coherence') outputs['timedelay_csv'] = fname_presuffix(self.inputs.output_csv_file, suffix='_delay') if isdefined(self.inputs.output_figure_file) and hasattr(self, 'coherence'): self._make_output_figures() outputs['coherence_fig'] = fname_presuffix(self.inputs.output_figure_file, suffix='_coherence') outputs['timedelay_fig'] = fname_presuffix(self.inputs.output_figure_file, suffix='_delay') return outputs def _make_output_files(self): """ Generate the output csv files. """ for this in zip([self.coherence, self.delay], ['coherence', 'delay']): tmp_f = tempfile.mkstemp()[1] np.savetxt(tmp_f, this[0], delimiter=',') fid = open(fname_presuffix(self.inputs.output_csv_file, suffix='_%s' % this[1]), 'w+') # this writes ROIs as header line fid.write(',' + ','.join(self.ROIs) + '\n') # this writes ROI and data to a line for r, line in zip(self.ROIs, open(tmp_f)): fid.write('%s,%s' % (r, line)) fid.close() def _make_output_figures(self): """ Generate the desired figure and save the files according to self.inputs.output_figure_file """ if self.inputs.figure_type == 'matrix': fig_coh = viz.drawmatrix_channels(self.coherence, channel_names=self.ROIs, color_anchor=0) fig_coh.savefig(fname_presuffix(self.inputs.output_figure_file, suffix='_coherence')) fig_dt = viz.drawmatrix_channels(self.delay, channel_names=self.ROIs, color_anchor=0) fig_dt.savefig(fname_presuffix(self.inputs.output_figure_file, suffix='_delay')) else: fig_coh = viz.drawgraph_channels(self.coherence, channel_names=self.ROIs) fig_coh.savefig(fname_presuffix(self.inputs.output_figure_file, suffix='_coherence')) fig_dt = viz.drawgraph_channels(self.delay, channel_names=self.ROIs) fig_dt.savefig(fname_presuffix(self.inputs.output_figure_file, suffix='_delay')) class GetTimeSeriesInputSpec(): pass class GetTimeSeriesOutputSpec(): pass class GetTimeSeries(): # getting time series data from nifti files and ROIs pass nipype-0.9.2/nipype/interfaces/nitime/tests/000077500000000000000000000000001227300005300210745ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py000066400000000000000000000023451227300005300271420ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.nitime.analysis import CoherenceAnalyzer def test_CoherenceAnalyzer_inputs(): 
input_map = dict(NFFT=dict(usedefault=True, ), TR=dict(), figure_type=dict(usedefault=True, ), frequency_range=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_TS=dict(), in_file=dict(requires=('TR',), ), n_overlap=dict(usedefault=True, ), output_csv_file=dict(), output_figure_file=dict(), ) inputs = CoherenceAnalyzer.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CoherenceAnalyzer_outputs(): output_map = dict(coherence_array=dict(), coherence_csv=dict(), coherence_fig=dict(), timedelay_array=dict(), timedelay_csv=dict(), timedelay_fig=dict(), ) outputs = CoherenceAnalyzer.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/nitime/tests/test_nitime.py000066400000000000000000000050621227300005300237750ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import tempfile import numpy as np from nipype.testing import (assert_equal, assert_raises, skipif) from nipype.testing import example_data import nipype.interfaces.nitime as nitime no_nitime = not nitime.analysis.have_nitime display_available = 'DISPLAY' in os.environ and os.environ['DISPLAY'] @skipif(no_nitime) def test_read_csv(): """Test that reading the data from csv file gives you back a reasonable time-series object """ CA = nitime.CoherenceAnalyzer() CA.inputs.TR = 1.89 # bogus value just to pass traits test CA.inputs.in_file = example_data('fmri_timeseries_nolabels.csv') yield assert_raises,ValueError,CA._read_csv CA.inputs.in_file = example_data('fmri_timeseries.csv') data,roi_names = CA._read_csv() yield assert_equal, data[0][0],10125.9 yield assert_equal, roi_names[0],'WM' @skipif(no_nitime) def test_coherence_analysis(): """Test that the coherence analyzer works """ import nitime.analysis as nta import nitime.timeseries as ts #This is the nipype interface analysis: CA = nitime.CoherenceAnalyzer() CA.inputs.TR = 1.89 CA.inputs.in_file = example_data('fmri_timeseries.csv') if display_available: tmp_png = tempfile.mkstemp(suffix='.png')[1] CA.inputs.output_figure_file = tmp_png tmp_csv = tempfile.mkstemp(suffix='.csv')[1] CA.inputs.output_csv_file = tmp_csv o = CA.run() yield assert_equal,o.outputs.coherence_array.shape,(31,31) #This is the nitime analysis: TR=1.89 data_rec = np.recfromcsv(example_data('fmri_timeseries.csv')) roi_names= np.array(data_rec.dtype.names) n_samples = data_rec.shape[0] data = np.zeros((len(roi_names),n_samples)) for n_idx, roi in enumerate(roi_names): data[n_idx] = data_rec[roi] T = ts.TimeSeries(data,sampling_interval=TR) yield assert_equal,CA._csv2ts().data,T.data T.metadata['roi'] = roi_names C = nta.CoherenceAnalyzer(T,method=dict(this_method='welch', NFFT=CA.inputs.NFFT, n_overlap=CA.inputs.n_overlap)) freq_idx = np.where((C.frequencies>CA.inputs.frequency_range[0]) * (C.frequencies0 instead of thresholding? 
set fmri(conmask_zerothresh_yn) 0 nipype-0.9.2/nipype/interfaces/script_templates/feat_contrasts.tcl000066400000000000000000000377351227300005300255710ustar00rootroot00000000000000# Contrast & F-tests mode # real : control real EVs # orig : control original EVs set fmri(con_mode_old) orig set fmri(con_mode) orig ### Needs iteration # Display images for contrast_real 1 set fmri(conpic_real.1) 1 # Title for contrast_real 1 set fmri(conname_real.1) "left>right" # Real contrast_real vector 1 element 1 set fmri(con_real1.1) 1 # Real contrast_real vector 1 element 2 set fmri(con_real1.2) -1.0 # Real contrast_real vector 1 element 3 set fmri(con_real1.3) 1.0 # Real contrast_real vector 1 element 4 set fmri(con_real1.4) -1.0 # Real contrast_real vector 1 element 5 set fmri(con_real1.5) 1.0 # Real contrast_real vector 1 element 6 set fmri(con_real1.6) -1.0 # Real contrast_real vector 1 element 7 set fmri(con_real1.7) 1.0 # Real contrast_real vector 1 element 8 set fmri(con_real1.8) -1.0 # Display images for contrast_real 2 set fmri(conpic_real.2) 1 # Title for contrast_real 2 set fmri(conname_real.2) "visual>vibe" # Real contrast_real vector 2 element 1 set fmri(con_real2.1) -1.0 # Real contrast_real vector 2 element 2 set fmri(con_real2.2) -1.0 # Real contrast_real vector 2 element 3 set fmri(con_real2.3) -1.0 # Real contrast_real vector 2 element 4 set fmri(con_real2.4) -1.0 # Real contrast_real vector 2 element 5 set fmri(con_real2.5) 1.0 # Real contrast_real vector 2 element 6 set fmri(con_real2.6) 1.0 # Real contrast_real vector 2 element 7 set fmri(con_real2.7) 1.0 # Real contrast_real vector 2 element 8 set fmri(con_real2.8) 1.0 # Display images for contrast_real 3 set fmri(conpic_real.3) 1 # Title for contrast_real 3 set fmri(conname_real.3) "seq>all" # Real contrast_real vector 3 element 1 set fmri(con_real3.1) -1.0 # Real contrast_real vector 3 element 2 set fmri(con_real3.2) -1.0 # Real contrast_real vector 3 element 3 set fmri(con_real3.3) 1.0 # Real contrast_real vector 3 element 4 set fmri(con_real3.4) 1.0 # Real contrast_real vector 3 element 5 set fmri(con_real3.5) -1.0 # Real contrast_real vector 3 element 6 set fmri(con_real3.6) -1.0 # Real contrast_real vector 3 element 7 set fmri(con_real3.7) 1.0 # Real contrast_real vector 3 element 8 set fmri(con_real3.8) 1.0 # Display images for contrast_real 4 set fmri(conpic_real.4) 1 # Title for contrast_real 4 set fmri(conname_real.4) "visual seq>all" # Real contrast_real vector 4 element 1 set fmri(con_real4.1) 0 # Real contrast_real vector 4 element 2 set fmri(con_real4.2) 0 # Real contrast_real vector 4 element 3 set fmri(con_real4.3) 0 # Real contrast_real vector 4 element 4 set fmri(con_real4.4) 0 # Real contrast_real vector 4 element 5 set fmri(con_real4.5) -1.0 # Real contrast_real vector 4 element 6 set fmri(con_real4.6) -1.0 # Real contrast_real vector 4 element 7 set fmri(con_real4.7) 1.0 # Real contrast_real vector 4 element 8 set fmri(con_real4.8) 1.0 # Display images for contrast_real 5 set fmri(conpic_real.5) 1 # Title for contrast_real 5 set fmri(conname_real.5) "vibe seq>all" # Real contrast_real vector 5 element 1 set fmri(con_real5.1) -1.0 # Real contrast_real vector 5 element 2 set fmri(con_real5.2) -1.0 # Real contrast_real vector 5 element 3 set fmri(con_real5.3) 1.0 # Real contrast_real vector 5 element 4 set fmri(con_real5.4) 1.0 # Real contrast_real vector 5 element 5 set fmri(con_real5.5) 0 # Real contrast_real vector 5 element 6 set fmri(con_real5.6) 0 # Real contrast_real vector 5 element 7 set fmri(con_real5.7) 
0 # Real contrast_real vector 5 element 8 set fmri(con_real5.8) 0 # Display images for contrast_real 6 set fmri(conpic_real.6) 1 # Title for contrast_real 6 set fmri(conname_real.6) "visual seq>vibe seq" # Real contrast_real vector 6 element 1 set fmri(con_real6.1) 0 # Real contrast_real vector 6 element 2 set fmri(con_real6.2) 0 # Real contrast_real vector 6 element 3 set fmri(con_real6.3) -1.0 # Real contrast_real vector 6 element 4 set fmri(con_real6.4) -1.0 # Real contrast_real vector 6 element 5 set fmri(con_real6.5) 0 # Real contrast_real vector 6 element 6 set fmri(con_real6.6) 0 # Real contrast_real vector 6 element 7 set fmri(con_real6.7) 1.0 # Real contrast_real vector 6 element 8 set fmri(con_real6.8) 1.0 # Display images for contrast_real 7 set fmri(conpic_real.7) 1 # Title for contrast_real 7 set fmri(conname_real.7) "visual all>vibe all" # Real contrast_real vector 7 element 1 set fmri(con_real7.1) -1.0 # Real contrast_real vector 7 element 2 set fmri(con_real7.2) -1.0 # Real contrast_real vector 7 element 3 set fmri(con_real7.3) 0 # Real contrast_real vector 7 element 4 set fmri(con_real7.4) 0 # Real contrast_real vector 7 element 5 set fmri(con_real7.5) 1.0 # Real contrast_real vector 7 element 6 set fmri(con_real7.6) 1.0 # Real contrast_real vector 7 element 7 set fmri(con_real7.7) 0 # Real contrast_real vector 7 element 8 set fmri(con_real7.8) 0 # Display images for contrast_real 8 set fmri(conpic_real.8) 1 # Title for contrast_real 8 set fmri(conname_real.8) "mode x complexity" # Real contrast_real vector 8 element 1 set fmri(con_real8.1) -1.0 # Real contrast_real vector 8 element 2 set fmri(con_real8.2) -1.0 # Real contrast_real vector 8 element 3 set fmri(con_real8.3) 1.0 # Real contrast_real vector 8 element 4 set fmri(con_real8.4) 1.0 # Real contrast_real vector 8 element 5 set fmri(con_real8.5) 1.0 # Real contrast_real vector 8 element 6 set fmri(con_real8.6) 1.0 # Real contrast_real vector 8 element 7 set fmri(con_real8.7) -1.0 # Real contrast_real vector 8 element 8 set fmri(con_real8.8) -1.0 # Display images for contrast_orig 1 set fmri(conpic_orig.1) 1 # Title for contrast_orig 1 set fmri(conname_orig.1) "left>right" # Real contrast_orig vector 1 element 1 set fmri(con_orig1.1) 1 # Real contrast_orig vector 1 element 2 set fmri(con_orig1.2) -1.0 # Real contrast_orig vector 1 element 3 set fmri(con_orig1.3) 1.0 # Real contrast_orig vector 1 element 4 set fmri(con_orig1.4) -1.0 # Real contrast_orig vector 1 element 5 set fmri(con_orig1.5) 1.0 # Real contrast_orig vector 1 element 6 set fmri(con_orig1.6) -1.0 # Real contrast_orig vector 1 element 7 set fmri(con_orig1.7) 1.0 # Real contrast_orig vector 1 element 8 set fmri(con_orig1.8) -1.0 # Display images for contrast_orig 2 set fmri(conpic_orig.2) 1 # Title for contrast_orig 2 set fmri(conname_orig.2) "visual>vibe" # Real contrast_orig vector 2 element 1 set fmri(con_orig2.1) -1.0 # Real contrast_orig vector 2 element 2 set fmri(con_orig2.2) -1.0 # Real contrast_orig vector 2 element 3 set fmri(con_orig2.3) -1.0 # Real contrast_orig vector 2 element 4 set fmri(con_orig2.4) -1.0 # Real contrast_orig vector 2 element 5 set fmri(con_orig2.5) 1.0 # Real contrast_orig vector 2 element 6 set fmri(con_orig2.6) 1.0 # Real contrast_orig vector 2 element 7 set fmri(con_orig2.7) 1.0 # Real contrast_orig vector 2 element 8 set fmri(con_orig2.8) 1.0 # Display images for contrast_orig 3 set fmri(conpic_orig.3) 1 # Title for contrast_orig 3 set fmri(conname_orig.3) "seq>all" # Real contrast_orig vector 3 element 1 set 
fmri(con_orig3.1) -1.0 # Real contrast_orig vector 3 element 2 set fmri(con_orig3.2) -1.0 # Real contrast_orig vector 3 element 3 set fmri(con_orig3.3) 1.0 # Real contrast_orig vector 3 element 4 set fmri(con_orig3.4) 1.0 # Real contrast_orig vector 3 element 5 set fmri(con_orig3.5) -1.0 # Real contrast_orig vector 3 element 6 set fmri(con_orig3.6) -1.0 # Real contrast_orig vector 3 element 7 set fmri(con_orig3.7) 1.0 # Real contrast_orig vector 3 element 8 set fmri(con_orig3.8) 1.0 # Display images for contrast_orig 4 set fmri(conpic_orig.4) 1 # Title for contrast_orig 4 set fmri(conname_orig.4) "visual seq>all" # Real contrast_orig vector 4 element 1 set fmri(con_orig4.1) 0 # Real contrast_orig vector 4 element 2 set fmri(con_orig4.2) 0 # Real contrast_orig vector 4 element 3 set fmri(con_orig4.3) 0 # Real contrast_orig vector 4 element 4 set fmri(con_orig4.4) 0 # Real contrast_orig vector 4 element 5 set fmri(con_orig4.5) -1.0 # Real contrast_orig vector 4 element 6 set fmri(con_orig4.6) -1.0 # Real contrast_orig vector 4 element 7 set fmri(con_orig4.7) 1.0 # Real contrast_orig vector 4 element 8 set fmri(con_orig4.8) 1.0 # Display images for contrast_orig 5 set fmri(conpic_orig.5) 1 # Title for contrast_orig 5 set fmri(conname_orig.5) "vibe seq>all" # Real contrast_orig vector 5 element 1 set fmri(con_orig5.1) -1.0 # Real contrast_orig vector 5 element 2 set fmri(con_orig5.2) -1.0 # Real contrast_orig vector 5 element 3 set fmri(con_orig5.3) 1.0 # Real contrast_orig vector 5 element 4 set fmri(con_orig5.4) 1.0 # Real contrast_orig vector 5 element 5 set fmri(con_orig5.5) 0 # Real contrast_orig vector 5 element 6 set fmri(con_orig5.6) 0 # Real contrast_orig vector 5 element 7 set fmri(con_orig5.7) 0 # Real contrast_orig vector 5 element 8 set fmri(con_orig5.8) 0 # Display images for contrast_orig 6 set fmri(conpic_orig.6) 1 # Title for contrast_orig 6 set fmri(conname_orig.6) "visual seq>vibe seq" # Real contrast_orig vector 6 element 1 set fmri(con_orig6.1) 0 # Real contrast_orig vector 6 element 2 set fmri(con_orig6.2) 0 # Real contrast_orig vector 6 element 3 set fmri(con_orig6.3) -1.0 # Real contrast_orig vector 6 element 4 set fmri(con_orig6.4) -1.0 # Real contrast_orig vector 6 element 5 set fmri(con_orig6.5) 0 # Real contrast_orig vector 6 element 6 set fmri(con_orig6.6) 0 # Real contrast_orig vector 6 element 7 set fmri(con_orig6.7) 1.0 # Real contrast_orig vector 6 element 8 set fmri(con_orig6.8) 1.0 # Display images for contrast_orig 7 set fmri(conpic_orig.7) 1 # Title for contrast_orig 7 set fmri(conname_orig.7) "visual all>vibe all" # Real contrast_orig vector 7 element 1 set fmri(con_orig7.1) -1.0 # Real contrast_orig vector 7 element 2 set fmri(con_orig7.2) -1.0 # Real contrast_orig vector 7 element 3 set fmri(con_orig7.3) 0 # Real contrast_orig vector 7 element 4 set fmri(con_orig7.4) 0 # Real contrast_orig vector 7 element 5 set fmri(con_orig7.5) 1.0 # Real contrast_orig vector 7 element 6 set fmri(con_orig7.6) 1.0 # Real contrast_orig vector 7 element 7 set fmri(con_orig7.7) 0 # Real contrast_orig vector 7 element 8 set fmri(con_orig7.8) 0 # Display images for contrast_orig 8 set fmri(conpic_orig.8) 1 # Title for contrast_orig 8 set fmri(conname_orig.8) "mode x complexity" # Real contrast_orig vector 8 element 1 set fmri(con_orig8.1) -1.0 # Real contrast_orig vector 8 element 2 set fmri(con_orig8.2) -1.0 # Real contrast_orig vector 8 element 3 set fmri(con_orig8.3) 1.0 # Real contrast_orig vector 8 element 4 set fmri(con_orig8.4) 1.0 # Real contrast_orig vector 8 
element 5 set fmri(con_orig8.5) 1.0 # Real contrast_orig vector 8 element 6 set fmri(con_orig8.6) 1.0 # Real contrast_orig vector 8 element 7 set fmri(con_orig8.7) -1.0 # Real contrast_orig vector 8 element 8 set fmri(con_orig8.8) -1.0 ### This is fixed # Contrast masking - use >0 instead of thresholding? set fmri(conmask_zerothresh_yn) 0 ### These are set for the full combo of contrasts - needs iteration # Mask real contrast/F-test 1 with real contrast/F-test 2? set fmri(conmask1_2) 0 # Mask real contrast/F-test 1 with real contrast/F-test 3? set fmri(conmask1_3) 0 # Mask real contrast/F-test 1 with real contrast/F-test 4? set fmri(conmask1_4) 0 # Mask real contrast/F-test 1 with real contrast/F-test 5? set fmri(conmask1_5) 0 # Mask real contrast/F-test 1 with real contrast/F-test 6? set fmri(conmask1_6) 0 # Mask real contrast/F-test 1 with real contrast/F-test 7? set fmri(conmask1_7) 0 # Mask real contrast/F-test 1 with real contrast/F-test 8? set fmri(conmask1_8) 0 # Mask real contrast/F-test 2 with real contrast/F-test 1? set fmri(conmask2_1) 0 # Mask real contrast/F-test 2 with real contrast/F-test 3? set fmri(conmask2_3) 0 # Mask real contrast/F-test 2 with real contrast/F-test 4? set fmri(conmask2_4) 0 # Mask real contrast/F-test 2 with real contrast/F-test 5? set fmri(conmask2_5) 0 # Mask real contrast/F-test 2 with real contrast/F-test 6? set fmri(conmask2_6) 0 # Mask real contrast/F-test 2 with real contrast/F-test 7? set fmri(conmask2_7) 0 # Mask real contrast/F-test 2 with real contrast/F-test 8? set fmri(conmask2_8) 0 # Mask real contrast/F-test 3 with real contrast/F-test 1? set fmri(conmask3_1) 0 # Mask real contrast/F-test 3 with real contrast/F-test 2? set fmri(conmask3_2) 0 # Mask real contrast/F-test 3 with real contrast/F-test 4? set fmri(conmask3_4) 0 # Mask real contrast/F-test 3 with real contrast/F-test 5? set fmri(conmask3_5) 0 # Mask real contrast/F-test 3 with real contrast/F-test 6? set fmri(conmask3_6) 0 # Mask real contrast/F-test 3 with real contrast/F-test 7? set fmri(conmask3_7) 0 # Mask real contrast/F-test 3 with real contrast/F-test 8? set fmri(conmask3_8) 0 # Mask real contrast/F-test 4 with real contrast/F-test 1? set fmri(conmask4_1) 0 # Mask real contrast/F-test 4 with real contrast/F-test 2? set fmri(conmask4_2) 0 # Mask real contrast/F-test 4 with real contrast/F-test 3? set fmri(conmask4_3) 0 # Mask real contrast/F-test 4 with real contrast/F-test 5? set fmri(conmask4_5) 0 # Mask real contrast/F-test 4 with real contrast/F-test 6? set fmri(conmask4_6) 0 # Mask real contrast/F-test 4 with real contrast/F-test 7? set fmri(conmask4_7) 0 # Mask real contrast/F-test 4 with real contrast/F-test 8? set fmri(conmask4_8) 0 # Mask real contrast/F-test 5 with real contrast/F-test 1? set fmri(conmask5_1) 0 # Mask real contrast/F-test 5 with real contrast/F-test 2? set fmri(conmask5_2) 0 # Mask real contrast/F-test 5 with real contrast/F-test 3? set fmri(conmask5_3) 0 # Mask real contrast/F-test 5 with real contrast/F-test 4? set fmri(conmask5_4) 0 # Mask real contrast/F-test 5 with real contrast/F-test 6? set fmri(conmask5_6) 0 # Mask real contrast/F-test 5 with real contrast/F-test 7? set fmri(conmask5_7) 0 # Mask real contrast/F-test 5 with real contrast/F-test 8? set fmri(conmask5_8) 0 # Mask real contrast/F-test 6 with real contrast/F-test 1? set fmri(conmask6_1) 0 # Mask real contrast/F-test 6 with real contrast/F-test 2? set fmri(conmask6_2) 0 # Mask real contrast/F-test 6 with real contrast/F-test 3? 
set fmri(conmask6_3) 0 # Mask real contrast/F-test 6 with real contrast/F-test 4? set fmri(conmask6_4) 0 # Mask real contrast/F-test 6 with real contrast/F-test 5? set fmri(conmask6_5) 0 # Mask real contrast/F-test 6 with real contrast/F-test 7? set fmri(conmask6_7) 0 # Mask real contrast/F-test 6 with real contrast/F-test 8? set fmri(conmask6_8) 0 # Mask real contrast/F-test 7 with real contrast/F-test 1? set fmri(conmask7_1) 0 # Mask real contrast/F-test 7 with real contrast/F-test 2? set fmri(conmask7_2) 0 # Mask real contrast/F-test 7 with real contrast/F-test 3? set fmri(conmask7_3) 0 # Mask real contrast/F-test 7 with real contrast/F-test 4? set fmri(conmask7_4) 0 # Mask real contrast/F-test 7 with real contrast/F-test 5? set fmri(conmask7_5) 0 # Mask real contrast/F-test 7 with real contrast/F-test 6? set fmri(conmask7_6) 0 # Mask real contrast/F-test 7 with real contrast/F-test 8? set fmri(conmask7_8) 0 # Mask real contrast/F-test 8 with real contrast/F-test 1? set fmri(conmask8_1) 0 # Mask real contrast/F-test 8 with real contrast/F-test 2? set fmri(conmask8_2) 0 # Mask real contrast/F-test 8 with real contrast/F-test 3? set fmri(conmask8_3) 0 # Mask real contrast/F-test 8 with real contrast/F-test 4? set fmri(conmask8_4) 0 # Mask real contrast/F-test 8 with real contrast/F-test 5? set fmri(conmask8_5) 0 # Mask real contrast/F-test 8 with real contrast/F-test 6? set fmri(conmask8_6) 0 # Mask real contrast/F-test 8 with real contrast/F-test 7? set fmri(conmask8_7) 0 ### The rest is just fixed # Do contrast masking at all? set fmri(conmask1_1) 0 # Now options that don't appear in the GUI # Alternative example_func image (not derived from input 4D dataset) set fmri(alternative_example_func) "" # Alternative (to BETting) mask image set fmri(alternative_mask) "" # Initial structural space registration initialisation transform set fmri(init_initial_highres) "" # Structural space registration initialisation transform set fmri(init_highres) "" # Standard space registration initialisation transform set fmri(init_standard) "" # For full FEAT analysis: overwrite existing .feat output dir? 
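# NOTE (illustrative comment, not part of the original template): the
# conmaskI_J flags earlier in this file enumerate every ordered pair of
# distinct contrasts, so a design with N contrasts needs N*(N-1) such
# lines -- for the 8 contrasts in this template that is 8*7 = 56 entries.
# As the "needs iteration" marker above them indicates, that block has to
# be regenerated whenever the number of contrasts changes.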
set fmri(overwrite_yn) 1 nipype-0.9.2/nipype/interfaces/script_templates/feat_ev_gamma.tcl000066400000000000000000000013451227300005300253110ustar00rootroot00000000000000# EV title set fmri(evtitle$ev_num) "$ev_name" # Basic waveform shape # 0 : Square # 1 : Sinusoid # 2 : Custom (1 entry per volume) # 3 : Custom (3 column format) # 4 : Interaction # 10 : Empty (all zeros) set fmri(shape$ev_num) 3 # Convolution # 0 : None # 1 : Gaussian # 2 : Gamma # 3 : Double-Gamma HRF # 4 : Gamma basis functions # 5 : Sine basis functions # 6 : FIR basis functions set fmri(convolve$ev_num) 2 # Convolve phase set fmri(convolve_phase$ev_num) 0 # Apply temporal filtering set fmri(tempfilt_yn$ev_num) 1 # Add temporal derivative set fmri(deriv_yn$ev_num) $temporalderiv # Custom EV file set fmri(custom$ev_num) "$cond_file" # Gamma sigma set fmri(gammasigma$ev_num) 3 # Gamma delay set fmri(gammadelay$ev_num) 6 nipype-0.9.2/nipype/interfaces/script_templates/feat_ev_hrf.tcl000066400000000000000000000013441227300005300250050ustar00rootroot00000000000000# EV title set fmri(evtitle$ev_num) "$ev_name" # Basic waveform shape (EV $ev_num) # 0 : Square # 1 : Sinusoid # 2 : Custom (1 entry per volume) # 3 : Custom (3 column format) # 4 : Interaction # 10 : Empty (all zeros) set fmri(shape$ev_num) 3 # Convolution (EV $ev_num) # 0 : None # 1 : Gaussian # 2 : Gamma # 3 : Double-Gamma HRF # 4 : Gamma basis functions # 5 : Sine basis functions # 6 : FIR basis functions set fmri(convolve$ev_num) 3 # Convolve phase (EV $ev_num) set fmri(convolve_phase$ev_num) 0 # Apply temporal filtering (EV $ev_num) set fmri(tempfilt_yn$ev_num) $tempfilt_yn # Add temporal derivative (EV $ev_num) set fmri(deriv_yn$ev_num) $temporalderiv # Custom EV file (EV $ev_num) set fmri(custom$ev_num) "$cond_file" nipype-0.9.2/nipype/interfaces/script_templates/feat_ev_none.tcl000066400000000000000000000011251227300005300251620ustar00rootroot00000000000000# EV title set fmri(evtitle$ev_num) "$ev_name" # Basic waveform shape # 0 : Square # 1 : Sinusoid # 2 : Custom (1 entry per volume) # 3 : Custom (3 column format) # 4 : Interaction # 10 : Empty (all zeros) set fmri(shape$ev_num) 2 # Convolution # 0 : None # 1 : Gaussian # 2 : Gamma # 3 : Double-Gamma HRF # 4 : Gamma basis functions # 5 : Sine basis functions # 6 : FIR basis functions set fmri(convolve$ev_num) 0 # Apply temporal filtering set fmri(tempfilt_yn$ev_num) $tempfilt_yn # Add temporal derivative set fmri(deriv_yn$ev_num) 0 # Custom EV file set fmri(custom$ev_num) "$cond_file" nipype-0.9.2/nipype/interfaces/script_templates/feat_ev_ortho.tcl000066400000000000000000000000731227300005300253570ustar00rootroot00000000000000# Orthogonalise EV $c0 wrt EV $c1 set fmri(ortho$c0.$c1) 0 nipype-0.9.2/nipype/interfaces/script_templates/feat_fe_copes.tcl000066400000000000000000000001321227300005300253110ustar00rootroot00000000000000# Use lower-level cope $copeno for higher-level analysis set fmri(copeinput.${copeno}) 1 nipype-0.9.2/nipype/interfaces/script_templates/feat_fe_ev_element.tcl000066400000000000000000000002171227300005300263270ustar00rootroot00000000000000# Higher-level EV value for EV 1 and input $input set fmri(evg${input}.1) 1 # Group membership for input $input set fmri(groupmem.${input}) 1 nipype-0.9.2/nipype/interfaces/script_templates/feat_fe_ev_header.tcl000066400000000000000000000014241227300005300261270ustar00rootroot00000000000000# Add confound EVs text file set fmri(confoundevs) 0 # EV 1 title set fmri(evtitle1) "" # Basic waveform shape (EV 1) # 0 : Square # 1 : Sinusoid # 2 : Custom 
(1 entry per volume) # 3 : Custom (3 column format) # 4 : Interaction # 10 : Empty (all zeros) set fmri(shape1) 2 # Convolution (EV 1) # 0 : None # 1 : Gaussian # 2 : Gamma # 3 : Double-Gamma HRF # 4 : Gamma basis functions # 5 : Sine basis functions # 6 : FIR basis functions set fmri(convolve1) 0 # Convolve phase (EV 1) set fmri(convolve_phase1) 0 # Apply temporal filtering (EV 1) set fmri(tempfilt_yn1) 0 # Add temporal derivative (EV 1) set fmri(deriv_yn1) 0 # Custom EV file (EV 1) set fmri(custom1) "dummy" # Orthogonalise EV 1 wrt EV 0 set fmri(ortho1.0) 0 # Orthogonalise EV 1 wrt EV 1 set fmri(ortho1.1) 0 nipype-0.9.2/nipype/interfaces/script_templates/feat_fe_featdirs.tcl000066400000000000000000000001151227300005300260020ustar00rootroot00000000000000# 4D AVW data or FEAT directory ($runno) set feat_files($runno) "${rundir}" nipype-0.9.2/nipype/interfaces/script_templates/feat_fe_footer.tcl000066400000000000000000000021351227300005300255030ustar00rootroot00000000000000# Contrast & F-tests mode # real : control real EVs # orig : control original EVs set fmri(con_mode_old) real set fmri(con_mode) real # Display images for contrast_real 1 set fmri(conpic_real.1) 1 # Title for contrast_real 1 set fmri(conname_real.1) "group mean" # Real contrast_real vector 1 element 1 set fmri(con_real1.1) 1 # Contrast masking - use >0 instead of thresholding? set fmri(conmask_zerothresh_yn) 0 # Do contrast masking at all? set fmri(conmask1_1) 0 ########################################################## # Now options that don't appear in the GUI # Alternative example_func image (not derived from input 4D dataset) set fmri(alternative_example_func) "" # Alternative (to BETting) mask image set fmri(alternative_mask) "" # Initial structural space registration initialisation transform set fmri(init_initial_highres) "" # Structural space registration initialisation transform set fmri(init_highres) "" # Standard space registration initialisation transform set fmri(init_standard) "" # For full FEAT analysis: overwrite existing .feat output dir? set fmri(overwrite_yn) $overwrite nipype-0.9.2/nipype/interfaces/script_templates/feat_fe_header.tcl000066400000000000000000000122521227300005300254360ustar00rootroot00000000000000# FEAT version number set fmri(version) 5.98 # Are we in MELODIC? set fmri(inmelodic) 0 # Analysis level # 1 : First-level analysis # 2 : Higher-level analysis set fmri(level) 2 # Which stages to run # 0 : No first-level analysis (registration and/or group stats only) # 7 : Full first-level analysis # 1 : Pre-Stats # 3 : Pre-Stats + Stats # 2 : Stats # 6 : Stats + Post-stats # 4 : Post-stats set fmri(analysis) 6 # Use relative filenames set fmri(relative_yn) 0 # Balloon help set fmri(help_yn) 1 # Run Featwatcher set fmri(featwatcher_yn) 1 # Cleanup first-level standard-space images set fmri(sscleanup_yn) 0 # Output directory set fmri(outputdir) "./output" # TR(s) set fmri(tr) 3 # Total volumes set fmri(npts) ${num_runs} # Delete volumes set fmri(ndelete) 0 # Perfusion tag/control order set fmri(tagfirst) 1 # Number of first-level analyses set fmri(multiple) ${num_runs} # Higher-level input type # 1 : Inputs are lower-level FEAT directories # 2 : Inputs are cope images from FEAT directories set fmri(inputtype) 1 # Carry out pre-stats processing? 
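# NOTE (illustrative comment, not part of the original template): tokens
# such as ${num_runs} above are placeholders that nipype fills in before
# the resulting .fsf text is handed to FEAT -- presumably via Python
# string.Template-style substitution, e.g.
#   Template(tcl_text).substitute(num_runs=4)
# would turn "set fmri(npts) ${num_runs}" into "set fmri(npts) 4".
# The exact substitution mechanism is an assumption; only the placeholder
# names come from this template.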
set fmri(filtering_yn) 0 # Brain/background threshold, % set fmri(brain_thresh) 10 # Critical z for design efficiency calculation set fmri(critical_z) 5.3 # Noise level set fmri(noise) 0.66 # Noise AR(1) set fmri(noisear) 0.34 # Post-stats-only directory copying # 0 : Overwrite original post-stats results # 1 : Copy original FEAT directory for new Contrasts, Thresholding, Rendering set fmri(newdir_yn) 0 # Motion correction # 0 : None # 1 : MCFLIRT set fmri(mc) 1 # Spin-history (currently obsolete) set fmri(sh_yn) 0 # B0 fieldmap unwarping? set fmri(regunwarp_yn) 0 # EPI dwell time (ms) set fmri(dwell) 0.7 # EPI TE (ms) set fmri(te) 35 # % Signal loss threshold set fmri(signallossthresh) 10 # Unwarp direction set fmri(unwarp_dir) y- # Slice timing correction # 0 : None # 1 : Regular up (0, 1, 2, 3, ...) # 2 : Regular down # 3 : Use slice order file # 4 : Use slice timings file # 5 : Interleaved (0, 2, 4 ... 1, 3, 5 ... ) set fmri(st) 0 # Slice timings file set fmri(st_file) "" # BET brain extraction set fmri(bet_yn) 1 # Spatial smoothing FWHM (mm) set fmri(smooth) 5 # Intensity normalization set fmri(norm_yn) 0 # Perfusion subtraction set fmri(perfsub_yn) 0 # Highpass temporal filtering set fmri(temphp_yn) 1 # Lowpass temporal filtering set fmri(templp_yn) 0 # MELODIC ICA data exploration set fmri(melodic_yn) 0 # Carry out main stats? set fmri(stats_yn) 1 # Carry out prewhitening? set fmri(prewhiten_yn) 1 # Add motion parameters to model # 0 : No # 1 : Yes set fmri(motionevs) 0 # Robust outlier detection in FLAME? set fmri(robust_yn) 0 # Higher-level modelling # 3 : Fixed effects # 0 : Mixed Effects: Simple OLS # 2 : Mixed Effects: FLAME 1 # 1 : Mixed Effects: FLAME 1+2 set fmri(mixed_yn) 3 # Number of EVs set fmri(evs_orig) 1 set fmri(evs_real) 1 set fmri(evs_vox) 0 # Number of contrasts set fmri(ncon_orig) 1 set fmri(ncon_real) 1 # Number of F-tests set fmri(nftests_orig) 0 set fmri(nftests_real) 0 # Add constant column to design matrix? (obsolete) set fmri(constcol) 0 # Carry out post-stats steps? set fmri(poststats_yn) 1 # Pre-threshold masking? set fmri(threshmask) "" # Thresholding # 0 : None # 1 : Uncorrected # 2 : Voxel # 3 : Cluster set fmri(thresh) 3 # P threshold set fmri(prob_thresh) 0.05 # Z threshold set fmri(z_thresh) 2.3 # Z min/max for colour rendering # 0 : Use actual Z min/max # 1 : Use preset Z min/max set fmri(zdisplay) 0 # Z min in colour rendering set fmri(zmin) 2 # Z max in colour rendering set fmri(zmax) 8 # Colour rendering type # 0 : Solid blobs # 1 : Transparent blobs set fmri(rendertype) 1 # Background image for higher-level stats overlays # 1 : Mean highres # 2 : First highres # 3 : Mean functional # 4 : First functional # 5 : Standard space template set fmri(bgimage) 1 # Create time series plots set fmri(tsplot_yn) 1 # Registration? set fmri(reg_yn) 0 # Registration to initial structural set fmri(reginitial_highres_yn) 0 # Search space for registration to initial structural # 0 : No search # 90 : Normal search # 180 : Full search set fmri(reginitial_highres_search) 90 # Degrees of Freedom for registration to initial structural set fmri(reginitial_highres_dof) 3 # Registration to main structural set fmri(reghighres_yn) 0 # Search space for registration to main structural # 0 : No search # 90 : Normal search # 180 : Full search set fmri(reghighres_search) 90 # Degrees of Freedom for registration to main structural set fmri(reghighres_dof) 6 # Registration to standard image? 
set fmri(regstandard_yn) 0 # Standard image set fmri(regstandard) "regimage" # Search space for registration to standard space # 0 : No search # 90 : Normal search # 180 : Full search set fmri(regstandard_search) 90 # Degrees of Freedom for registration to standard space set fmri(regstandard_dof) 12 # Do nonlinear registration from structural to standard space? set fmri(regstandard_nonlinear_yn) 0 # Control nonlinear warp field resolution set fmri(regstandard_nonlinear_warpres) 10 # High pass filter cutoff set fmri(paradigm_hp) 100 # Number of lower-level copes feeding into higher-level analysis set fmri(ncopeinputs) ${num_copes} nipype-0.9.2/nipype/interfaces/script_templates/feat_header.tcl000066400000000000000000000124261227300005300247670ustar00rootroot00000000000000# FEAT version number set fmri(version) 5.98 # Are we in MELODIC? set fmri(inmelodic) 0 # Analysis level # 1 : First-level analysis # 2 : Higher-level analysis set fmri(level) 1 # Which stages to run # 0 : No first-level analysis (registration and/or group stats only) # 7 : Full first-level analysis # 1 : Pre-Stats # 3 : Pre-Stats + Stats # 2 : Stats # 6 : Stats + Post-stats # 4 : Post-stats set fmri(analysis) $analysis_stages # Use relative filenames set fmri(relative_yn) 0 # Balloon help set fmri(help_yn) 1 # Run Featwatcher set fmri(featwatcher_yn) 0 # Cleanup first-level standard-space images set fmri(sscleanup_yn) 0 # Output directory set fmri(outputdir) "scan$scan_num" # TR(s) set fmri(tr) 2.0 # Total volumes set fmri(npts) $num_vols # Delete volumes set fmri(ndelete) 0 # Perfusion tag/control order set fmri(tagfirst) 1 # Number of first-level analyses set fmri(multiple) 1 # Higher-level input type # 1 : Inputs are lower-level FEAT directories # 2 : Inputs are cope images from FEAT directories set fmri(inputtype) 1 # Carry out pre-stats processing? set fmri(filtering_yn) 0 # Brain/background threshold, set fmri(brain_thresh) 10 # Critical z for design efficiency calculation set fmri(critical_z) 5.3 # Noise level set fmri(noise) 0.66 # Noise AR(1) set fmri(noisear) 0.34 # Post-stats-only directory copying # 0 : Overwrite original post-stats results # 1 : Copy original FEAT directory for new Contrasts, Thresholding, Rendering set fmri(newdir_yn) 0 # Motion correction # 0 : None # 1 : MCFLIRT set fmri(mc) 0 # Spin-history (currently obsolete) set fmri(sh_yn) 0 # B0 fieldmap unwarping? set fmri(regunwarp_yn) 0 # EPI dwell time (ms) set fmri(dwell) 0.7 # EPI TE (ms) set fmri(te) 35 # Signal loss threshold set fmri(signallossthresh) 10 # Unwarp direction set fmri(unwarp_dir) y- # Slice timing correction # 0 : None # 1 : Regular up (0, 1, 2, 3, ...) # 2 : Regular down # 3 : Use slice order file # 4 : Use slice timings file # 5 : Interleaved (0, 2, 4 ... 1, 3, 5 ... ) set fmri(st) 0 # Slice timings file set fmri(st_file) "" # BET brain extraction set fmri(bet_yn) 0 # Spatial smoothing FWHM (mm) set fmri(smooth) 5 # Intensity normalization set fmri(norm_yn) 0 # Perfusion subtraction set fmri(perfsub_yn) 0 # Highpass temporal filtering set fmri(temphp_yn) 1 # Lowpass temporal filtering set fmri(templp_yn) 0 # MELODIC ICA data exploration set fmri(melodic_yn) 0 # Carry out main stats? set fmri(stats_yn) 1 # Carry out prewhitening? set fmri(prewhiten_yn) 1 # Add motion parameters to model # 0 : No # 1 : Yes set fmri(motionevs) 0 # Robust outlier detection in FLAME? 
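# NOTE (illustrative comment, not part of the original template): this
# first-level header expects the following placeholders to be filled in
# by nipype before use: $analysis_stages, $scan_num, $num_vols, $num_evs,
# $num_contrasts, $do_contrasts, $func_file and $struct_file (all of them
# appear further down in this file).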
set fmri(robust_yn) 0 # Higher-level modelling # 3 : Fixed effects # 0 : Mixed Effects: Simple OLS # 2 : Mixed Effects: FLAME 1 # 1 : Mixed Effects: FLAME 1+2 set fmri(mixed_yn) 2 # Number of EVs set fmri(evs_orig) $num_evs set fmri(evs_real) $num_evs set fmri(evs_vox) 0 # Number of contrasts set fmri(ncon_orig) $num_contrasts set fmri(ncon_real) $num_contrasts # Number of F-tests set fmri(nftests_orig) 0 set fmri(nftests_real) 0 # Add constant column to design matrix? (obsolete) set fmri(constcol) 0 # Carry out post-stats steps? set fmri(poststats_yn) $do_contrasts # Pre-threshold masking? set fmri(threshmask) "" # Thresholding # 0 : None # 1 : Uncorrected # 2 : Voxel # 3 : Cluster set fmri(thresh) 3 # P threshold set fmri(prob_thresh) 0.05 # Z threshold set fmri(z_thresh) 2.3 # Z min/max for colour rendering # 0 : Use actual Z min/max # 1 : Use preset Z min/max set fmri(zdisplay) 0 # Z min in colour rendering set fmri(zmin) 2 # Z max in colour rendering set fmri(zmax) 8 # Colour rendering type # 0 : Solid blobs # 1 : Transparent blobs set fmri(rendertype) 1 # Background image for higher-level stats overlays # 1 : Mean highres # 2 : First highres # 3 : Mean functional # 4 : First functional # 5 : Standard space template set fmri(bgimage) 1 # Create time series plots set fmri(tsplot_yn) 1 #Registration? set fmri(reg_yn) 0 # Registration to initial structural set fmri(reginitial_highres_yn) 0 # Search space for registration to initial structural # 0 : No search # 90 : Normal search # 180 : Full search set fmri(reginitial_highres_search) 90 # Degrees of Freedom for registration to initial structural set fmri(reginitial_highres_dof) 3 # Registration to main structural set fmri(reghighres_yn) 0 # Search space for registration to main structural # 0 : No search # 90 : Normal search # 180 : Full search set fmri(reghighres_search) 90 # Degrees of Freedom for registration to main structural set fmri(reghighres_dof) 6 # Registration to standard image? set fmri(regstandard_yn) 0 # Standard image set fmri(regstandard) "standard_image" # Search space for registration to standard space # 0 : No search # 90 : Normal search # 180 : Full search set fmri(regstandard_search) 90 # Degrees of Freedom for registration to standard space set fmri(regstandard_dof) 12 # Do nonlinear registration from structural to standard space? set fmri(regstandard_nonlinear_yn) 0 # Control nonlinear warp field resolution set fmri(regstandard_nonlinear_warpres) 10 # High pass filter cutoff set fmri(paradigm_hp) 100 # 4D AVW data or FEAT directory (1) set feat_files(1) "$func_file" # Subject's structural for analysis 1 set highres_files(1) "$struct_file" nipype-0.9.2/nipype/interfaces/script_templates/feat_header_l1.tcl000066400000000000000000000124441227300005300253630ustar00rootroot00000000000000# FEAT version number set fmri(version) 5.98 # Are we in MELODIC? 
set fmri(inmelodic) 0 # Analysis level # 1 : First-level analysis # 2 : Higher-level analysis set fmri(level) 1 # Which stages to run # 0 : No first-level analysis (registration and/or group stats only) # 7 : Full first-level analysis # 1 : Pre-Stats # 3 : Pre-Stats + Stats # 2 : Stats # 6 : Stats + Post-stats # 4 : Post-stats set fmri(analysis) 6 # Use relative filenames set fmri(relative_yn) 0 # Balloon help set fmri(help_yn) 1 # Run Featwatcher set fmri(featwatcher_yn) 0 # Cleanup first-level standard-space images set fmri(sscleanup_yn) 0 # Output directory set fmri(outputdir) "run$run_num" # TR(s) set fmri(tr) $interscan_interval # Total volumes set fmri(npts) $num_vols # Delete volumes set fmri(ndelete) 0 # Perfusion tag/control order set fmri(tagfirst) 1 # Number of first-level analyses set fmri(multiple) 1 # Higher-level input type # 1 : Inputs are lower-level FEAT directories # 2 : Inputs are cope images from FEAT directories set fmri(inputtype) 1 # Carry out pre-stats processing? set fmri(filtering_yn) 0 # Brain/background threshold, set fmri(brain_thresh) 10 # Critical z for design efficiency calculation set fmri(critical_z) 5.3 # Noise level set fmri(noise) 0.66 # Noise AR(1) set fmri(noisear) 0.34 # Post-stats-only directory copying # 0 : Overwrite original post-stats results # 1 : Copy original FEAT directory for new Contrasts, Thresholding, Rendering set fmri(newdir_yn) 0 # Motion correction # 0 : None # 1 : MCFLIRT set fmri(mc) 0 # Spin-history (currently obsolete) set fmri(sh_yn) 0 # B0 fieldmap unwarping? set fmri(regunwarp_yn) 0 # EPI dwell time (ms) set fmri(dwell) 0.7 # EPI TE (ms) set fmri(te) 35 # Signal loss threshold set fmri(signallossthresh) 10 # Unwarp direction set fmri(unwarp_dir) y- # Slice timing correction # 0 : None # 1 : Regular up (0, 1, 2, 3, ...) # 2 : Regular down # 3 : Use slice order file # 4 : Use slice timings file # 5 : Interleaved (0, 2, 4 ... 1, 3, 5 ... ) set fmri(st) 0 # Slice timings file set fmri(st_file) "" # BET brain extraction set fmri(bet_yn) 0 # Spatial smoothing FWHM (mm) set fmri(smooth) 0 # Intensity normalization set fmri(norm_yn) 0 # Perfusion subtraction set fmri(perfsub_yn) 0 # Highpass temporal filtering set fmri(temphp_yn) $temphp_yn # Lowpass temporal filtering set fmri(templp_yn) 0 # MELODIC ICA data exploration set fmri(melodic_yn) 0 # Carry out main stats? set fmri(stats_yn) 1 # Carry out prewhitening? set fmri(prewhiten_yn) $prewhiten # Add motion parameters to model # 0 : No # 1 : Yes set fmri(motionevs) 0 # Robust outlier detection in FLAME? set fmri(robust_yn) 0 # Higher-level modelling # 3 : Fixed effects # 0 : Mixed Effects: Simple OLS # 2 : Mixed Effects: FLAME 1 # 1 : Mixed Effects: FLAME 1+2 set fmri(mixed_yn) 2 # Number of EVs set fmri(evs_orig) $num_evs set fmri(evs_real) $num_evs_real set fmri(evs_vox) 0 # Number of contrasts set fmri(ncon_orig) $num_tcon set fmri(ncon_real) $num_tcon # Number of F-tests set fmri(nftests_orig) $num_fcon set fmri(nftests_real) $num_fcon # Add constant column to design matrix? (obsolete) set fmri(constcol) 0 # Carry out post-stats steps? set fmri(poststats_yn) 1 # Pre-threshold masking? 
set fmri(threshmask) "" # Thresholding # 0 : None # 1 : Uncorrected # 2 : Voxel # 3 : Cluster set fmri(thresh) 3 # P threshold set fmri(prob_thresh) 0.05 # Z threshold set fmri(z_thresh) 2.3 # Z min/max for colour rendering # 0 : Use actual Z min/max # 1 : Use preset Z min/max set fmri(zdisplay) 0 # Z min in colour rendering set fmri(zmin) 2 # Z max in colour rendering set fmri(zmax) 8 # Colour rendering type # 0 : Solid blobs # 1 : Transparent blobs set fmri(rendertype) 1 # Background image for higher-level stats overlays # 1 : Mean highres # 2 : First highres # 3 : Mean functional # 4 : First functional # 5 : Standard space template set fmri(bgimage) 1 # Create time series plots set fmri(tsplot_yn) 1 #Registration? set fmri(reg_yn) 0 # Registration to initial structural set fmri(reginitial_highres_yn) 0 # Search space for registration to initial structural # 0 : No search # 90 : Normal search # 180 : Full search set fmri(reginitial_highres_search) 90 # Degrees of Freedom for registration to initial structural set fmri(reginitial_highres_dof) 3 # Registration to main structural set fmri(reghighres_yn) 0 # Search space for registration to main structural # 0 : No search # 90 : Normal search # 180 : Full search set fmri(reghighres_search) 90 # Degrees of Freedom for registration to main structural set fmri(reghighres_dof) 6 # Registration to standard image? set fmri(regstandard_yn) 0 # Standard image set fmri(regstandard) "MNI152" # Search space for registration to standard space # 0 : No search # 90 : Normal search # 180 : Full search set fmri(regstandard_search) 90 # Degrees of Freedom for registration to standard space set fmri(regstandard_dof) 0 # Do nonlinear registration from structural to standard space? set fmri(regstandard_nonlinear_yn) 0 # Control nonlinear warp field resolution set fmri(regstandard_nonlinear_warpres) 10 # High pass filter cutoff set fmri(paradigm_hp) $high_pass_filter_cutoff # 4D AVW data or FEAT directory (1) set feat_files(1) "$func_file" # Subject's structural for analysis 1 set highres_files(1) "" nipype-0.9.2/nipype/interfaces/script_templates/feat_nongui.tcl000066400000000000000000000012041227300005300250260ustar00rootroot00000000000000########################################################## # Now options that don't appear in the GUI # Alternative example_func image (not derived from input 4D dataset) set fmri(alternative_example_func) "" # Alternative (to BETting) mask image set fmri(alternative_mask) "" # Initial structural space registration initialisation transform set fmri(init_initial_highres) "" # Structural space registration initialisation transform set fmri(init_highres) "" # Standard space registration initialisation transform set fmri(init_standard) "" # For full FEAT analysis: overwrite existing .feat output dir? set fmri(overwrite_yn) $overwrite nipype-0.9.2/nipype/interfaces/script_templates/featreg_header.tcl000066400000000000000000000122351227300005300254630ustar00rootroot00000000000000# FEAT version number set fmri(version) 5.98 # Are we in MELODIC? 
set fmri(inmelodic) 0 # Analysis level # 1 : First-level analysis # 2 : Higher-level analysis set fmri(level) 2 # Which stages to run # 0 : No first-level analysis (registration and/or group stats only) # 7 : Full first-level analysis # 1 : Pre-Stats # 3 : Pre-Stats + Stats # 2 : Stats # 6 : Stats + Post-stats # 4 : Post-stats set fmri(analysis) 0 # Use relative filenames set fmri(relative_yn) 0 # Balloon help set fmri(help_yn) 1 # Run Featwatcher set fmri(featwatcher_yn) 1 # Cleanup first-level standard-space images set fmri(sscleanup_yn) 0 # Output directory set fmri(outputdir) "" # TR(s) set fmri(tr) 3 # Total volumes set fmri(npts) 2 # Delete volumes set fmri(ndelete) 0 # Perfusion tag/control order set fmri(tagfirst) 1 # Number of first-level analyses set fmri(multiple) ${num_runs} # Higher-level input type # 1 : Inputs are lower-level FEAT directories # 2 : Inputs are cope images from FEAT directories set fmri(inputtype) 1 # Carry out pre-stats processing? set fmri(filtering_yn) 0 # Brain/background threshold, % set fmri(brain_thresh) 10 # Critical z for design efficiency calculation set fmri(critical_z) 5.3 # Noise level set fmri(noise) 0.66 # Noise AR(1) set fmri(noisear) 0.34 # Post-stats-only directory copying # 0 : Overwrite original post-stats results # 1 : Copy original FEAT directory for new Contrasts, Thresholding, Rendering set fmri(newdir_yn) 0 # Motion correction # 0 : None # 1 : MCFLIRT set fmri(mc) 1 # Spin-history (currently obsolete) set fmri(sh_yn) 0 # B0 fieldmap unwarping? set fmri(regunwarp_yn) 0 # EPI dwell time (ms) set fmri(dwell) 0.7 # EPI TE (ms) set fmri(te) 35 # % Signal loss threshold set fmri(signallossthresh) 10 # Unwarp direction set fmri(unwarp_dir) y- # Slice timing correction # 0 : None # 1 : Regular up (0, 1, 2, 3, ...) # 2 : Regular down # 3 : Use slice order file # 4 : Use slice timings file # 5 : Interleaved (0, 2, 4 ... 1, 3, 5 ... ) set fmri(st) 0 # Slice timings file set fmri(st_file) "" # BET brain extraction set fmri(bet_yn) 1 # Spatial smoothing FWHM (mm) set fmri(smooth) 5 # Intensity normalization set fmri(norm_yn) 0 # Perfusion subtraction set fmri(perfsub_yn) 0 # Highpass temporal filtering set fmri(temphp_yn) 1 # Lowpass temporal filtering set fmri(templp_yn) 0 # MELODIC ICA data exploration set fmri(melodic_yn) 0 # Carry out main stats? set fmri(stats_yn) 1 # Carry out prewhitening? set fmri(prewhiten_yn) 1 # Add motion parameters to model # 0 : No # 1 : Yes set fmri(motionevs) 0 # Robust outlier detection in FLAME? set fmri(robust_yn) 0 # Higher-level modelling # 3 : Fixed effects # 0 : Mixed Effects: Simple OLS # 2 : Mixed Effects: FLAME 1 # 1 : Mixed Effects: FLAME 1+2 set fmri(mixed_yn) 3 # Number of EVs set fmri(evs_orig) 0 set fmri(evs_real) 0 set fmri(evs_vox) 0 # Number of contrasts set fmri(ncon_orig) 0 set fmri(ncon_real) 0 # Number of F-tests set fmri(nftests_orig) 0 set fmri(nftests_real) 0 # Add constant column to design matrix? (obsolete) set fmri(constcol) 0 # Carry out post-stats steps? set fmri(poststats_yn) 1 # Pre-threshold masking? 
set fmri(threshmask) "" # Thresholding # 0 : None # 1 : Uncorrected # 2 : Voxel # 3 : Cluster set fmri(thresh) 3 # P threshold set fmri(prob_thresh) 0.05 # Z threshold set fmri(z_thresh) 2.3 # Z min/max for colour rendering # 0 : Use actual Z min/max # 1 : Use preset Z min/max set fmri(zdisplay) 0 # Z min in colour rendering set fmri(zmin) 2 # Z max in colour rendering set fmri(zmax) 8 # Colour rendering type # 0 : Solid blobs # 1 : Transparent blobs set fmri(rendertype) 1 # Background image for higher-level stats overlays # 1 : Mean highres # 2 : First highres # 3 : Mean functional # 4 : First functional # 5 : Standard space template set fmri(bgimage) 1 # Create time series plots set fmri(tsplot_yn) 1 # Registration? set fmri(reg_yn) 0 # Registration to initial structural set fmri(reginitial_highres_yn) 0 # Search space for registration to initial structural # 0 : No search # 90 : Normal search # 180 : Full search set fmri(reginitial_highres_search) 90 # Degrees of Freedom for registration to initial structural set fmri(reginitial_highres_dof) 3 # Registration to main structural set fmri(reghighres_yn) 0 # Search space for registration to main structural # 0 : No search # 90 : Normal search # 180 : Full search set fmri(reghighres_search) 90 # Degrees of Freedom for registration to main structural set fmri(reghighres_dof) 6 # Registration to standard image? set fmri(regstandard_yn) 1 # Standard image set fmri(regstandard) "$regimage" # Search space for registration to standard space # 0 : No search # 90 : Normal search # 180 : Full search set fmri(regstandard_search) 90 # Degrees of Freedom for registration to standard space set fmri(regstandard_dof) $regdof # Do nonlinear registration from structural to standard space? set fmri(regstandard_nonlinear_yn) 0 # Control nonlinear warp field resolution set fmri(regstandard_nonlinear_warpres) 10 # High pass filter cutoff set fmri(paradigm_hp) 100 # Number of lower-level copes feeding into higher-level analysis set fmri(ncopeinputs) ${num_runs} nipype-0.9.2/nipype/interfaces/setup.py000066400000000000000000000017741227300005300201700ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('interfaces', parent_package, top_path) config.add_subpackage('afni') config.add_subpackage('ants') config.add_subpackage('camino') config.add_subpackage('camino2trackvis') config.add_subpackage('cmtk') config.add_subpackage('diffusion_toolkit') config.add_subpackage('dipy') config.add_subpackage('freesurfer') config.add_subpackage('fsl') config.add_subpackage('mne') config.add_subpackage('mrtrix') config.add_subpackage('nipy') config.add_subpackage('spm') config.add_subpackage('slicer') config.add_data_dir('script_templates') config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/slicer/000077500000000000000000000000001227300005300177265ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/__init__.py000066400000000000000000000006071227300005300220420ustar00rootroot00000000000000from diffusion import * from segmentation import * from filtering import * from utilities import EMSegmentTransformToNewFormat from surface import MergeModels, ModelToLabelMap, GrayscaleModelMaker, ProbeVolumeWithModel, 
LabelMapSmoothing, ModelMaker from quantification import * from legacy import * from registration import * from converters import DicomToNrrdConverter, OrientScalarVolume nipype-0.9.2/nipype/interfaces/slicer/base.py000066400000000000000000000001351227300005300212110ustar00rootroot00000000000000from ..base import SEMLikeCommandLine class SlicerCommandLine(SEMLikeCommandLine): pass nipype-0.9.2/nipype/interfaces/slicer/converters.py000066400000000000000000000130231227300005300224710ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class DicomToNrrdConverterInputSpec(CommandLineInputSpec): inputDicomDirectory = Directory(desc="Directory holding Dicom series", exists=True, argstr="--inputDicomDirectory %s") outputDirectory = traits.Either(traits.Bool, Directory(), hash_files=False, desc="Directory holding the output NRRD format", argstr="--outputDirectory %s") outputVolume = traits.Str(desc="Output filename (.nhdr or .nrrd)", argstr="--outputVolume %s") smallGradientThreshold = traits.Float(desc="If a gradient magnitude is greater than 0 and less than smallGradientThreshold, then DicomToNrrdConverter will display an error message and quit, unless the useBMatrixGradientDirections option is set.", argstr="--smallGradientThreshold %f") writeProtocolGradientsFile = traits.Bool(desc="Write the protocol gradients to a file suffixed by \'.txt\' as they were specified in the procol by multiplying each diffusion gradient direction by the measurement frame. This file is for debugging purposes only, the format is not fixed, and will likely change as debugging of new dicom formats is necessary.", argstr="--writeProtocolGradientsFile ") useIdentityMeaseurementFrame = traits.Bool(desc="Adjust all the gradients so that the measurement frame is an identity matrix.", argstr="--useIdentityMeaseurementFrame ") useBMatrixGradientDirections = traits.Bool(desc="Fill the nhdr header with the gradient directions and bvalues computed out of the BMatrix. Only changes behavior for Siemens data.", argstr="--useBMatrixGradientDirections ") class DicomToNrrdConverterOutputSpec(TraitedSpec): outputDirectory = Directory(desc="Directory holding the output NRRD format", exists=True) class DicomToNrrdConverter(SEMLikeCommandLine): """title: DICOM to NRRD Converter category: Converters description: Converts diffusion weighted MR images in dicom series into Nrrd format for analysis in Slicer. This program has been tested on only a limited subset of DTI dicom formats available from Siemens, GE, and Phillips scanners. Work in progress to support dicom multi-frame data. The program parses dicom header to extract necessary information about measurement frame, diffusion weighting directions, b-values, etc, and write out a nrrd image. For non-diffusion weighted dicom images, it loads in an entire dicom series and writes out a single dicom volume in a .nhdr/.raw pair. 
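Example (an illustrative sketch, not part of the autogenerated text; the
directory and file names are hypothetical and Slicer's DicomToNrrdConverter
executable must be on the path)::

  from nipype.interfaces.slicer import DicomToNrrdConverter
  converter = DicomToNrrdConverter()
  converter.inputs.inputDicomDirectory = '/data/dicom_series'
  converter.inputs.outputDirectory = '/data/nrrd'
  converter.inputs.outputVolume = 'dwi.nhdr'
  converter.run()  # doctest: +SKIP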
version: 0.2.0.$Revision: 916 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DicomToNrrdConverter license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt contributor: Xiaodong Tao (GE), Vince Magnotta (UIowa), Hans Johnson (UIowa) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. """ input_spec = DicomToNrrdConverterInputSpec output_spec = DicomToNrrdConverterOutputSpec _cmd = "DicomToNrrdConverter " _outputs_filenames = {'outputDirectory':'outputDirectory'} class OrientScalarVolumeInputSpec(CommandLineInputSpec): inputVolume1 = File(position=-2, desc="Input volume 1", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="The oriented volume", argstr="%s") orientation = traits.Enum("Axial", "Coronal", "Sagittal", "RIP", "LIP", "RSP", "LSP", "RIA", "LIA", "RSA", "LSA", "IRP", "ILP", "SRP", "SLP", "IRA", "ILA", "SRA", "SLA", "RPI", "LPI", "RAI", "LAI", "RPS", "LPS", "RAS", "LAS", "PRI", "PLI", "ARI", "ALI", "PRS", "PLS", "ARS", "ALS", "IPR", "SPR", "IAR", "SAR", "IPL", "SPL", "IAL", "SAL", "PIR", "PSR", "AIR", "ASR", "PIL", "PSL", "AIL", "ASL", desc="Orientation choices", argstr="--orientation %s") class OrientScalarVolumeOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="The oriented volume", exists=True) class OrientScalarVolume(SEMLikeCommandLine): """title: Orient Scalar Volume category: Converters description: Orients an output volume. Rearranges the slices in a volume according to the selected orientation. The slices are not interpolated. They are just reordered and/or permuted. The resulting volume will cover the original volume. NOTE: since Slicer takes into account the orientation of a volume, the re-oriented volume will not show any difference from the original volume, To see the difference, save the volume and display it with a system that either ignores the orientation of the image (e.g. Paraview) or displays individual images. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OrientImage contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
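Example (an illustrative sketch, not part of the autogenerated text; file
names are hypothetical and the Slicer CLI modules must be installed)::

  from nipype.interfaces.slicer import OrientScalarVolume
  orient = OrientScalarVolume()
  orient.inputs.inputVolume1 = 'structural.nii'
  orient.inputs.orientation = 'RAS'
  orient.inputs.outputVolume = 'structural_ras.nii'
  orient.run()  # doctest: +SKIP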
""" input_spec = OrientScalarVolumeInputSpec output_spec = OrientScalarVolumeOutputSpec _cmd = "OrientScalarVolume " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/diffusion/000077500000000000000000000000001227300005300217145ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/diffusion/__init__.py000066400000000000000000000003431227300005300240250ustar00rootroot00000000000000from diffusion import ResampleDTIVolume, DWIRicianLMMSEFilter, TractographyLabelMapSeeding, DWIJointRicianLMMSEFilter, DiffusionWeightedVolumeMasking, DTIimport, DWIToDTIEstimation, DiffusionTensorScalarMeasurements, DTIexport nipype-0.9.2/nipype/interfaces/slicer/diffusion/diffusion.py000066400000000000000000000555321227300005300242660ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class ResampleDTIVolumeInputSpec(CommandLineInputSpec): inputVolume = File(position=-2, desc="Input volume to be resampled", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Resampled Volume", argstr="%s") Reference = File(desc="Reference Volume (spacing,size,orientation,origin)", exists=True, argstr="--Reference %s") transformationFile = File(exists=True, argstr="--transformationFile %s") defField = File(desc="File containing the deformation field (3D vector image containing vectors with 3 components)", exists=True, argstr="--defField %s") hfieldtype = traits.Enum("displacement", "h-Field", desc="Set if the deformation field is an -Field", argstr="--hfieldtype %s") interpolation = traits.Enum("linear", "nn", "ws", "bs", desc="Sampling algorithm (linear , nn (nearest neighborhoor), ws (WindowedSinc), bs (BSpline) )", argstr="--interpolation %s") correction = traits.Enum("zero", "none", "abs", "nearest", desc="Correct the tensors if computed tensor is not semi-definite positive", argstr="--correction %s") transform_tensor_method = traits.Enum("PPD", "FS", desc="Chooses between 2 methods to transform the tensors: Finite Strain (FS), faster but less accurate, or Preservation of the Principal Direction (PPD)", argstr="--transform_tensor_method %s") transform_order = traits.Enum("input-to-output", "output-to-input", desc="Select in what order the transforms are read", argstr="--transform_order %s") notbulk = traits.Bool(desc="The transform following the BSpline transform is not set as a bulk transform for the BSpline transform", argstr="--notbulk ") spaceChange = traits.Bool(desc="Space Orientation between transform and image is different (RAS/LPS) (warning: if the transform is a Transform Node in Slicer3, do not select)", argstr="--spaceChange ") rotation_point = traits.List(desc="Center of rotation (only for rigid and affine transforms)", argstr="--rotation_point %s") centered_transform = traits.Bool(desc="Set the center of the transformation to the center of the input image (only for rigid and affine transforms)", argstr="--centered_transform ") image_center = traits.Enum("input", "output", desc="Image to use to center the transform (used only if \'Centered Transform\' is selected)", argstr="--image_center %s") Inverse_ITK_Transformation = traits.Bool(desc="Inverse the transformation before applying it from output 
image to input image (only for rigid and affine transforms)", argstr="--Inverse_ITK_Transformation ") spacing = InputMultiPath(traits.Float, desc="Spacing along each dimension (0 means use input spacing)", sep=",", argstr="--spacing %s") size = InputMultiPath(traits.Float, desc="Size along each dimension (0 means use input size)", sep=",", argstr="--size %s") origin = traits.List(desc="Origin of the output Image", argstr="--origin %s") direction_matrix = InputMultiPath(traits.Float, desc="9 parameters of the direction matrix by rows (ijk to LPS if LPS transform, ijk to RAS if RAS transform)", sep=",", argstr="--direction_matrix %s") number_of_thread = traits.Int(desc="Number of thread used to compute the output image", argstr="--number_of_thread %d") default_pixel_value = traits.Float(desc="Default pixel value for samples falling outside of the input region", argstr="--default_pixel_value %f") window_function = traits.Enum("h", "c", "w", "l", "b", desc="Window Function , h = Hamming , c = Cosine , w = Welch , l = Lanczos , b = Blackman", argstr="--window_function %s") spline_order = traits.Int(desc="Spline Order (Spline order may be from 0 to 5)", argstr="--spline_order %d") transform_matrix = InputMultiPath(traits.Float, desc="12 parameters of the transform matrix by rows ( --last 3 being translation-- )", sep=",", argstr="--transform_matrix %s") transform = traits.Enum("rt", "a", desc="Transform algorithm, rt = Rigid Transform, a = Affine Transform", argstr="--transform %s") class ResampleDTIVolumeOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Resampled Volume", exists=True) class ResampleDTIVolume(SEMLikeCommandLine): """title: Resample DTI Volume category: Diffusion.Diffusion Tensor Images description: Resampling an image is a very important task in image analysis. It is especially important in the frame of image registration. This module implements DT image resampling through the use of itk Transforms. The resampling is controlled by the Output Spacing. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. version: 0.1 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleDTI contributor: Francois Budin (UNC) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
Information on the National Centers for Biomedical Computing can be obtained from http://nihroadmap.nih.gov/bioinformatics """ input_spec = ResampleDTIVolumeInputSpec output_spec = ResampleDTIVolumeOutputSpec _cmd = "ResampleDTIVolume " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class DWIRicianLMMSEFilterInputSpec(CommandLineInputSpec): iter = traits.Int(desc="Number of iterations for the noise removal filter.", argstr="--iter %d") re = InputMultiPath(traits.Int, desc="Estimation radius.", sep=",", argstr="--re %s") rf = InputMultiPath(traits.Int, desc="Filtering radius.", sep=",", argstr="--rf %s") mnvf = traits.Int(desc="Minimum number of voxels in kernel used for filtering.", argstr="--mnvf %d") mnve = traits.Int(desc="Minimum number of voxels in kernel used for estimation.", argstr="--mnve %d") minnstd = traits.Int(desc="Minimum allowed noise standard deviation.", argstr="--minnstd %d") maxnstd = traits.Int(desc="Maximum allowed noise standard deviation.", argstr="--maxnstd %d") hrf = traits.Float(desc="How many histogram bins per unit interval.", argstr="--hrf %f") uav = traits.Bool(desc="Use absolute value in case of negative square.", argstr="--uav ") inputVolume = File(position=-2, desc="Input DWI volume.", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output DWI volume.", argstr="%s") compressOutput = traits.Bool(desc="Compress the data of the compressed file using gzip", argstr="--compressOutput ") class DWIRicianLMMSEFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output DWI volume.", exists=True) class DWIRicianLMMSEFilter(SEMLikeCommandLine): """title: DWI Rician LMMSE Filter category: Diffusion.Diffusion Weighted Images description: This module reduces noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the image in the mean squared error sense using a Rician noise model. Images corresponding to each gradient direction, including baseline, are processed individually. The noise parameter is automatically estimated (noise estimation improved but slower). Note that this is a general purpose filter for MRi images. The module jointLMMSE has been specifically designed for DWI volumes and shows a better performance, so its use is recommended instead. A complete description of the algorithm in this module can be found in: S. Aja-Fernandez, M. Niethammer, M. Kubicki, M. Shenton, and C.-F. Westin. Restoration of DWI data using a Rician LMMSE estimator. IEEE Transactions on Medical Imaging, 27(10): pp. 1389-1403, Oct. 2008. version: 0.1.1.$Revision: 1 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RicianLMMSEImageFilter contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa), Marc Niethammer (UNC) acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). 
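example: a minimal usage sketch (illustrative only; the file names are hypothetical and the Slicer CLI modules must be available on the PATH)::

  from nipype.interfaces.slicer.diffusion.diffusion import DWIRicianLMMSEFilter
  lmmse = DWIRicianLMMSEFilter()
  lmmse.inputs.inputVolume = 'dwi.nhdr'         # input DWI volume (assumed to exist)
  lmmse.inputs.outputVolume = 'dwi_lmmse.nhdr'  # denoised DWI is written here
  lmmse.run()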
""" input_spec = DWIRicianLMMSEFilterInputSpec output_spec = DWIRicianLMMSEFilterOutputSpec _cmd = "DWIRicianLMMSEFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class TractographyLabelMapSeedingInputSpec(CommandLineInputSpec): InputVolume = File(position=-2, desc="Input DTI volume", exists=True, argstr="%s") inputroi = File(desc="Label map with seeding ROIs", exists=True, argstr="--inputroi %s") OutputFibers = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Tractography result", argstr="%s") useindexspace = traits.Bool(desc="Seed at IJK voxel grid", argstr="--useindexspace ") seedspacing = traits.Float(desc="Spacing (in mm) between seed points, only matters if use Use Index Space is off", argstr="--seedspacing %f") randomgrid = traits.Bool(desc="Enable random placing of seeds", argstr="--randomgrid ") clthreshold = traits.Float(desc="Minimum Linear Measure for the seeding to start.", argstr="--clthreshold %f") minimumlength = traits.Float(desc="Minimum length of the fibers (in mm)", argstr="--minimumlength %f") maximumlength = traits.Float(desc="Maximum length of fibers (in mm)", argstr="--maximumlength %f") stoppingmode = traits.Enum("LinearMeasure", "FractionalAnisotropy", desc="Tensor measurement used to stop the tractography", argstr="--stoppingmode %s") stoppingvalue = traits.Float(desc="Tractography will stop when the stopping measurement drops below this value", argstr="--stoppingvalue %f") stoppingcurvature = traits.Float(desc="Tractography will stop if radius of curvature becomes smaller than this number units are degrees per mm", argstr="--stoppingcurvature %f") integrationsteplength = traits.Float(desc="Distance between points on the same fiber in mm", argstr="--integrationsteplength %f") label = traits.Int(desc="Label value that defines seeding region.", argstr="--label %d") writetofile = traits.Bool(desc="Write fibers to disk or create in the scene?", argstr="--writetofile ") outputdirectory = traits.Either(traits.Bool, Directory(), hash_files=False, desc="Directory in which to save fiber(s)", argstr="--outputdirectory %s") name = traits.Str(desc="Name to use for fiber files", argstr="--name %s") class TractographyLabelMapSeedingOutputSpec(TraitedSpec): OutputFibers = File(position=-1, desc="Tractography result", exists=True) outputdirectory = Directory(desc="Directory in which to save fiber(s)", exists=True) class TractographyLabelMapSeeding(SEMLikeCommandLine): """title: Tractography Label Map Seeding category: Diffusion.Diffusion Tensor Images description: Seed tracts on a Diffusion Tensor Image (DT) from a label map version: 0.1.0.$Revision: 1892 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Seeding license: slicer3 contributor: Raul San Jose (SPL, BWH), Demian Wassermann (SPL, BWH) acknowledgements: Laboratory of Mathematics in Imaging. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
""" input_spec = TractographyLabelMapSeedingInputSpec output_spec = TractographyLabelMapSeedingOutputSpec _cmd = "TractographyLabelMapSeeding " _outputs_filenames = {'OutputFibers':'OutputFibers.vtk','outputdirectory':'outputdirectory'} class DWIJointRicianLMMSEFilterInputSpec(CommandLineInputSpec): re = InputMultiPath(traits.Int, desc="Estimation radius.", sep=",", argstr="--re %s") rf = InputMultiPath(traits.Int, desc="Filtering radius.", sep=",", argstr="--rf %s") ng = traits.Int(desc="The number of the closest gradients that are used to jointly filter a given gradient direction (0 to use all).", argstr="--ng %d") inputVolume = File(position=-2, desc="Input DWI volume.", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output DWI volume.", argstr="%s") compressOutput = traits.Bool(desc="Compress the data of the compressed file using gzip", argstr="--compressOutput ") class DWIJointRicianLMMSEFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output DWI volume.", exists=True) class DWIJointRicianLMMSEFilter(SEMLikeCommandLine): """title: DWI Joint Rician LMMSE Filter category: Diffusion.Diffusion Weighted Images description: This module reduces Rician noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the image in the mean squared error sense using a Rician noise model. The N closest gradient directions to the direction being processed are filtered together to improve the results: the noise-free signal is seen as an n-diemensional vector which has to be estimated with the LMMSE method from a set of corrupted measurements. To that end, the covariance matrix of the noise-free vector and the cross covariance between this signal and the noise have to be estimated, which is done taking into account the image formation process. The noise parameter is automatically estimated from a rough segmentation of the background of the image. In this area the signal is simply 0, so that Rician statistics reduce to Rayleigh and the noise power can be easily estimated from the mode of the histogram. A complete description of the algorithm may be found in: Antonio Tristan-Vega and Santiago Aja-Fernandez, DWI filtering using joint information for DTI and HARDI, Medical Image Analysis, Volume 14, Issue 2, Pages 205-218. 2010. version: 0.1.1.$Revision: 1 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/JointRicianLMMSEImageFilter contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa) acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). """ input_spec = DWIJointRicianLMMSEFilterInputSpec output_spec = DWIJointRicianLMMSEFilterOutputSpec _cmd = "DWIJointRicianLMMSEFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class DiffusionWeightedVolumeMaskingInputSpec(CommandLineInputSpec): inputVolume = File(position=-4, desc="Input DWI volume", exists=True, argstr="%s") outputBaseline = traits.Either(traits.Bool, File(), position=-2, hash_files=False, desc="Estimated baseline volume", argstr="%s") thresholdMask = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Otsu Threshold Mask", argstr="%s") otsuomegathreshold = traits.Float(desc="Control the sharpness of the threshold in the Otsu computation. 
0: lower threshold, 1: higher threhold", argstr="--otsuomegathreshold %f") removeislands = traits.Bool(desc="Remove Islands in Threshold Mask?", argstr="--removeislands ") class DiffusionWeightedVolumeMaskingOutputSpec(TraitedSpec): outputBaseline = File(position=-2, desc="Estimated baseline volume", exists=True) thresholdMask = File(position=-1, desc="Otsu Threshold Mask", exists=True) class DiffusionWeightedVolumeMasking(SEMLikeCommandLine): """title: Diffusion Weighted Volume Masking category: Diffusion.Diffusion Weighted Images description:

Performs a mask calculation from a diffusion weighted (DW) image.

Starting from a DW image, this module computes the baseline image by averaging all the images without diffusion weighting, and then applies the Otsu segmentation algorithm to produce a mask. This mask can then be used when estimating the diffusion tensor (DT) image, so that tensors are not estimated over the entire volume.
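example: a minimal usage sketch (illustrative only; file names are hypothetical)::

  from nipype.interfaces.slicer.diffusion.diffusion import DiffusionWeightedVolumeMasking
  masker = DiffusionWeightedVolumeMasking()
  masker.inputs.inputVolume = 'dwi.nhdr'           # input DWI volume (assumed to exist)
  masker.inputs.outputBaseline = 'baseline.nrrd'   # estimated baseline volume
  masker.inputs.thresholdMask = 'brain_mask.nrrd'  # Otsu threshold mask
  masker.run()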

version: 0.1.0.$Revision: 1892 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionWeightedMasking license: slicer3 contributor: Demian Wassermann (SPL, BWH) """ input_spec = DiffusionWeightedVolumeMaskingInputSpec output_spec = DiffusionWeightedVolumeMaskingOutputSpec _cmd = "DiffusionWeightedVolumeMasking " _outputs_filenames = {'outputBaseline':'outputBaseline.nii','thresholdMask':'thresholdMask.nii'} class DTIimportInputSpec(CommandLineInputSpec): inputFile = File(position=-2, desc="Input DTI file", exists=True, argstr="%s") outputTensor = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output DTI volume", argstr="%s") testingmode = traits.Bool(desc="Enable testing mode. Sample helix file (helix-DTI.nhdr) will be loaded into Slicer and converted in Nifti.", argstr="--testingmode ") class DTIimportOutputSpec(TraitedSpec): outputTensor = File(position=-1, desc="Output DTI volume", exists=True) class DTIimport(SEMLikeCommandLine): """title: DTIimport category: Diffusion.Diffusion Data Conversion description: Import tensor datasets from various formats, including the NifTi file format version: 1.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DTIImport contributor: Sonia Pujol (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NA-MIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = DTIimportInputSpec output_spec = DTIimportOutputSpec _cmd = "DTIimport " _outputs_filenames = {'outputTensor':'outputTensor.nii'} class DWIToDTIEstimationInputSpec(CommandLineInputSpec): inputVolume = File(position=-3, desc="Input DWI volume", exists=True, argstr="%s") mask = File(desc="Mask where the tensors will be computed", exists=True, argstr="--mask %s") outputTensor = traits.Either(traits.Bool, File(), position=-2, hash_files=False, desc="Estimated DTI volume", argstr="%s") outputBaseline = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Estimated baseline volume", argstr="%s") enumeration = traits.Enum("LS", "WLS", desc="LS: Least Squares, WLS: Weighted Least Squares", argstr="--enumeration %s") shiftNeg = traits.Bool(desc="Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error)", argstr="--shiftNeg ") class DWIToDTIEstimationOutputSpec(TraitedSpec): outputTensor = File(position=-2, desc="Estimated DTI volume", exists=True) outputBaseline = File(position=-1, desc="Estimated baseline volume", exists=True) class DWIToDTIEstimation(SEMLikeCommandLine): """title: DWI to DTI Estimation category: Diffusion.Diffusion Weighted Images description: Performs a tensor model estimation from diffusion weighted images. There are three estimation methods available: least squares, weigthed least squares and non-linear estimation. The first method is the traditional method for tensor estimation and the fastest one. Weighted least squares takes into account the noise characteristics of the MRI images to weight the DWI samples used in the estimation based on its intensity magnitude. The last method is the more complex. 
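example: a minimal usage sketch (illustrative only; file names are hypothetical)::

  from nipype.interfaces.slicer.diffusion.diffusion import DWIToDTIEstimation
  dti = DWIToDTIEstimation()
  dti.inputs.inputVolume = 'dwi.nhdr'          # input DWI volume (assumed to exist)
  dti.inputs.mask = 'brain_mask.nrrd'          # restrict tensor estimation to the mask
  dti.inputs.enumeration = 'WLS'               # weighted least squares estimation
  dti.inputs.outputTensor = 'dti.nhdr'         # estimated DTI volume
  dti.inputs.outputBaseline = 'baseline.nrrd'  # estimated baseline volume
  dti.run()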
version: 0.1.0.$Revision: 1892 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionTensorEstimation license: slicer3 contributor: Raul San Jose (SPL, BWH) acknowledgements: This command module is based on the estimation functionality provided by the Teem library. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = DWIToDTIEstimationInputSpec output_spec = DWIToDTIEstimationOutputSpec _cmd = "DWIToDTIEstimation " _outputs_filenames = {'outputTensor':'outputTensor.nii','outputBaseline':'outputBaseline.nii'} class DiffusionTensorScalarMeasurementsInputSpec(CommandLineInputSpec): inputVolume = File(position=-3, desc="Input DTI volume", exists=True, argstr="%s") outputScalar = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Scalar volume derived from tensor", argstr="%s") enumeration = traits.Enum("Trace", "Determinant", "RelativeAnisotropy", "FractionalAnisotropy", "Mode", "LinearMeasure", "PlanarMeasure", "SphericalMeasure", "MinEigenvalue", "MidEigenvalue", "MaxEigenvalue", "MaxEigenvalueProjectionX", "MaxEigenvalueProjectionY", "MaxEigenvalueProjectionZ", "RAIMaxEigenvecX", "RAIMaxEigenvecY", "RAIMaxEigenvecZ", "MaxEigenvecX", "MaxEigenvecY", "MaxEigenvecZ", "D11", "D22", "D33", "ParallelDiffusivity", "PerpendicularDffusivity", desc="An enumeration of strings", argstr="--enumeration %s") class DiffusionTensorScalarMeasurementsOutputSpec(TraitedSpec): outputScalar = File(position=-1, desc="Scalar volume derived from tensor", exists=True) class DiffusionTensorScalarMeasurements(SEMLikeCommandLine): """title: Diffusion Tensor Scalar Measurements category: Diffusion.Diffusion Tensor Images description: Compute a set of different scalar measurements from a tensor field, specially oriented for Diffusion Tensors where some rotationally invariant measurements, like Fractional Anisotropy, are highly used to describe the anistropic behaviour of the tensor. version: 0.1.0.$Revision: 1892 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionTensorMathematics contributor: Raul San Jose (SPL, BWH) acknowledgements: LMI """ input_spec = DiffusionTensorScalarMeasurementsInputSpec output_spec = DiffusionTensorScalarMeasurementsOutputSpec _cmd = "DiffusionTensorScalarMeasurements " _outputs_filenames = {'outputScalar':'outputScalar.nii'} class DTIexportInputSpec(CommandLineInputSpec): inputTensor = File(position=-2, desc="Input DTI volume", exists=True, argstr="%s") outputFile = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output DTI file", argstr="%s") class DTIexportOutputSpec(TraitedSpec): outputFile = File(position=-1, desc="Output DTI file", exists=True) class DTIexport(SEMLikeCommandLine): """title: DTIexport category: Diffusion.Diffusion Data Conversion description: Export DTI data to various file formats version: 1.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DTIExport contributor: Sonia Pujol (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NA-MIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
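example: a minimal usage sketch (illustrative only; file names are hypothetical)::

  from nipype.interfaces.slicer.diffusion.diffusion import DTIexport
  export = DTIexport()
  export.inputs.inputTensor = 'dti.nhdr'  # input DTI volume (assumed to exist)
  export.inputs.outputFile = 'dti.nii'    # exported tensor data set
  export.run()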
""" input_spec = DTIexportInputSpec output_spec = DTIexportOutputSpec _cmd = "DTIexport " _outputs_filenames = {'outputFile':'outputFile'} nipype-0.9.2/nipype/interfaces/slicer/diffusion/tests/000077500000000000000000000000001227300005300230565ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py000066400000000000000000000021011227300005300273730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.diffusion.diffusion import DTIexport def test_DTIexport_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputTensor=dict(argstr='%s', position=-2, ), outputFile=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DTIexport.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DTIexport_outputs(): output_map = dict(outputFile=dict(position=-1, ), ) outputs = DTIexport.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py000066400000000000000000000021701227300005300273720ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.diffusion.diffusion import DTIimport def test_DTIimport_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputFile=dict(argstr='%s', position=-2, ), outputTensor=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), testingmode=dict(argstr='--testingmode ', ), ) inputs = DTIimport.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DTIimport_outputs(): output_map = dict(outputTensor=dict(position=-1, ), ) outputs = DTIimport.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py000066400000000000000000000025311227300005300322410ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.diffusion.diffusion import DWIJointRicianLMMSEFilter def test_DWIJointRicianLMMSEFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), compressOutput=dict(argstr='--compressOutput ', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), ng=dict(argstr='--ng %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), re=dict(argstr='--re %s', sep=',', ), rf=dict(argstr='--rf %s', sep=',', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DWIJointRicianLMMSEFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], 
metakey), value def test_DWIJointRicianLMMSEFilter_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = DWIJointRicianLMMSEFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py000066400000000000000000000031001227300005300312260ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.diffusion.diffusion import DWIRicianLMMSEFilter def test_DWIRicianLMMSEFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), compressOutput=dict(argstr='--compressOutput ', ), environ=dict(nohash=True, usedefault=True, ), hrf=dict(argstr='--hrf %f', ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), iter=dict(argstr='--iter %d', ), maxnstd=dict(argstr='--maxnstd %d', ), minnstd=dict(argstr='--minnstd %d', ), mnve=dict(argstr='--mnve %d', ), mnvf=dict(argstr='--mnvf %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), re=dict(argstr='--re %s', sep=',', ), rf=dict(argstr='--rf %s', sep=',', ), terminal_output=dict(mandatory=True, nohash=True, ), uav=dict(argstr='--uav ', ), ) inputs = DWIRicianLMMSEFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DWIRicianLMMSEFilter_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = DWIRicianLMMSEFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py000066400000000000000000000026001227300005300310410ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.diffusion.diffusion import DWIToDTIEstimation def test_DWIToDTIEstimation_inputs(): input_map = dict(args=dict(argstr='%s', ), enumeration=dict(argstr='--enumeration %s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-3, ), mask=dict(argstr='--mask %s', ), outputBaseline=dict(argstr='%s', hash_files=False, position=-1, ), outputTensor=dict(argstr='%s', hash_files=False, position=-2, ), shiftNeg=dict(argstr='--shiftNeg ', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DWIToDTIEstimation.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DWIToDTIEstimation_outputs(): output_map = dict(outputBaseline=dict(position=-1, ), outputTensor=dict(position=-2, ), ) outputs = DWIToDTIEstimation.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py000066400000000000000000000023641227300005300343640ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.diffusion.diffusion import 
DiffusionTensorScalarMeasurements def test_DiffusionTensorScalarMeasurements_inputs(): input_map = dict(args=dict(argstr='%s', ), enumeration=dict(argstr='--enumeration %s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-3, ), outputScalar=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DiffusionTensorScalarMeasurements.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DiffusionTensorScalarMeasurements_outputs(): output_map = dict(outputScalar=dict(position=-1, ), ) outputs = DiffusionTensorScalarMeasurements.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py000066400000000000000000000026551227300005300336400ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.diffusion.diffusion import DiffusionWeightedVolumeMasking def test_DiffusionWeightedVolumeMasking_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-4, ), otsuomegathreshold=dict(argstr='--otsuomegathreshold %f', ), outputBaseline=dict(argstr='%s', hash_files=False, position=-2, ), removeislands=dict(argstr='--removeislands ', ), terminal_output=dict(mandatory=True, nohash=True, ), thresholdMask=dict(argstr='%s', hash_files=False, position=-1, ), ) inputs = DiffusionWeightedVolumeMasking.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DiffusionWeightedVolumeMasking_outputs(): output_map = dict(outputBaseline=dict(position=-2, ), thresholdMask=dict(position=-1, ), ) outputs = DiffusionWeightedVolumeMasking.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py000066400000000000000000000050451227300005300310240ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.diffusion.diffusion import ResampleDTIVolume def test_ResampleDTIVolume_inputs(): input_map = dict(Inverse_ITK_Transformation=dict(argstr='--Inverse_ITK_Transformation ', ), Reference=dict(argstr='--Reference %s', ), args=dict(argstr='%s', ), centered_transform=dict(argstr='--centered_transform ', ), correction=dict(argstr='--correction %s', ), defField=dict(argstr='--defField %s', ), default_pixel_value=dict(argstr='--default_pixel_value %f', ), direction_matrix=dict(argstr='--direction_matrix %s', sep=',', ), environ=dict(nohash=True, usedefault=True, ), hfieldtype=dict(argstr='--hfieldtype %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), image_center=dict(argstr='--image_center %s', ), inputVolume=dict(argstr='%s', position=-2, ), interpolation=dict(argstr='--interpolation %s', ), notbulk=dict(argstr='--notbulk ', ), 
number_of_thread=dict(argstr='--number_of_thread %d', ), origin=dict(argstr='--origin %s', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), rotation_point=dict(argstr='--rotation_point %s', ), size=dict(argstr='--size %s', sep=',', ), spaceChange=dict(argstr='--spaceChange ', ), spacing=dict(argstr='--spacing %s', sep=',', ), spline_order=dict(argstr='--spline_order %d', ), terminal_output=dict(mandatory=True, nohash=True, ), transform=dict(argstr='--transform %s', ), transform_matrix=dict(argstr='--transform_matrix %s', sep=',', ), transform_order=dict(argstr='--transform_order %s', ), transform_tensor_method=dict(argstr='--transform_tensor_method %s', ), transformationFile=dict(argstr='--transformationFile %s', ), window_function=dict(argstr='--window_function %s', ), ) inputs = ResampleDTIVolume.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ResampleDTIVolume_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = ResampleDTIVolume.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py000066400000000000000000000040341227300005300330640ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.diffusion.diffusion import TractographyLabelMapSeeding def test_TractographyLabelMapSeeding_inputs(): input_map = dict(InputVolume=dict(argstr='%s', position=-2, ), OutputFibers=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), clthreshold=dict(argstr='--clthreshold %f', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputroi=dict(argstr='--inputroi %s', ), integrationsteplength=dict(argstr='--integrationsteplength %f', ), label=dict(argstr='--label %d', ), maximumlength=dict(argstr='--maximumlength %f', ), minimumlength=dict(argstr='--minimumlength %f', ), name=dict(argstr='--name %s', ), outputdirectory=dict(argstr='--outputdirectory %s', hash_files=False, ), randomgrid=dict(argstr='--randomgrid ', ), seedspacing=dict(argstr='--seedspacing %f', ), stoppingcurvature=dict(argstr='--stoppingcurvature %f', ), stoppingmode=dict(argstr='--stoppingmode %s', ), stoppingvalue=dict(argstr='--stoppingvalue %f', ), terminal_output=dict(mandatory=True, nohash=True, ), useindexspace=dict(argstr='--useindexspace ', ), writetofile=dict(argstr='--writetofile ', ), ) inputs = TractographyLabelMapSeeding.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TractographyLabelMapSeeding_outputs(): output_map = dict(OutputFibers=dict(position=-1, ), outputdirectory=dict(), ) outputs = TractographyLabelMapSeeding.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/000077500000000000000000000000001227300005300217115ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/filtering/__init__.py000066400000000000000000000014311227300005300240210ustar00rootroot00000000000000from morphology import 
GrayscaleGrindPeakImageFilter, GrayscaleFillHoleImageFilter from denoising import GradientAnisotropicDiffusion, CurvatureAnisotropicDiffusion, GaussianBlurImageFilter, MedianImageFilter from arithmetic import MultiplyScalarVolumes, MaskScalarVolume, SubtractScalarVolumes, AddScalarVolumes, CastScalarVolume from extractskeleton import ExtractSkeleton from histogrammatching import HistogramMatching from thresholdscalarvolume import ThresholdScalarVolume from n4itkbiasfieldcorrection import N4ITKBiasFieldCorrection from checkerboardfilter import CheckerBoardFilter from imagelabelcombine import ImageLabelCombine from votingbinaryholefillingimagefilter import VotingBinaryHoleFillingImageFilter from resamplescalarvectordwivolume import ResampleScalarVectorDWIVolume nipype-0.9.2/nipype/interfaces/slicer/filtering/arithmetic.py000066400000000000000000000200601227300005300244120ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class MultiplyScalarVolumesInputSpec(CommandLineInputSpec): inputVolume1 = File(position=-3, desc="Input volume 1", exists=True, argstr="%s") inputVolume2 = File(position=-2, desc="Input volume 2", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Volume1 * Volume2", argstr="%s") order = traits.Enum("0", "1", "2", "3", desc="Interpolation order if two images are in different coordinate frames or have different sampling.", argstr="--order %s") class MultiplyScalarVolumesOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Volume1 * Volume2", exists=True) class MultiplyScalarVolumes(SEMLikeCommandLine): """title: Multiply Scalar Volumes category: Filtering.Arithmetic description: Multiplies two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. version: 0.1.0.$Revision: 8595 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Multiply contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
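example: a minimal usage sketch (illustrative only; file names are hypothetical)::

  from nipype.interfaces.slicer.filtering.arithmetic import MultiplyScalarVolumes
  mult = MultiplyScalarVolumes()
  mult.inputs.inputVolume1 = 'vol1.nii'     # first input volume (assumed to exist)
  mult.inputs.inputVolume2 = 'vol2.nii'     # second input volume (assumed to exist)
  mult.inputs.outputVolume = 'product.nii'  # voxel-wise product of the two inputs
  mult.run()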
""" input_spec = MultiplyScalarVolumesInputSpec output_spec = MultiplyScalarVolumesOutputSpec _cmd = "MultiplyScalarVolumes " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class MaskScalarVolumeInputSpec(CommandLineInputSpec): InputVolume = File(position=-3, desc="Input volume to be masked", exists=True, argstr="%s") MaskVolume = File(position=-2, desc="Label volume containing the mask", exists=True, argstr="%s") OutputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output volume: Input Volume masked by label value from Mask Volume", argstr="%s") label = traits.Int(desc="Label value in the Mask Volume to use as the mask", argstr="--label %d") replace = traits.Int(desc="Value to use for the output volume outside of the mask", argstr="--replace %d") class MaskScalarVolumeOutputSpec(TraitedSpec): OutputVolume = File(position=-1, desc="Output volume: Input Volume masked by label value from Mask Volume", exists=True) class MaskScalarVolume(SEMLikeCommandLine): """title: Mask Scalar Volume category: Filtering.Arithmetic description: Masks two images. The output image is set to 0 everywhere except where the chosen label from the mask volume is present, at which point it will retain it's original values. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. version: 0.1.0.$Revision: 8595 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Mask contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = MaskScalarVolumeInputSpec output_spec = MaskScalarVolumeOutputSpec _cmd = "MaskScalarVolume " _outputs_filenames = {'OutputVolume':'OutputVolume.nii'} class SubtractScalarVolumesInputSpec(CommandLineInputSpec): inputVolume1 = File(position=-3, desc="Input volume 1", exists=True, argstr="%s") inputVolume2 = File(position=-2, desc="Input volume 2", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Volume1 - Volume2", argstr="%s") order = traits.Enum("0", "1", "2", "3", desc="Interpolation order if two images are in different coordinate frames or have different sampling.", argstr="--order %s") class SubtractScalarVolumesOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Volume1 - Volume2", exists=True) class SubtractScalarVolumes(SEMLikeCommandLine): """title: Subtract Scalar Volumes category: Filtering.Arithmetic description: Subtracts two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Subtract contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
""" input_spec = SubtractScalarVolumesInputSpec output_spec = SubtractScalarVolumesOutputSpec _cmd = "SubtractScalarVolumes " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class AddScalarVolumesInputSpec(CommandLineInputSpec): inputVolume1 = File(position=-3, desc="Input volume 1", exists=True, argstr="%s") inputVolume2 = File(position=-2, desc="Input volume 2", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Volume1 + Volume2", argstr="%s") order = traits.Enum("0", "1", "2", "3", desc="Interpolation order if two images are in different coordinate frames or have different sampling.", argstr="--order %s") class AddScalarVolumesOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Volume1 + Volume2", exists=True) class AddScalarVolumes(SEMLikeCommandLine): """title: Add Scalar Volumes category: Filtering.Arithmetic description: Adds two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Add contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = AddScalarVolumesInputSpec output_spec = AddScalarVolumesOutputSpec _cmd = "AddScalarVolumes " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class CastScalarVolumeInputSpec(CommandLineInputSpec): InputVolume = File(position=-2, desc="Input volume, the volume to cast.", exists=True, argstr="%s") OutputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output volume, cast to the new type.", argstr="%s") type = traits.Enum("Char", "UnsignedChar", "Short", "UnsignedShort", "Int", "UnsignedInt", "Float", "Double", desc="Type for the new output volume.", argstr="--type %s") class CastScalarVolumeOutputSpec(TraitedSpec): OutputVolume = File(position=-1, desc="Output volume, cast to the new type.", exists=True) class CastScalarVolume(SEMLikeCommandLine): """title: Cast Scalar Volume category: Filtering.Arithmetic description: Cast a volume to a given data type. Use at your own risk when casting an input volume into a lower precision type! Allows casting to the same type as the input volume. version: 0.1.0.$Revision: 2104 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Cast contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
""" input_spec = CastScalarVolumeInputSpec output_spec = CastScalarVolumeOutputSpec _cmd = "CastScalarVolume " _outputs_filenames = {'OutputVolume':'OutputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/filtering/checkerboardfilter.py000066400000000000000000000044201227300005300261050ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class CheckerBoardFilterInputSpec(CommandLineInputSpec): checkerPattern = InputMultiPath(traits.Int, desc="The pattern of input 1 and input 2 in the output image. The user can specify the number of checkers in each dimension. A checkerPattern of 2,2,1 means that images will alternate in every other checker in the first two dimensions. The same pattern will be used in the 3rd dimension.", sep=",", argstr="--checkerPattern %s") inputVolume1 = File(position=-3, desc="First Input volume", exists=True, argstr="%s") inputVolume2 = File(position=-2, desc="Second Input volume", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s") class CheckerBoardFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output filtered", exists=True) class CheckerBoardFilter(SEMLikeCommandLine): """title: CheckerBoard Filter category: Filtering description: Create a checkerboard volume of two volumes. The output volume will show the two inputs alternating according to the user supplied checkerPattern. This filter is often used to compare the results of image registration. Note that the second input is resampled to the same origin, spacing and direction before it is composed with the first input. The scalar type of the output volume will be the same as the input image scalar type. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/CheckerBoard contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = CheckerBoardFilterInputSpec output_spec = CheckerBoardFilterOutputSpec _cmd = "CheckerBoardFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/filtering/denoising.py000066400000000000000000000213231227300005300242430ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class GradientAnisotropicDiffusionInputSpec(CommandLineInputSpec): conductance = traits.Float(desc="Conductance controls the sensitivity of the conductance term. As a general rule, the lower the value, the more strongly the filter preserves edges. A high value will cause diffusion (smoothing) across edges. 
Note that the number of iterations controls how much smoothing is done within regions bounded by edges.", argstr="--conductance %f") iterations = traits.Int(desc="The more iterations, the more smoothing. Each iteration takes the same amount of time. If it takes 10 seconds for one iteration, then it will take 100 seconds for 10 iterations. Note that the conductance controls how much each iteration smooths across edges.", argstr="--iterations %d") timeStep = traits.Float(desc="The time step depends on the dimensionality of the image. In Slicer the images are 3D and the default (.0625) time step will provide a stable solution.", argstr="--timeStep %f") inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s") class GradientAnisotropicDiffusionOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output filtered", exists=True) class GradientAnisotropicDiffusion(SEMLikeCommandLine): """title: Gradient Anisotropic Diffusion category: Filtering.Denoising description: Runs gradient anisotropic diffusion on a volume. Anisotropic diffusion methods reduce noise (or unwanted detail) in images while preserving specific image features, like edges. For many applications, there is an assumption that light-dark transitions (edges) are interesting. Standard isotropic diffusion methods move and blur light-dark boundaries. Anisotropic diffusion methods are formulated to specifically preserve edges. The conductance term for this implementation is a function of the gradient magnitude of the image at each point, reducing the strength of diffusion at edges. The numerical implementation of this equation is similar to that described in the Perona-Malik paper, but uses a more robust technique for gradient magnitude estimation and has been generalized to N-dimensions. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GradientAnisotropicDiffusion contributor: Bill Lorensen (GE) acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium """ input_spec = GradientAnisotropicDiffusionInputSpec output_spec = GradientAnisotropicDiffusionOutputSpec _cmd = "GradientAnisotropicDiffusion " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class CurvatureAnisotropicDiffusionInputSpec(CommandLineInputSpec): conductance = traits.Float(desc="Conductance controls the sensitivity of the conductance term. As a general rule, the lower the value, the more strongly the filter preserves edges. A high value will cause diffusion (smoothing) across edges. Note that the number of iterations controls how much smoothing is done within regions bounded by edges.", argstr="--conductance %f") iterations = traits.Int(desc="The more iterations, the more smoothing. Each iteration takes the same amount of time. If it takes 10 seconds for one iteration, then it will take 100 seconds for 10 iterations. Note that the conductance controls how much each iteration smooths across edges.", argstr="--iterations %d") timeStep = traits.Float(desc="The time step depends on the dimensionality of the image. 
In Slicer the images are 3D and the default (.0625) time step will provide a stable solution.", argstr="--timeStep %f") inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s") class CurvatureAnisotropicDiffusionOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output filtered", exists=True) class CurvatureAnisotropicDiffusion(SEMLikeCommandLine): """title: Curvature Anisotropic Diffusion category: Filtering.Denoising description: Performs anisotropic diffusion on an image using a modified curvature diffusion equation (MCDE). MCDE does not exhibit the edge enhancing properties of classic anisotropic diffusion, which can under certain conditions undergo a 'negative' diffusion, which enhances the contrast of edges. Equations of the form of MCDE always undergo positive diffusion, with the conductance term only varying the strength of that diffusion. Qualitatively, MCDE compares well with other non-linear diffusion techniques. It is less sensitive to contrast than classic Perona-Malik style diffusion, and preserves finer detailed structures in images. There is a potential speed trade-off for using this function in place of Gradient Anisotropic Diffusion. Each iteration of the solution takes roughly twice as long. Fewer iterations, however, may be required to reach an acceptable solution. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/CurvatureAnisotropicDiffusion contributor: Bill Lorensen (GE) acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium """ input_spec = CurvatureAnisotropicDiffusionInputSpec output_spec = CurvatureAnisotropicDiffusionOutputSpec _cmd = "CurvatureAnisotropicDiffusion " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class GaussianBlurImageFilterInputSpec(CommandLineInputSpec): sigma = traits.Float(desc="Sigma value in physical units (e.g., mm) of the Gaussian kernel", argstr="--sigma %f") inputVolume = File(position=-2, desc="Input volume", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Blurred Volume", argstr="%s") class GaussianBlurImageFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Blurred Volume", exists=True) class GaussianBlurImageFilter(SEMLikeCommandLine): """title: Gaussian Blur Image Filter category: Filtering.Denoising description: Apply a gaussian blurr to an image version: 0.1.0.$Revision: 1.1 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GaussianBlurImageFilter contributor: Julien Jomier (Kitware), Stephen Aylward (Kitware) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
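example: a minimal usage sketch (illustrative only; file names are hypothetical)::

  from nipype.interfaces.slicer.filtering.denoising import GaussianBlurImageFilter
  blur = GaussianBlurImageFilter()
  blur.inputs.inputVolume = 'vol.nii'           # volume to smooth (assumed to exist)
  blur.inputs.sigma = 2.0                       # kernel width in physical units (mm)
  blur.inputs.outputVolume = 'vol_blurred.nii'  # blurred output volume
  blur.run()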
""" input_spec = GaussianBlurImageFilterInputSpec output_spec = GaussianBlurImageFilterOutputSpec _cmd = "GaussianBlurImageFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class MedianImageFilterInputSpec(CommandLineInputSpec): neighborhood = InputMultiPath(traits.Int, desc="The size of the neighborhood in each dimension", sep=",", argstr="--neighborhood %s") inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s") class MedianImageFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output filtered", exists=True) class MedianImageFilter(SEMLikeCommandLine): """title: Median Image Filter category: Filtering.Denoising description: The MedianImageFilter is commonly used as a robust approach for noise reduction. This filter is particularly efficient against "salt-and-pepper" noise. In other words, it is robust to the presence of gray-level outliers. MedianImageFilter computes the value of each output pixel as the statistical median of the neighborhood of values around the corresponding input pixel. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MedianImageFilter contributor: Bill Lorensen (GE) acknowledgements: This command module was derived from Insight/Examples/Filtering/MedianImageFilter (copyright) Insight Software Consortium """ input_spec = MedianImageFilterInputSpec output_spec = MedianImageFilterOutputSpec _cmd = "MedianImageFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/filtering/extractskeleton.py000066400000000000000000000043761227300005300255140ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class ExtractSkeletonInputSpec(CommandLineInputSpec): InputImageFileName = File(position=-2, desc="Input image", exists=True, argstr="%s") OutputImageFileName = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Skeleton of the input image", argstr="%s") type = traits.Enum("1D", "2D", desc="Type of skeleton to create", argstr="--type %s") dontPrune = traits.Bool(desc="Return the full skeleton, not just the maximal skeleton", argstr="--dontPrune ") numPoints = traits.Int(desc="Number of points used to represent the skeleton", argstr="--numPoints %d") pointsFile = traits.Str(desc="Name of the file to store the coordinates of the central (1D) skeleton points", argstr="--pointsFile %s") class ExtractSkeletonOutputSpec(TraitedSpec): OutputImageFileName = File(position=-1, desc="Skeleton of the input image", exists=True) class ExtractSkeleton(SEMLikeCommandLine): """title: Extract Skeleton category: Filtering description: Extract the skeleton of a binary object. The skeleton can be limited to being a 1D curve or allowed to be a full 2D manifold. The branches of the skeleton can be pruned so that only the maximal center skeleton is returned. 
version: 0.1.0.$Revision: 2104 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ExtractSkeleton contributor: Pierre Seroul (UNC), Martin Styner (UNC), Guido Gerig (UNC), Stephen Aylward (Kitware) acknowledgements: The original implementation of this method was provided by ETH Zurich, Image Analysis Laboratory of Profs Olaf Kuebler, Gabor Szekely and Guido Gerig. Martin Styner at UNC, Chapel Hill made enhancements. Wrapping for Slicer was provided by Pierre Seroul and Stephen Aylward at Kitware, Inc. """ input_spec = ExtractSkeletonInputSpec output_spec = ExtractSkeletonOutputSpec _cmd = "ExtractSkeleton " _outputs_filenames = {'OutputImageFileName':'OutputImageFileName.nii'} nipype-0.9.2/nipype/interfaces/slicer/filtering/histogrammatching.py000066400000000000000000000056421227300005300260020ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class HistogramMatchingInputSpec(CommandLineInputSpec): numberOfHistogramLevels = traits.Int(desc="The number of hisogram levels to use", argstr="--numberOfHistogramLevels %d") numberOfMatchPoints = traits.Int(desc="The number of match points to use", argstr="--numberOfMatchPoints %d") threshold = traits.Bool(desc="If on, only pixels above the mean in each volume are thresholded.", argstr="--threshold ") inputVolume = File(position=-3, desc="Input volume to be filtered", exists=True, argstr="%s") referenceVolume = File(position=-2, desc="Input volume whose histogram will be matched", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output volume. This is the input volume with intensities matched to the reference volume.", argstr="%s") class HistogramMatchingOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output volume. This is the input volume with intensities matched to the reference volume.", exists=True) class HistogramMatching(SEMLikeCommandLine): """title: Histogram Matching category: Filtering description: Normalizes the grayscale values of a source image based on the grayscale values of a reference image. This filter uses a histogram matching technique where the histograms of the two images are matched only at a specified number of quantile values. The filter was orginally designed to normalize MR images of the sameMR protocol and same body part. The algorithm works best if background pixels are excluded from both the source and reference histograms. A simple background exclusion method is to exclude all pixels whose grayscale values are smaller than the mean grayscale value. ThresholdAtMeanIntensity switches on this simple background exclusion method. Number of match points governs the number of quantile values to be matched. The filter assumes that both the source and reference are of the same type and that the input and output image type have the same number of dimension and have scalar pixel types. 
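example: a minimal usage sketch (illustrative only; file names are hypothetical)::

  from nipype.interfaces.slicer.filtering.histogrammatching import HistogramMatching
  match = HistogramMatching()
  match.inputs.inputVolume = 'subject.nii'           # volume to be normalized (assumed to exist)
  match.inputs.referenceVolume = 'reference.nii'     # volume providing the target histogram
  match.inputs.threshold = True                      # exclude background below the mean intensity
  match.inputs.outputVolume = 'subject_matched.nii'  # intensity-matched output
  match.run()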
version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/HistogramMatching contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = HistogramMatchingInputSpec output_spec = HistogramMatchingOutputSpec _cmd = "HistogramMatching " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/filtering/imagelabelcombine.py000066400000000000000000000026721227300005300257110ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class ImageLabelCombineInputSpec(CommandLineInputSpec): InputLabelMap_A = File(position=-3, desc="Label map image", exists=True, argstr="%s") InputLabelMap_B = File(position=-2, desc="Label map image", exists=True, argstr="%s") OutputLabelMap = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Resulting Label map image", argstr="%s") first_overwrites = traits.Bool(desc="Use first or second label when both are present", argstr="--first_overwrites ") class ImageLabelCombineOutputSpec(TraitedSpec): OutputLabelMap = File(position=-1, desc="Resulting Label map image", exists=True) class ImageLabelCombine(SEMLikeCommandLine): """title: Image Label Combine category: Filtering description: Combine two label maps into one version: 0.1.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ImageLabelCombine contributor: Alex Yarmarkovich (SPL, BWH) """ input_spec = ImageLabelCombineInputSpec output_spec = ImageLabelCombineOutputSpec _cmd = "ImageLabelCombine " _outputs_filenames = {'OutputLabelMap':'OutputLabelMap.nii'} nipype-0.9.2/nipype/interfaces/slicer/filtering/morphology.py000066400000000000000000000120731227300005300244650ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class GrayscaleGrindPeakImageFilterInputSpec(CommandLineInputSpec): inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s") class GrayscaleGrindPeakImageFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output filtered", exists=True) class GrayscaleGrindPeakImageFilter(SEMLikeCommandLine): """title: Grayscale Grind Peak Image Filter category: Filtering.Morphology description: GrayscaleGrindPeakImageFilter removes peaks in a grayscale image. Peaks are local maxima in the grayscale topography that are not connected to boundaries of the image. Gray level values adjacent to a peak are extrapolated through the peak. This filter is used to smooth over local maxima without affecting the values of local minima. 
If you take the difference between the output of this filter and the original image (and perhaps threshold the difference above a small value), you'll obtain a map of the local maxima. This filter uses the GrayscaleGeodesicDilateImageFilter. It provides its own input as the "mask" input to the geodesic erosion. The "marker" image for the geodesic erosion is constructed such that boundary pixels match the boundary pixels of the input image and the interior pixels are set to the minimum pixel value in the input image. This filter is the dual to the GrayscaleFillholeImageFilter which implements the Fillhole algorithm. Since it is a dual, it is somewhat superfluous but is provided as a convenience. Geodesic morphology and the Fillhole algorithm is described in Chapter 6 of Pierre Soille's book "Morphological Image Analysis: Principles and Applications", Second Edition, Springer, 2003. A companion filter, Grayscale Fill Hole, fills holes in grayscale images. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleGrindPeakImageFilter contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = GrayscaleGrindPeakImageFilterInputSpec output_spec = GrayscaleGrindPeakImageFilterOutputSpec _cmd = "GrayscaleGrindPeakImageFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class GrayscaleFillHoleImageFilterInputSpec(CommandLineInputSpec): inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s") class GrayscaleFillHoleImageFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output filtered", exists=True) class GrayscaleFillHoleImageFilter(SEMLikeCommandLine): """title: Grayscale Fill Hole Image Filter category: Filtering.Morphology description: GrayscaleFillholeImageFilter fills holes in a grayscale image. Holes are local minima in the grayscale topography that are not connected to boundaries of the image. Gray level values adjacent to a hole are extrapolated across the hole. This filter is used to smooth over local minima without affecting the values of local maxima. If you take the difference between the output of this filter and the original image (and perhaps threshold the difference above a small value), you'll obtain a map of the local minima. This filter uses the itkGrayscaleGeodesicErodeImageFilter. It provides its own input as the "mask" input to the geodesic erosion. The "marker" image for the geodesic erosion is constructed such that boundary pixels match the boundary pixels of the input image and the interior pixels are set to the maximum pixel value in the input image. Geodesic morphology and the Fillhole algorithm is described in Chapter 6 of Pierre Soille's book "Morphological Image Analysis: Principles and Applications", Second Edition, Springer, 2003. A companion filter, Grayscale Grind Peak, removes peaks in grayscale images. 
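A minimal usage sketch (file names are placeholders; running it assumes the GrayscaleFillHoleImageFilter CLI executable is on the PATH)::

    from nipype.interfaces.slicer.filtering.morphology import GrayscaleFillHoleImageFilter
    fill = GrayscaleFillHoleImageFilter()
    fill.inputs.inputVolume = 'grayscale.nii'  # placeholder input volume
    fill.inputs.outputVolume = 'filled.nii'    # requested output file
    result = fill.run()
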
version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleFillHoleImageFilter contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = GrayscaleFillHoleImageFilterInputSpec output_spec = GrayscaleFillHoleImageFilterOutputSpec _cmd = "GrayscaleFillHoleImageFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py000066400000000000000000000110011227300005300272400ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class N4ITKBiasFieldCorrectionInputSpec(CommandLineInputSpec): inputimage = File(desc="Input image where you observe signal inhomegeneity", exists=True, argstr="--inputimage %s") maskimage = File(desc="Binary mask that defines the structure of your interest. NOTE: This parameter is OPTIONAL. If the mask is not specified, the module will use internally Otsu thresholding to define this mask. Better processing results can often be obtained when a meaningful mask is defined.", exists=True, argstr="--maskimage %s") outputimage = traits.Either(traits.Bool, File(), hash_files=False, desc="Result of processing", argstr="--outputimage %s") outputbiasfield = traits.Either(traits.Bool, File(), hash_files=False, desc="Recovered bias field (OPTIONAL)", argstr="--outputbiasfield %s") iterations = InputMultiPath(traits.Int, desc="Maximum number of iterations at each level of resolution. Larger values will increase execution time, but may lead to better results.", sep=",", argstr="--iterations %s") convergencethreshold = traits.Float(desc="Stopping criterion for the iterative bias estimation. Larger values will lead to smaller execution time.", argstr="--convergencethreshold %f") meshresolution = InputMultiPath(traits.Float, desc="Resolution of the initial bspline grid defined as a sequence of three numbers. The actual resolution will be defined by adding the bspline order (default is 3) to the resolution in each dimension specified here. For example, 1,1,1 will result in a 4x4x4 grid of control points. This parameter may need to be adjusted based on your input image. In the multi-resolution N4 framework, the resolution of the bspline grid at subsequent iterations will be doubled. The number of resolutions is implicitly defined by Number of iterations parameter (the size of this list is the number of resolutions)", sep=",", argstr="--meshresolution %s") splinedistance = traits.Float(desc="An alternative means to define the spline grid, by setting the distance between the control points. This parameter is used only if the grid resolution is not specified.", argstr="--splinedistance %f") shrinkfactor = traits.Int(desc="Defines how much the image should be upsampled before estimating the inhomogeneity field. Increase if you want to reduce the execution time. 1 corresponds to the original resolution. 
Larger values will significantly reduce the computation time.", argstr="--shrinkfactor %d") bsplineorder = traits.Int(desc="Order of B-spline used in the approximation. Larger values will lead to longer execution times, may result in overfitting and poor result.", argstr="--bsplineorder %d") weightimage = File(desc="Weight Image", exists=True, argstr="--weightimage %s") histogramsharpening = InputMultiPath(traits.Float, desc="A vector of up to three values. Non-zero values correspond to Bias Field Full Width at Half Maximum, Wiener filter noise, and Number of histogram bins.", sep=",", argstr="--histogramsharpening %s") class N4ITKBiasFieldCorrectionOutputSpec(TraitedSpec): outputimage = File(desc="Result of processing", exists=True) outputbiasfield = File(desc="Recovered bias field (OPTIONAL)", exists=True) class N4ITKBiasFieldCorrection(SEMLikeCommandLine): """title: N4ITK MRI Bias correction category: Filtering description: Performs image bias correction using N4 algorithm. This module is based on the ITK filters contributed in the following publication: Tustison N, Gee J "N4ITK: Nick's N3 ITK Implementation For MRI Bias Field Correction", The Insight Journal 2009 January-June, http://hdl.handle.net/10380/3053 version: 9 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/N4ITKBiasFieldCorrection contributor: Nick Tustison (UPenn), Andrey Fedorov (SPL, BWH), Ron Kikinis (SPL, BWH) acknowledgements: The development of this module was partially supported by NIH grants R01 AA016748-01, R01 CA111288 and U01 CA151261 as well as by NA-MIC, NAC, NCIGT and the Slicer community. """ input_spec = N4ITKBiasFieldCorrectionInputSpec output_spec = N4ITKBiasFieldCorrectionOutputSpec _cmd = "N4ITKBiasFieldCorrection " _outputs_filenames = {'outputimage':'outputimage.nii','outputbiasfield':'outputbiasfield.nii'} nipype-0.9.2/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py000066400000000000000000000125661227300005300304520ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class ResampleScalarVectorDWIVolumeInputSpec(CommandLineInputSpec): inputVolume = File(position=-2, desc="Input Volume to be resampled", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Resampled Volume", argstr="%s") Reference = File(desc="Reference Volume (spacing,size,orientation,origin)", exists=True, argstr="--Reference %s") transformationFile = File(exists=True, argstr="--transformationFile %s") defField = File(desc="File containing the deformation field (3D vector image containing vectors with 3 components)", exists=True, argstr="--defField %s") hfieldtype = traits.Enum("displacement", "h-Field", desc="Set if the deformation field is an h-Field", argstr="--hfieldtype %s") interpolation = traits.Enum("linear", "nn", "ws", "bs", desc="Sampling algorithm (linear or nn (nearest neighborhoor), ws (WindowedSinc), bs (BSpline) )", argstr="--interpolation %s") transform_order = traits.Enum("input-to-output", "output-to-input", desc="Select in what order the transforms are read", argstr="--transform_order %s") notbulk = traits.Bool(desc="The transform following the BSpline transform is not set as a bulk transform for 
the BSpline transform", argstr="--notbulk ") spaceChange = traits.Bool(desc="Space Orientation between transform and image is different (RAS/LPS) (warning: if the transform is a Transform Node in Slicer3, do not select)", argstr="--spaceChange ") rotation_point = traits.List(desc="Rotation Point in case of rotation around a point (otherwise useless)", argstr="--rotation_point %s") centered_transform = traits.Bool(desc="Set the center of the transformation to the center of the input image", argstr="--centered_transform ") image_center = traits.Enum("input", "output", desc="Image to use to center the transform (used only if \'Centered Transform\' is selected)", argstr="--image_center %s") Inverse_ITK_Transformation = traits.Bool(desc="Inverse the transformation before applying it from output image to input image", argstr="--Inverse_ITK_Transformation ") spacing = InputMultiPath(traits.Float, desc="Spacing along each dimension (0 means use input spacing)", sep=",", argstr="--spacing %s") size = InputMultiPath(traits.Float, desc="Size along each dimension (0 means use input size)", sep=",", argstr="--size %s") origin = traits.List(desc="Origin of the output Image", argstr="--origin %s") direction_matrix = InputMultiPath(traits.Float, desc="9 parameters of the direction matrix by rows (ijk to LPS if LPS transform, ijk to RAS if RAS transform)", sep=",", argstr="--direction_matrix %s") number_of_thread = traits.Int(desc="Number of thread used to compute the output image", argstr="--number_of_thread %d") default_pixel_value = traits.Float(desc="Default pixel value for samples falling outside of the input region", argstr="--default_pixel_value %f") window_function = traits.Enum("h", "c", "w", "l", "b", desc="Window Function , h = Hamming , c = Cosine , w = Welch , l = Lanczos , b = Blackman", argstr="--window_function %s") spline_order = traits.Int(desc="Spline Order", argstr="--spline_order %d") transform_matrix = InputMultiPath(traits.Float, desc="12 parameters of the transform matrix by rows ( --last 3 being translation-- )", sep=",", argstr="--transform_matrix %s") transform = traits.Enum("rt", "a", desc="Transform algorithm, rt = Rigid Transform, a = Affine Transform", argstr="--transform %s") class ResampleScalarVectorDWIVolumeOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Resampled Volume", exists=True) class ResampleScalarVectorDWIVolume(SEMLikeCommandLine): """title: Resample Scalar/Vector/DWI Volume category: Filtering description: This module implements image and vector-image resampling through the use of itk Transforms.It can also handle diffusion weighted MRI image resampling. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. Warning: To resample DWMR Images, use nrrd input and output files. Warning: Do not use to resample Diffusion Tensor Images, tensors would not be reoriented version: 0.1 documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleScalarVectorDWIVolume contributor: Francois Budin (UNC) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
Information on the National Centers for Biomedical Computing can be obtained from http://nihroadmap.nih.gov/bioinformatics """ input_spec = ResampleScalarVectorDWIVolumeInputSpec output_spec = ResampleScalarVectorDWIVolumeOutputSpec _cmd = "ResampleScalarVectorDWIVolume " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/000077500000000000000000000000001227300005300230535ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py000066400000000000000000000023201227300005300307020ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.arithmetic import AddScalarVolumes def test_AddScalarVolumes_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', position=-3, ), inputVolume2=dict(argstr='%s', position=-2, ), order=dict(argstr='--order %s', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = AddScalarVolumes.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_AddScalarVolumes_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = AddScalarVolumes.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py000066400000000000000000000022221227300005300307220ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.arithmetic import CastScalarVolume def test_CastScalarVolume_inputs(): input_map = dict(InputVolume=dict(argstr='%s', position=-2, ), OutputVolume=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), type=dict(argstr='--type %s', ), ) inputs = CastScalarVolume.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CastScalarVolume_outputs(): output_map = dict(OutputVolume=dict(position=-1, ), ) outputs = CastScalarVolume.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py000066400000000000000000000024011227300005300311730ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.checkerboardfilter import CheckerBoardFilter def test_CheckerBoardFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), checkerPattern=dict(argstr='--checkerPattern %s', sep=',', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', position=-3, ), inputVolume2=dict(argstr='%s', position=-2, ), 
outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = CheckerBoardFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CheckerBoardFilter_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = CheckerBoardFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py000066400000000000000000000025061227300005300335610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.denoising import CurvatureAnisotropicDiffusion def test_CurvatureAnisotropicDiffusion_inputs(): input_map = dict(args=dict(argstr='%s', ), conductance=dict(argstr='--conductance %f', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), iterations=dict(argstr='--iterations %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), timeStep=dict(argstr='--timeStep %f', ), ) inputs = CurvatureAnisotropicDiffusion.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CurvatureAnisotropicDiffusion_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = CurvatureAnisotropicDiffusion.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py000066400000000000000000000025001227300005300306300ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.extractskeleton import ExtractSkeleton def test_ExtractSkeleton_inputs(): input_map = dict(InputImageFileName=dict(argstr='%s', position=-2, ), OutputImageFileName=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), dontPrune=dict(argstr='--dontPrune ', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), numPoints=dict(argstr='--numPoints %d', ), pointsFile=dict(argstr='--pointsFile %s', ), terminal_output=dict(mandatory=True, nohash=True, ), type=dict(argstr='--type %s', ), ) inputs = ExtractSkeleton.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ExtractSkeleton_outputs(): output_map = dict(OutputImageFileName=dict(position=-1, ), ) outputs = ExtractSkeleton.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py000066400000000000000000000022661227300005300322320ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from 
nipype.interfaces.slicer.filtering.denoising import GaussianBlurImageFilter def test_GaussianBlurImageFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), sigma=dict(argstr='--sigma %f', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = GaussianBlurImageFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_GaussianBlurImageFilter_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = GaussianBlurImageFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py000066400000000000000000000025011227300005300333310ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.denoising import GradientAnisotropicDiffusion def test_GradientAnisotropicDiffusion_inputs(): input_map = dict(args=dict(argstr='%s', ), conductance=dict(argstr='--conductance %f', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), iterations=dict(argstr='--iterations %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), timeStep=dict(argstr='--timeStep %f', ), ) inputs = GradientAnisotropicDiffusion.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_GradientAnisotropicDiffusion_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = GradientAnisotropicDiffusion.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py000066400000000000000000000022451227300005300331610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.morphology import GrayscaleFillHoleImageFilter def test_GrayscaleFillHoleImageFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = GrayscaleFillHoleImageFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_GrayscaleFillHoleImageFilter_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = GrayscaleFillHoleImageFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py000066400000000000000000000022521227300005300333250ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.morphology import GrayscaleGrindPeakImageFilter def test_GrayscaleGrindPeakImageFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = GrayscaleGrindPeakImageFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_GrayscaleGrindPeakImageFilter_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = GrayscaleGrindPeakImageFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py000066400000000000000000000025721227300005300311320ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.histogrammatching import HistogramMatching def test_HistogramMatching_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-3, ), numberOfHistogramLevels=dict(argstr='--numberOfHistogramLevels %d', ), numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), referenceVolume=dict(argstr='%s', position=-2, ), terminal_output=dict(mandatory=True, nohash=True, ), threshold=dict(argstr='--threshold ', ), ) inputs = HistogramMatching.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_HistogramMatching_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = HistogramMatching.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py000066400000000000000000000023721227300005300307770ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.imagelabelcombine import ImageLabelCombine def test_ImageLabelCombine_inputs(): input_map = dict(InputLabelMap_A=dict(argstr='%s', position=-3, ), InputLabelMap_B=dict(argstr='%s', position=-2, ), OutputLabelMap=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), first_overwrites=dict(argstr='--first_overwrites ', ), ignore_exception=dict(nohash=True, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ImageLabelCombine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, 
getattr(inputs.traits()[key], metakey), value def test_ImageLabelCombine_outputs(): output_map = dict(OutputLabelMap=dict(position=-1, ), ) outputs = ImageLabelCombine.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py000066400000000000000000000023741227300005300307330ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.arithmetic import MaskScalarVolume def test_MaskScalarVolume_inputs(): input_map = dict(InputVolume=dict(argstr='%s', position=-3, ), MaskVolume=dict(argstr='%s', position=-2, ), OutputVolume=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), label=dict(argstr='--label %d', ), replace=dict(argstr='--replace %d', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MaskScalarVolume.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MaskScalarVolume_outputs(): output_map = dict(OutputVolume=dict(position=-1, ), ) outputs = MaskScalarVolume.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py000066400000000000000000000022631227300005300310250ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.denoising import MedianImageFilter def test_MedianImageFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), neighborhood=dict(argstr='--neighborhood %s', sep=',', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MedianImageFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MedianImageFilter_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = MedianImageFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py000066400000000000000000000023511227300005300320350ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.arithmetic import MultiplyScalarVolumes def test_MultiplyScalarVolumes_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', position=-3, ), inputVolume2=dict(argstr='%s', position=-2, ), order=dict(argstr='--order %s', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), 
terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MultiplyScalarVolumes.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MultiplyScalarVolumes_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = MultiplyScalarVolumes.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py000066400000000000000000000034671227300005300321520ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.n4itkbiasfieldcorrection import N4ITKBiasFieldCorrection def test_N4ITKBiasFieldCorrection_inputs(): input_map = dict(args=dict(argstr='%s', ), bsplineorder=dict(argstr='--bsplineorder %d', ), convergencethreshold=dict(argstr='--convergencethreshold %f', ), environ=dict(nohash=True, usedefault=True, ), histogramsharpening=dict(argstr='--histogramsharpening %s', sep=',', ), ignore_exception=dict(nohash=True, usedefault=True, ), inputimage=dict(argstr='--inputimage %s', ), iterations=dict(argstr='--iterations %s', sep=',', ), maskimage=dict(argstr='--maskimage %s', ), meshresolution=dict(argstr='--meshresolution %s', sep=',', ), outputbiasfield=dict(argstr='--outputbiasfield %s', hash_files=False, ), outputimage=dict(argstr='--outputimage %s', hash_files=False, ), shrinkfactor=dict(argstr='--shrinkfactor %d', ), splinedistance=dict(argstr='--splinedistance %f', ), terminal_output=dict(mandatory=True, nohash=True, ), weightimage=dict(argstr='--weightimage %s', ), ) inputs = N4ITKBiasFieldCorrection.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_N4ITKBiasFieldCorrection_outputs(): output_map = dict(outputbiasfield=dict(), outputimage=dict(), ) outputs = N4ITKBiasFieldCorrection.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py000066400000000000000000000047611227300005300333410ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.resamplescalarvectordwivolume import ResampleScalarVectorDWIVolume def test_ResampleScalarVectorDWIVolume_inputs(): input_map = dict(Inverse_ITK_Transformation=dict(argstr='--Inverse_ITK_Transformation ', ), Reference=dict(argstr='--Reference %s', ), args=dict(argstr='%s', ), centered_transform=dict(argstr='--centered_transform ', ), defField=dict(argstr='--defField %s', ), default_pixel_value=dict(argstr='--default_pixel_value %f', ), direction_matrix=dict(argstr='--direction_matrix %s', sep=',', ), environ=dict(nohash=True, usedefault=True, ), hfieldtype=dict(argstr='--hfieldtype %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), image_center=dict(argstr='--image_center %s', ), inputVolume=dict(argstr='%s', position=-2, ), interpolation=dict(argstr='--interpolation %s', ), notbulk=dict(argstr='--notbulk ', ), number_of_thread=dict(argstr='--number_of_thread %d', ), origin=dict(argstr='--origin %s', ), 
outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), rotation_point=dict(argstr='--rotation_point %s', ), size=dict(argstr='--size %s', sep=',', ), spaceChange=dict(argstr='--spaceChange ', ), spacing=dict(argstr='--spacing %s', sep=',', ), spline_order=dict(argstr='--spline_order %d', ), terminal_output=dict(mandatory=True, nohash=True, ), transform=dict(argstr='--transform %s', ), transform_matrix=dict(argstr='--transform_matrix %s', sep=',', ), transform_order=dict(argstr='--transform_order %s', ), transformationFile=dict(argstr='--transformationFile %s', ), window_function=dict(argstr='--window_function %s', ), ) inputs = ResampleScalarVectorDWIVolume.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ResampleScalarVectorDWIVolume_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = ResampleScalarVectorDWIVolume.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py000066400000000000000000000023511227300005300320050ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.arithmetic import SubtractScalarVolumes def test_SubtractScalarVolumes_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', position=-3, ), inputVolume2=dict(argstr='%s', position=-2, ), order=dict(argstr='--order %s', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SubtractScalarVolumes.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SubtractScalarVolumes_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = SubtractScalarVolumes.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py000066400000000000000000000026121227300005300317670ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.thresholdscalarvolume import ThresholdScalarVolume def test_ThresholdScalarVolume_inputs(): input_map = dict(InputVolume=dict(argstr='%s', position=-2, ), OutputVolume=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), lower=dict(argstr='--lower %d', ), outsidevalue=dict(argstr='--outsidevalue %d', ), terminal_output=dict(mandatory=True, nohash=True, ), threshold=dict(argstr='--threshold %d', ), thresholdtype=dict(argstr='--thresholdtype %s', ), upper=dict(argstr='--upper %d', ), ) inputs = ThresholdScalarVolume.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ThresholdScalarVolume_outputs(): 
output_map = dict(OutputVolume=dict(position=-1, ), ) outputs = ThresholdScalarVolume.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value test_auto_VotingBinaryHoleFillingImageFilter.py000066400000000000000000000027021227300005300342770ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/filtering/tests# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.votingbinaryholefillingimagefilter import VotingBinaryHoleFillingImageFilter def test_VotingBinaryHoleFillingImageFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), background=dict(argstr='--background %d', ), environ=dict(nohash=True, usedefault=True, ), foreground=dict(argstr='--foreground %d', ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), majorityThreshold=dict(argstr='--majorityThreshold %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), radius=dict(argstr='--radius %s', sep=',', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = VotingBinaryHoleFillingImageFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_VotingBinaryHoleFillingImageFilter_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = VotingBinaryHoleFillingImageFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py000066400000000000000000000051311227300005300266750ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class ThresholdScalarVolumeInputSpec(CommandLineInputSpec): InputVolume = File(position=-2, desc="Input volume", exists=True, argstr="%s") OutputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Thresholded input volume", argstr="%s") threshold = traits.Int(desc="Threshold value", argstr="--threshold %d") lower = traits.Int(desc="Lower threshold value", argstr="--lower %d") upper = traits.Int(desc="Upper threshold value", argstr="--upper %d") outsidevalue = traits.Int(desc="Set the voxels to this value if they fall outside the threshold range", argstr="--outsidevalue %d") thresholdtype = traits.Enum("Below", "Above", "Outside", desc="What kind of threshold to perform. If Outside is selected, uses Upper and Lower values. If Below is selected, uses the ThresholdValue, if Above is selected, uses the ThresholdValue.", argstr="--thresholdtype %s") class ThresholdScalarVolumeOutputSpec(TraitedSpec): OutputVolume = File(position=-1, desc="Thresholded input volume", exists=True) class ThresholdScalarVolume(SEMLikeCommandLine): """title: Threshold Scalar Volume category: Filtering description:

Threshold an image.

Set image values to a user-specified outside value if they are below, above, or between simple threshold values.

ThresholdAbove: The values greater than or equal to the threshold value are set to OutsideValue.

ThresholdBelow: The values less than or equal to the threshold value are set to OutsideValue.

ThresholdOutside: The values outside the range Lower-Upper are set to OutsideValue.

Although all image types are supported on input, only signed types are produced.

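A minimal usage sketch (file names and threshold values are illustrative placeholders; running it assumes the ThresholdScalarVolume CLI executable is on the PATH)::

    from nipype.interfaces.slicer.filtering.thresholdscalarvolume import ThresholdScalarVolume
    thresh = ThresholdScalarVolume()
    thresh.inputs.InputVolume = 'scan.nii'               # placeholder input volume
    thresh.inputs.OutputVolume = 'scan_thresholded.nii'  # requested output file
    thresh.inputs.thresholdtype = 'Outside'              # values outside Lower-Upper get outsidevalue
    thresh.inputs.lower = 10
    thresh.inputs.upper = 200
    thresh.inputs.outsidevalue = 0
    result = thresh.run()
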
version: 0.1.0.$Revision: 2104 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Threshold contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = ThresholdScalarVolumeInputSpec output_spec = ThresholdScalarVolumeOutputSpec _cmd = "ThresholdScalarVolume " _outputs_filenames = {'OutputVolume':'OutputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py000066400000000000000000000050701227300005300314260ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class VotingBinaryHoleFillingImageFilterInputSpec(CommandLineInputSpec): radius = InputMultiPath(traits.Int, desc="The radius of a hole to be filled", sep=",", argstr="--radius %s") majorityThreshold = traits.Int(desc="The number of pixels over 50% that will decide whether an OFF pixel will become ON or not. For example, if the neighborhood of a pixel has 124 pixels (excluding itself), the 50% will be 62, and if you set a Majority threshold of 5, that means that the filter will require 67 or more neighbor pixels to be ON in order to switch the current OFF pixel to ON.", argstr="--majorityThreshold %d") background = traits.Int(desc="The value associated with the background (not object)", argstr="--background %d") foreground = traits.Int(desc="The value associated with the foreground (object)", argstr="--foreground %d") inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s") class VotingBinaryHoleFillingImageFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output filtered", exists=True) class VotingBinaryHoleFillingImageFilter(SEMLikeCommandLine): """title: Voting Binary Hole Filling Image Filter category: Filtering description: Applies a voting operation in order to fill-in cavities. This can be used for smoothing contours and for filling holes in binary images. This technique is used frequently when segmenting complete organs that may have ducts or vasculature that may not have been included in the initial segmentation, e.g. lungs, kidneys, liver. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/VotingBinaryHoleFillingImageFilter contributor: Bill Lorensen (GE) acknowledgements: This command module was derived from Insight/Examples/Filtering/VotingBinaryHoleFillingImageFilter (copyright) Insight Software Consortium """ input_spec = VotingBinaryHoleFillingImageFilterInputSpec output_spec = VotingBinaryHoleFillingImageFilterOutputSpec _cmd = "VotingBinaryHoleFillingImageFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/generate_classes.py000066400000000000000000000454521227300005300236210ustar00rootroot00000000000000"""This script generates Slicer Interfaces based on the CLI modules XML. 
CLI modules are selected from the hardcoded list below and generated code is placed in the cli_modules.py file (and imported in __init__.py). For this to work correctly you must have your CLI executabes in $PATH""" import xml.dom.minidom import subprocess import os from shutil import rmtree import keyword python_keywords = keyword.kwlist # If c++ SEM module uses one of these key words as a command line parameter, we need to modify variable def force_to_valid_python_variable_name(old_name): """ Valid c++ names are not always valid in python, so provide alternate naming >>> force_to_valid_python_variable_name('lambda') 'opt_lambda' >>> force_to_valid_python_variable_name('inputVolume') 'inputVolume' """ new_name = old_name new_name = new_name.lstrip().rstrip() if old_name in python_keywords: new_name = 'opt_' + old_name return new_name def add_class_to_package(class_codes, class_names, module_name, package_dir): module_python_filename = os.path.join(package_dir, "%s.py" % module_name) f_m = open(module_python_filename, 'w') f_i = open(os.path.join(package_dir, "__init__.py"), 'a+') f_m.write("""# -*- coding: utf8 -*- \"\"\"Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.\"\"\"\n\n""") imports = """from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os\n\n\n""" f_m.write(imports) f_m.write("\n\n".join(class_codes).encode('utf8')) f_i.write("from %s import %s\n" % (module_name, ", ".join(class_names))) f_m.close() f_i.close() def crawl_code_struct(code_struct, package_dir): subpackages = [] for k, v in code_struct.iteritems(): if isinstance(v, str) or isinstance(v, unicode): module_name = k.lower() class_name = k class_code = v add_class_to_package( [class_code], [class_name], module_name, package_dir) else: l1 = {} l2 = {} for key in v.keys(): if (isinstance(v[key], str) or isinstance(v[key], unicode)): l1[key] = v[key] else: l2[key] = v[key] if l2: v = l2 subpackages.append(k.lower()) f_i = open(os.path.join(package_dir, "__init__.py"), 'a+') f_i.write("from %s import *\n" % k.lower()) f_i.close() new_pkg_dir = os.path.join(package_dir, k.lower()) if os.path.exists(new_pkg_dir): rmtree(new_pkg_dir) os.mkdir(new_pkg_dir) crawl_code_struct(v, new_pkg_dir) if l1: for ik, iv in l1.iteritems(): crawl_code_struct({ik: {ik: iv}}, new_pkg_dir) elif l1: v = l1 module_name = k.lower() add_class_to_package( v.values(), v.keys(), module_name, package_dir) if subpackages: f = open(os.path.join(package_dir, "setup.py"), 'w') f.write("""# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('{pkg_name}', parent_package, top_path) {sub_pks} return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) """.format(pkg_name=package_dir.split("/")[-1], sub_pks="\n ".join(["config.add_data_dir('%s')" % sub_pkg for sub_pkg in subpackages]))) f.close() def generate_all_classes(modules_list=[], launcher=[]): """ modules_list contains all the SEM compliant tools that should have wrappers created for them. launcher containtains the command line prefix wrapper arugments needed to prepare a proper environment for each of the modules. 
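An illustrative invocation (the module subset and launcher prefix below are assumptions that depend on the local installation; with an empty launcher every listed CLI executable must itself be on the PATH)::

    generate_all_classes(modules_list=['MedianImageFilter', 'ExtractSkeleton'],
                         launcher=[])

    # if the tools have to be started through Slicer's environment instead, a
    # launcher prefix such as ['Slicer3', '--launch'] could be supplied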
""" all_code = {} for module in modules_list: print("=" * 80) print("Generating Definition for module {0}".format(module)) print("^" * 80) package, code = generate_class(module, launcher) cur_package = all_code module_name = package.strip().split(" ")[0].split(".")[-1] for package in package.strip().split(" ")[0].split(".")[:-1]: if package not in cur_package: cur_package[package] = {} cur_package = cur_package[package] if module_name not in cur_package: cur_package[module_name] = {} cur_package[module_name][module] = code if os.path.exists("__init__.py"): os.unlink("__init__.py") crawl_code_struct(all_code, os.getcwd()) def generate_class(module, launcher): dom = grab_xml(module, launcher) inputTraits = [] outputTraits = [] outputs_filenames = {} #self._outputs_nodes = [] class_string = "\"\"\"" for desc_str in ['title', 'category', 'description', 'version', 'documentation-url', 'license', 'contributor', 'acknowledgements']: el = dom.getElementsByTagName(desc_str) if el and el[0].firstChild: class_string += desc_str + ": " + el[ 0].firstChild.nodeValue + "\n\n" if desc_str == 'category': category = el[0].firstChild.nodeValue class_string += "\"\"\"" for paramGroup in dom.getElementsByTagName("parameters"): indices = paramGroup.getElementsByTagName('index') max_index = 0 for index in indices: if int(index.firstChild.nodeValue) > max_index: max_index = int(index.firstChild.nodeValue) for param in paramGroup.childNodes: if param.nodeName in ['label', 'description', '#text', '#comment']: continue traitsParams = {} longFlagNode = param.getElementsByTagName('longflag') if longFlagNode: ## Prefer to use longFlag as name if it is given, rather than the parameter name longFlagName = longFlagNode[0].firstChild.nodeValue ## SEM automatically strips prefixed "--" or "-" from from xml before processing ## we need to replicate that behavior here The following ## two nodes in xml have the same behavior in the program ## --test ## test longFlagName = longFlagName.lstrip(" -").rstrip(" ") name = longFlagName name = force_to_valid_python_variable_name(name) traitsParams["argstr"] = "--" + longFlagName + " " else: name = param.getElementsByTagName( 'name')[0].firstChild.nodeValue name = force_to_valid_python_variable_name(name) if param.getElementsByTagName('index'): traitsParams["argstr"] = "" else: traitsParams["argstr"] = "--" + name + " " if param.getElementsByTagName('description') and param.getElementsByTagName('description')[0].firstChild: traitsParams["desc"] = param.getElementsByTagName('description')[0].firstChild.nodeValue.replace('"', "\\\"").replace("\n", ", ") argsDict = {'directory': '%s', 'file': '%s', 'integer': "%d", 'double': "%f", 'float': "%f", 'image': "%s", 'transform': "%s", 'boolean': '', 'string-enumeration': '%s', 'string': "%s", 'integer-enumeration': '%s', 'table': '%s', 'point': '%s', 'region': '%s', 'geometry': '%s'} if param.nodeName.endswith('-vector'): traitsParams["argstr"] += "%s" else: traitsParams["argstr"] += argsDict[param.nodeName] index = param.getElementsByTagName('index') if index: traitsParams["position"] = int( index[0].firstChild.nodeValue) - (max_index + 1) desc = param.getElementsByTagName('description') if index: traitsParams["desc"] = desc[0].firstChild.nodeValue typesDict = {'integer': "traits.Int", 'double': "traits.Float", 'float': "traits.Float", 'image': "File", 'transform': "File", 'boolean': "traits.Bool", 'string': "traits.Str", 'file': "File", 'geometry': "File", 'directory': "Directory", 'table': "File", 'point': "traits.List", 'region': 
"traits.List"} if param.nodeName.endswith('-enumeration'): type = "traits.Enum" values = ['"%s"' % el.firstChild.nodeValue for el in param.getElementsByTagName('element')] elif param.nodeName.endswith('-vector'): type = "InputMultiPath" if param.nodeName in ['file', 'directory', 'image', 'geometry', 'transform', 'table']: values = ["%s(exists=True)" % typesDict[ param.nodeName.replace('-vector', '')]] else: values = [typesDict[param.nodeName.replace('-vector', '')]] traitsParams["sep"] = ',' elif param.getAttribute('multiple') == "true": type = "InputMultiPath" if param.nodeName in ['file', 'directory', 'image', 'geometry', 'transform', 'table']: values = ["%s(exists=True)" % typesDict[param.nodeName]] elif param.nodeName in ['point', 'region']: values = ["%s(traits.Float(), minlen=3, maxlen=3)" % typesDict[param.nodeName]] else: values = [typesDict[param.nodeName]] traitsParams["argstr"] += "..." else: values = [] type = typesDict[param.nodeName] if param.nodeName in ['file', 'directory', 'image', 'geometry', 'transform', 'table']: if not param.getElementsByTagName('channel'): raise RuntimeError("Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field.\n{0}".format(traitsParams)) elif param.getElementsByTagName('channel')[0].firstChild.nodeValue == 'output': traitsParams["hash_files"] = False inputTraits.append( "%s = traits.Either(traits.Bool, %s(%s), %s)" % (name, type, parse_values( values).replace("exists=True", ""), parse_params(traitsParams))) traitsParams["exists"] = True traitsParams.pop("argstr") traitsParams.pop("hash_files") outputTraits.append("%s = %s(%s%s)" % (name, type.replace("Input", "Output"), parse_values(values), parse_params(traitsParams))) outputs_filenames[ name] = gen_filename_from_param(param, name) elif param.getElementsByTagName('channel')[0].firstChild.nodeValue == 'input': if param.nodeName in ['file', 'directory', 'image', 'geometry', 'transform', 'table'] and type not in ["InputMultiPath", "traits.List"]: traitsParams["exists"] = True inputTraits.append("%s = %s(%s%s)" % (name, type, parse_values(values), parse_params(traitsParams))) else: raise RuntimeError("Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field to be in ['input','output'].\n{0}".format(traitsParams)) else: # For all other parameter types, they are implicitly only input types inputTraits.append("%s = %s(%s%s)" % (name, type, parse_values( values), parse_params(traitsParams))) input_spec_code = "class " + module + "InputSpec(CommandLineInputSpec):\n" for trait in inputTraits: input_spec_code += " " + trait + "\n" output_spec_code = "class " + module + "OutputSpec(TraitedSpec):\n" if not outputTraits: output_spec_code += " pass\n" else: for trait in outputTraits: output_spec_code += " " + trait + "\n" output_filenames_code = "_outputs_filenames = {" output_filenames_code += ",".join(["'%s':'%s'" % ( key, value) for key, value in outputs_filenames.iteritems()]) output_filenames_code += "}" input_spec_code += "\n\n" output_spec_code += "\n\n" template = """class %name%(SEMLikeCommandLine): %class_str% input_spec = %name%InputSpec output_spec = %name%OutputSpec _cmd = "%launcher% %name% " %output_filenames_code%\n""" main_class = template.replace('%class_str%', class_string).replace("%name%", module).replace("%output_filenames_code%", output_filenames_code).replace("%launcher%", " ".join(launcher)) return category, 
input_spec_code + output_spec_code + main_class def grab_xml(module, launcher): # cmd = CommandLine(command = "Slicer3", args="--launch %s --xml"%module) # ret = cmd.run() command_list = launcher[:] # force copy to preserve original command_list.extend([module, "--xml"]) final_command = " ".join(command_list) xmlReturnValue = subprocess.Popen( final_command, stdout=subprocess.PIPE, shell=True).communicate()[0] return xml.dom.minidom.parseString(xmlReturnValue) # if ret.runtime.returncode == 0: # return xml.dom.minidom.parseString(ret.runtime.stdout) # else: # raise Exception(cmd.cmdline + " failed:\n%s"%ret.runtime.stderr) def parse_params(params): list = [] for key, value in params.iteritems(): if isinstance(value, str) or isinstance(value, unicode): list.append('%s="%s"' % (key, value.replace('"', "'"))) else: list.append('%s=%s' % (key, value)) return ", ".join(list) def parse_values(values): values = ['%s' % value for value in values] if len(values) > 0: retstr = ", ".join(values) + ", " else: retstr = "" return retstr def gen_filename_from_param(param, base): fileExtensions = param.getAttribute("fileExtensions") if fileExtensions: ## It is possible that multiple file extensions can be specified in a ## comma separated list, This will extract just the first extension firstFileExtension = fileExtensions.split(',')[0] ext = firstFileExtension else: ext = {'image': '.nii', 'transform': '.mat', 'file': '', 'directory': '', 'geometry': '.vtk'}[param.nodeName] return base + ext if __name__ == "__main__": ## NOTE: For now either the launcher needs to be found on the default path, or ## every tool in the modules list must be found on the default path ## AND calling the module with --xml must be supported and compliant. modules_list = ['MedianImageFilter', 'CheckerBoardFilter', 'EMSegmentCommandLine', 'GrayscaleFillHoleImageFilter', #'CreateDICOMSeries', #missing channel 'TractographyLabelMapSeeding', 'IntensityDifferenceMetric', 'DWIToDTIEstimation', 'MaskScalarVolume', 'ImageLabelCombine', 'DTIimport', 'OtsuThresholdImageFilter', 'ExpertAutomatedRegistration', 'ThresholdScalarVolume', 'DWIUnbiasedNonLocalMeansFilter', 'BRAINSFit', 'MergeModels', 'ResampleDTIVolume', 'MultiplyScalarVolumes', 'LabelMapSmoothing', 'RigidRegistration', 'VotingBinaryHoleFillingImageFilter', 'BRAINSROIAuto', 'RobustStatisticsSegmenter', 'GradientAnisotropicDiffusion', 'ProbeVolumeWithModel', 'ModelMaker', 'ExtractSkeleton', 'GrayscaleGrindPeakImageFilter', 'N4ITKBiasFieldCorrection', 'BRAINSResample', 'DTIexport', 'VBRAINSDemonWarp', 'ResampleScalarVectorDWIVolume', 'ResampleScalarVolume', 'OtsuThresholdSegmentation', # 'ExecutionModelTour', 'HistogramMatching', 'BRAINSDemonWarp', 'ModelToLabelMap', 'GaussianBlurImageFilter', 'DiffusionWeightedVolumeMasking', 'GrayscaleModelMaker', 'CastScalarVolume', 'DicomToNrrdConverter', 'AffineRegistration', 'AddScalarVolumes', 'LinearRegistration', 'SimpleRegionGrowingSegmentation', 'DWIJointRicianLMMSEFilter', 'MultiResolutionAffineRegistration', 'SubtractScalarVolumes', 'DWIRicianLMMSEFilter', 'OrientScalarVolume', 'FiducialRegistration', 'BSplineDeformableRegistration', 'CurvatureAnisotropicDiffusion', 'PETStandardUptakeValueComputation', 'DiffusionTensorScalarMeasurements', 'ACPCTransform', 'EMSegmentTransformToNewFormat', 'BSplineToDeformationField'] ## SlicerExecutionModel compliant tools that are usually statically built, and don't need the Slicer3 --launcher generate_all_classes(modules_list=modules_list,launcher=[]) ## Tools compliant with SlicerExecutionModel called 
from the Slicer environment (for shared lib compatibility) #launcher = ['/home/raid3/gorgolewski/software/slicer/Slicer', '--launch'] #generate_all_classes(modules_list=modules_list, launcher=launcher) #generate_all_classes(modules_list=['BRAINSABC'], launcher=[] ) nipype-0.9.2/nipype/interfaces/slicer/legacy/000077500000000000000000000000001227300005300211725ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/legacy/__init__.py000066400000000000000000000005631227300005300233070ustar00rootroot00000000000000from diffusion import * from segmentation import OtsuThresholdSegmentation from filtering import OtsuThresholdImageFilter, ResampleScalarVolume from converters import BSplineToDeformationField from registration import BSplineDeformableRegistration, AffineRegistration, MultiResolutionAffineRegistration, RigidRegistration, LinearRegistration, ExpertAutomatedRegistration nipype-0.9.2/nipype/interfaces/slicer/legacy/converters.py000066400000000000000000000025351227300005300237430ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class BSplineToDeformationFieldInputSpec(CommandLineInputSpec): tfm = File(exists=True, argstr="--tfm %s") refImage = File(exists=True, argstr="--refImage %s") defImage = traits.Either(traits.Bool, File(), hash_files=False, argstr="--defImage %s") class BSplineToDeformationFieldOutputSpec(TraitedSpec): defImage = File(exists=True) class BSplineToDeformationField(SEMLikeCommandLine): """title: BSpline to deformation field category: Legacy.Converters description: Create a dense deformation field from a bspline+bulk transform. version: 0.1.0.$Revision: 2104 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BSplineToDeformationField contributor: Andrey Fedorov (SPL, BWH) acknowledgements: This work is funded by NIH grants R01 CA111288 and U01 CA151261. 
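## Illustration (not part of the generated interfaces): the generator script above
## normalizes <longflag> values before building a traits argstr, stripping any
## leading "-"/"--" exactly as its inline comments describe. A minimal,
## self-contained sketch of that single step, using a hypothetical helper name:
import xml.dom.minidom

def longflag_to_argstr(xml_fragment):
    """Rebuild the command-line argstr for a single <longflag> element."""
    dom = xml.dom.minidom.parseString(xml_fragment)
    flag = dom.getElementsByTagName('longflag')[0].firstChild.nodeValue
    # SEM treats "--test", "-test" and "test" identically, so strip the prefix
    flag = flag.lstrip(" -").rstrip(" ")
    return "--" + flag + " "

assert longflag_to_argstr("<longflag>--test</longflag>") == "--test "
assert longflag_to_argstr("<longflag>test</longflag>") == "--test "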
""" input_spec = BSplineToDeformationFieldInputSpec output_spec = BSplineToDeformationFieldOutputSpec _cmd = "BSplineToDeformationField " _outputs_filenames = {'defImage':'defImage.nii'} nipype-0.9.2/nipype/interfaces/slicer/legacy/diffusion/000077500000000000000000000000001227300005300231605ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/legacy/diffusion/__init__.py000066400000000000000000000000651227300005300252720ustar00rootroot00000000000000from denoising import DWIUnbiasedNonLocalMeansFilter nipype-0.9.2/nipype/interfaces/slicer/legacy/diffusion/denoising.py000066400000000000000000000067511227300005300255220ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class DWIUnbiasedNonLocalMeansFilterInputSpec(CommandLineInputSpec): rs = InputMultiPath(traits.Int, desc="The algorithm search for similar voxels in a neighborhood of this size (larger sizes than the default one are extremely slow).", sep=",", argstr="--rs %s") rc = InputMultiPath(traits.Int, desc="Similarity between blocks is measured using windows of this size.", sep=",", argstr="--rc %s") hp = traits.Float(desc="This parameter is related to noise; the larger the parameter, the more agressive the filtering. Should be near 1, and only values between 0.8 and 1.2 are allowed", argstr="--hp %f") ng = traits.Int(desc="The number of the closest gradients that are used to jointly filter a given gradient direction (a maximum of 5 is allowed).", argstr="--ng %d") re = InputMultiPath(traits.Int, desc="A neighborhood of this size is used to compute the statistics for noise estimation.", sep=",", argstr="--re %s") inputVolume = File(position=-2, desc="Input DWI volume.", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output DWI volume.", argstr="%s") class DWIUnbiasedNonLocalMeansFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output DWI volume.", exists=True) class DWIUnbiasedNonLocalMeansFilter(SEMLikeCommandLine): """title: DWI Unbiased Non Local Means Filter category: Legacy.Diffusion.Denoising description: This module reduces noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the images using a Unbiased Non Local Means for Rician noise algorithm. It exploits not only the spatial redundancy, but the redundancy in similar gradient directions as well; it takes into account the N closest gradient directions to the direction being processed (a maximum of 5 gradient directions is allowed to keep a reasonable computational load, since we do not use neither similarity maps nor block-wise implementation). The noise parameter is automatically estimated in the same way as in the jointLMMSE module. A complete description of the algorithm may be found in: Antonio Tristan-Vega and Santiago Aja-Fernandez, DWI filtering using joint information for DTI and HARDI, Medical Image Analysis, Volume 14, Issue 2, Pages 205-218. 2010. Please, note that the execution of this filter is extremely slow, son only very conservative parameters (block size and search size as small as possible) should be used. Even so, its execution may take several hours. 
The advantage of this filter over joint LMMSE is its better preservation of edges and fine structures. version: 0.0.1.$Revision: 1 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/UnbiasedNonLocalMeansFilterForDWI contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa) acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). """ input_spec = DWIUnbiasedNonLocalMeansFilterInputSpec output_spec = DWIUnbiasedNonLocalMeansFilterOutputSpec _cmd = "DWIUnbiasedNonLocalMeansFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/legacy/diffusion/tests/000077500000000000000000000000001227300005300243225ustar00rootroot00000000000000test_auto_DWIUnbiasedNonLocalMeansFilter.py000066400000000000000000000026251227300005300345670ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/legacy/diffusion/tests# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.diffusion.denoising import DWIUnbiasedNonLocalMeansFilter def test_DWIUnbiasedNonLocalMeansFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), hp=dict(argstr='--hp %f', ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), ng=dict(argstr='--ng %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), rc=dict(argstr='--rc %s', sep=',', ), re=dict(argstr='--re %s', sep=',', ), rs=dict(argstr='--rs %s', sep=',', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = DWIUnbiasedNonLocalMeansFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DWIUnbiasedNonLocalMeansFilter_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = DWIUnbiasedNonLocalMeansFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/legacy/filtering.py000066400000000000000000000126421227300005300235340ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class OtsuThresholdImageFilterInputSpec(CommandLineInputSpec): insideValue = traits.Int(desc="The value assigned to pixels that are inside the computed threshold", argstr="--insideValue %d") outsideValue = traits.Int(desc="The value assigned to pixels that are outside the computed threshold", argstr="--outsideValue %d") numberOfBins = traits.Int(desc="This is an advanced parameter. The number of bins in the histogram used to model the probability mass function of the two intensity distributions. Small numbers of bins may result in a more conservative threshold. The default should suffice for most applications. 
Experimentation is the only way to see the effect of varying this parameter.", argstr="--numberOfBins %d") inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s") class OtsuThresholdImageFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output filtered", exists=True) class OtsuThresholdImageFilter(SEMLikeCommandLine): """title: Otsu Threshold Image Filter category: Legacy.Filtering description: This filter creates a binary thresholded image that separates an image into foreground and background components. The filter calculates the optimum threshold separating those two classes so that their combined spread (intra-class variance) is minimal (see http://en.wikipedia.org/wiki/Otsu%27s_method). Then the filter applies that threshold to the input image using the itkBinaryThresholdImageFilter. The numberOfHistogram bins can be set for the Otsu Calculator. The insideValue and outsideValue can be set for the BinaryThresholdImageFilter. The filter produces a labeled volume. The original reference is: N.Otsu, ‘‘A threshold selection method from gray level histograms,’’ IEEE Trans.Syst.ManCybern.SMC-9,62–66 1979. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OtsuThresholdImageFilter contributor: Bill Lorensen (GE) acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium """ input_spec = OtsuThresholdImageFilterInputSpec output_spec = OtsuThresholdImageFilterOutputSpec _cmd = "OtsuThresholdImageFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class ResampleScalarVolumeInputSpec(CommandLineInputSpec): spacing = InputMultiPath(traits.Float, desc="Spacing along each dimension (0 means use input spacing)", sep=",", argstr="--spacing %s") interpolation = traits.Enum("linear", "nearestNeighbor", "bspline", "hamming", "cosine", "welch", "lanczos", "blackman", desc="Sampling algorithm (linear, nearest neighbor, bspline(cubic) or windowed sinc). There are several sinc algorithms available as described in the following publication: Erik H. W. Meijering, Wiro J. Niessen, Josien P. W. Pluim, Max A. Viergever: Quantitative Comparison of Sinc-Approximating Kernels for Medical Image Interpolation. MICCAI 1999, pp. 210-217. Each window has a radius of 3;", argstr="--interpolation %s") InputVolume = File(position=-2, desc="Input volume to be resampled", exists=True, argstr="%s") OutputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Resampled Volume", argstr="%s") class ResampleScalarVolumeOutputSpec(TraitedSpec): OutputVolume = File(position=-1, desc="Resampled Volume", exists=True) class ResampleScalarVolume(SEMLikeCommandLine): """title: Resample Scalar Volume category: Legacy.Filtering description: Resampling an image is an important task in image analysis. It is especially important in the frame of image registration. This module implements image resampling through the use of itk Transforms. This module uses an Identity Transform. The resampling is controlled by the Output Spacing. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. 
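## Illustration of the Otsu criterion used by the threshold filters above:
## choose the histogram bin that maximizes the between-class variance
## w0*w1*(mu0 - mu1)**2, which is equivalent to minimizing the combined
## intra-class variance (Otsu, 1979). Pure-Python sketch only; the module
## itself relies on the ITK implementation.
def otsu_threshold_bin(hist):
    total = float(sum(hist))
    grand_sum = float(sum(i * h for i, h in enumerate(hist)))
    w0 = sum0 = 0.0
    best_t, best_between = 1, -1.0
    for t in range(1, len(hist)):
        w0 += hist[t - 1]
        sum0 += (t - 1) * hist[t - 1]
        w1 = total - w0
        if w0 == 0.0 or w1 == 0.0:
            continue
        mu0, mu1 = sum0 / w0, (grand_sum - sum0) / w1
        between = w0 * w1 * (mu0 - mu1) ** 2
        if between > best_between:
            best_between, best_t = between, t
    return best_t  # bins [0, t) are background, [t, len(hist)) are foreground

# toy bimodal histogram: dark background near bin 2, bright object near bin 7
print(otsu_threshold_bin([8, 20, 30, 12, 2, 1, 10, 25, 15, 4]))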
The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. Several interpolators are available: linear, nearest neighbor, bspline and five flavors of sinc. The sinc interpolators, although more precise, are much slower than the linear and nearest neighbor interpolator. To resample label volumnes, nearest neighbor interpolation should be used exclusively. version: 0.1.0.$Revision: 20594 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleVolume contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = ResampleScalarVolumeInputSpec output_spec = ResampleScalarVolumeOutputSpec _cmd = "ResampleScalarVolume " _outputs_filenames = {'OutputVolume':'OutputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/legacy/registration.py000066400000000000000000000657741227300005300243010ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class BSplineDeformableRegistrationInputSpec(CommandLineInputSpec): iterations = traits.Int(desc="Number of iterations", argstr="--iterations %d") gridSize = traits.Int(desc="Number of grid points on interior of the fixed image. Larger grid sizes allow for finer registrations.", argstr="--gridSize %d") histogrambins = traits.Int(desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a deformable registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", argstr="--histogrambins %d") spatialsamples = traits.Int(desc="Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", argstr="--spatialsamples %d") constrain = traits.Bool(desc="Constrain the deformation to the amount specified in Maximum Deformation", argstr="--constrain ") maximumDeformation = traits.Float(desc="If Constrain Deformation is checked, limit the deformation to this amount.", argstr="--maximumDeformation %f") default = traits.Int(desc="Default pixel value used if resampling a pixel outside of the volume.", argstr="--default %d") initialtransform = File(desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. This transform should be an affine or rigid transform. It is used an a bulk transform for the BSpline. Optional.", exists=True, argstr="--initialtransform %s") FixedImageFileName = File(position=-2, desc="Fixed image to which to register", exists=True, argstr="%s") MovingImageFileName = File(position=-1, desc="Moving image", exists=True, argstr="%s") outputtransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. 
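## Hedged usage sketch for the ResampleScalarVolume interface above. It also
## shows how an InputMultiPath trait declared with sep="," is rendered on the
## command line. File names are hypothetical and the input must exist
## (exists=True):
from nipype.interfaces.slicer.legacy.filtering import ResampleScalarVolume

resample = ResampleScalarVolume()
resample.inputs.InputVolume = 'input.nii'
resample.inputs.OutputVolume = True          # auto-named OutputVolume.nii
resample.inputs.spacing = [1.0, 1.0, 1.0]    # rendered as "--spacing 1.0,1.0,1.0"
resample.inputs.interpolation = 'linear'     # one of the enum values listed above
print(resample.cmdline)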
Optional (specify an output transform or an output volume or both).", argstr="--outputtransform %s") outputwarp = traits.Either(traits.Bool, File(), hash_files=False, desc="Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", argstr="--outputwarp %s") resampledmovingfilename = traits.Either(traits.Bool, File(), hash_files=False, desc="Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--resampledmovingfilename %s") class BSplineDeformableRegistrationOutputSpec(TraitedSpec): outputtransform = File(desc="Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True) outputwarp = File(desc="Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", exists=True) resampledmovingfilename = File(desc="Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True) class BSplineDeformableRegistration(SEMLikeCommandLine): """title: BSpline Deformable Registration category: Legacy.Registration description: Registers two images together using BSpline transform and mutual information. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BSplineDeformableRegistration contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = BSplineDeformableRegistrationInputSpec output_spec = BSplineDeformableRegistrationOutputSpec _cmd = "BSplineDeformableRegistration " _outputs_filenames = {'resampledmovingfilename':'resampledmovingfilename.nii','outputtransform':'outputtransform.txt','outputwarp':'outputwarp.nrrd'} class AffineRegistrationInputSpec(CommandLineInputSpec): fixedsmoothingfactor = traits.Int(desc="Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", argstr="--fixedsmoothingfactor %d") movingsmoothingfactor = traits.Int(desc="Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", argstr="--movingsmoothingfactor %d") histogrambins = traits.Int(desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", argstr="--histogrambins %d") spatialsamples = traits.Int(desc="Number of spatial samples to use in estimating Mattes Mutual Information. 
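## The registration modules in this file score image alignment with mutual
## information estimated from a joint histogram, which is why 'histogrambins'
## and 'spatialsamples' matter: too many bins for too few samples gives
## impulse-like PDFs. Sketch of plain (not Mattes) MI from a joint histogram,
## for illustration only:
import math

def mutual_information(joint_hist):
    total = float(sum(sum(row) for row in joint_hist))
    px = [sum(row) / total for row in joint_hist]
    py = [sum(col) / total for col in zip(*joint_hist)]
    mi = 0.0
    for i, row in enumerate(joint_hist):
        for j, count in enumerate(row):
            if count:
                pxy = count / total
                mi += pxy * math.log(pxy / (px[i] * py[j]))
    return mi  # in nats

aligned  = [[40,  2], [ 2, 40]]   # intensities co-occur consistently
shuffled = [[21, 21], [21, 21]]   # no relationship between the two images
print(mutual_information(aligned) > mutual_information(shuffled))  # True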
Larger values yield more accurate PDFs and improved registration quality.", argstr="--spatialsamples %d") iterations = traits.Int(desc="Number of iterations", argstr="--iterations %d") translationscale = traits.Float(desc="Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used is 1/(TranslationScale^2)). This parameter is used to \'weight\' or \'standardized\' the transform parameters and their effect on the registration objective function.", argstr="--translationscale %f") initialtransform = File(desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", exists=True, argstr="--initialtransform %s") FixedImageFileName = File(position=-2, desc="Fixed image to which to register", exists=True, argstr="%s") MovingImageFileName = File(position=-1, desc="Moving image", exists=True, argstr="%s") outputtransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--outputtransform %s") resampledmovingfilename = traits.Either(traits.Bool, File(), hash_files=False, desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--resampledmovingfilename %s") class AffineRegistrationOutputSpec(TraitedSpec): outputtransform = File(desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True) resampledmovingfilename = File(desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True) class AffineRegistration(SEMLikeCommandLine): """title: Affine Registration category: Legacy.Registration description: Registers two images together using an affine transform and mutual information. This module is often used to align images of different subjects or images of the same subject from different modalities. This module can smooth images prior to registration to mitigate noise and improve convergence. Many of the registration parameters require a working knowledge of the algorithm although the default parameters are sufficient for many registration tasks. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/AffineRegistration contributor: Daniel Blezek (GE) acknowledgements: This module was developed by Daniel Blezek while at GE Research with contributions from Jim Miller. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
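## Hedged usage sketch for the AffineRegistration module documented above
## (hypothetical file names; both images must exist and the CLI must be on
## PATH to actually run):
from nipype.interfaces.slicer.legacy.registration import AffineRegistration

reg = AffineRegistration()
reg.inputs.FixedImageFileName = 'fixed.nii'
reg.inputs.MovingImageFileName = 'moving.nii'
reg.inputs.outputtransform = True            # auto-named outputtransform.txt
reg.inputs.resampledmovingfilename = True    # auto-named resampledmovingfilename.nii
reg.inputs.histogrambins = 30
reg.inputs.spatialsamples = 10000
reg.inputs.iterations = 100                  # a single Int here: argstr is "--iterations %d"
print(reg.cmdline)
# result = reg.run()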
""" input_spec = AffineRegistrationInputSpec output_spec = AffineRegistrationOutputSpec _cmd = "AffineRegistration " _outputs_filenames = {'resampledmovingfilename':'resampledmovingfilename.nii','outputtransform':'outputtransform.txt'} class MultiResolutionAffineRegistrationInputSpec(CommandLineInputSpec): fixedImage = File(position=-2, desc="Image which defines the space into which the moving image is registered", exists=True, argstr="%s") movingImage = File(position=-1, desc="The transform goes from the fixed image's space into the moving image's space", exists=True, argstr="%s") resampledImage = traits.Either(traits.Bool, File(), hash_files=False, desc="Registration results", argstr="--resampledImage %s") saveTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Save the output transform from the registration", argstr="--saveTransform %s") fixedImageMask = File(desc="Label image which defines a mask of interest for the fixed image", exists=True, argstr="--fixedImageMask %s") fixedImageROI = traits.List(desc="Label image which defines a ROI of interest for the fixed image", argstr="--fixedImageROI %s") numIterations = traits.Int(desc="Number of iterations to run at each resolution level.", argstr="--numIterations %d") numLineIterations = traits.Int(desc="Number of iterations to run at each resolution level.", argstr="--numLineIterations %d") stepSize = traits.Float(desc="The maximum step size of the optimizer in voxels", argstr="--stepSize %f") stepTolerance = traits.Float(desc="The maximum step size of the optimizer in voxels", argstr="--stepTolerance %f") metricTolerance = traits.Float(argstr="--metricTolerance %f") class MultiResolutionAffineRegistrationOutputSpec(TraitedSpec): resampledImage = File(desc="Registration results", exists=True) saveTransform = File(desc="Save the output transform from the registration", exists=True) class MultiResolutionAffineRegistration(SEMLikeCommandLine): """title: Robust Multiresolution Affine Registration category: Legacy.Registration description: Provides affine registration using multiple resolution levels and decomposed affine transforms. version: 0.1.0.$Revision: 2104 $(alpha) documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MultiResolutionAffineRegistration contributor: Casey B Goodlett (Utah) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = MultiResolutionAffineRegistrationInputSpec output_spec = MultiResolutionAffineRegistrationOutputSpec _cmd = "MultiResolutionAffineRegistration " _outputs_filenames = {'resampledImage':'resampledImage.nii','saveTransform':'saveTransform.txt'} class RigidRegistrationInputSpec(CommandLineInputSpec): fixedsmoothingfactor = traits.Int(desc="Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", argstr="--fixedsmoothingfactor %d") movingsmoothingfactor = traits.Int(desc="Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). 
Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", argstr="--movingsmoothingfactor %d") testingmode = traits.Bool(desc="Enable testing mode. Input transform will be used to construct floating image. The floating image will be ignored if passed.", argstr="--testingmode ") histogrambins = traits.Int(desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", argstr="--histogrambins %d") spatialsamples = traits.Int(desc="Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", argstr="--spatialsamples %d") iterations = InputMultiPath(traits.Int, desc="Comma separated list of iterations. Must have the same number of elements as the learning rate.", sep=",", argstr="--iterations %s") learningrate = InputMultiPath(traits.Float, desc="Comma separated list of learning rates. Learning rate is a scale factor on the gradient of the registration objective function (gradient with respect to the parameters of the transformation) used to update the parameters of the transformation during optimization. Smaller values cause the optimizer to take smaller steps through the parameter space. Larger values are typically used early in the registration process to take large jumps in parameter space followed by smaller values to home in on the optimum value of the registration objective function. Default is: 0.01, 0.005, 0.0005, 0.0002. Must have the same number of elements as iterations.", sep=",", argstr="--learningrate %s") translationscale = traits.Float(desc="Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used 1/(TranslationScale^2)). This parameter is used to \'weight\' or \'standardized\' the transform parameters and their effect on the registration objective function.", argstr="--translationscale %f") initialtransform = File(desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", exists=True, argstr="--initialtransform %s") FixedImageFileName = File(position=-2, desc="Fixed image to which to register", exists=True, argstr="%s") MovingImageFileName = File(position=-1, desc="Moving image", exists=True, argstr="%s") outputtransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--outputtransform %s") resampledmovingfilename = traits.Either(traits.Bool, File(), hash_files=False, desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--resampledmovingfilename %s") class RigidRegistrationOutputSpec(TraitedSpec): outputtransform = File(desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True) resampledmovingfilename = File(desc="Resampled moving image to the fixed image coordinate frame. 
Optional (specify an output transform or an output volume or both).", exists=True) class RigidRegistration(SEMLikeCommandLine): """title: Rigid Registration category: Legacy.Registration description: Registers two images together using a rigid transform and mutual information. This module was originally distributed as "Linear registration" but has been renamed to eliminate confusion with the "Affine registration" module. This module is often used to align images of different subjects or images of the same subject from different modalities. This module can smooth images prior to registration to mitigate noise and improve convergence. Many of the registration parameters require a working knowledge of the algorithm although the default parameters are sufficient for many registration tasks. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RigidRegistration contributor: Daniel Blezek (GE) acknowledgements: This module was developed by Daniel Blezek while at GE Research with contributions from Jim Miller. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = RigidRegistrationInputSpec output_spec = RigidRegistrationOutputSpec _cmd = "RigidRegistration " _outputs_filenames = {'resampledmovingfilename':'resampledmovingfilename.nii','outputtransform':'outputtransform.txt'} class LinearRegistrationInputSpec(CommandLineInputSpec): fixedsmoothingfactor = traits.Int(desc="Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", argstr="--fixedsmoothingfactor %d") movingsmoothingfactor = traits.Int(desc="Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", argstr="--movingsmoothingfactor %d") histogrambins = traits.Int(desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", argstr="--histogrambins %d") spatialsamples = traits.Int(desc="Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", argstr="--spatialsamples %d") iterations = InputMultiPath(traits.Int, desc="Comma separated list of iterations. Must have the same number of elements as the learning rate.", sep=",", argstr="--iterations %s") learningrate = InputMultiPath(traits.Float, desc="Comma separated list of learning rates. Learning rate is a scale factor on the gradient of the registration objective function (gradient with respect to the parameters of the transformation) used to update the parameters of the transformation during optimization. Smaller values cause the optimizer to take smaller steps through the parameter space. 
Larger values are typically used early in the registration process to take large jumps in parameter space followed by smaller values to home in on the optimum value of the registration objective function. Default is: 0.01, 0.005, 0.0005, 0.0002. Must have the same number of elements as iterations.", sep=",", argstr="--learningrate %s") translationscale = traits.Float(desc="Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used 1/(TranslationScale^2)). This parameter is used to \'weight\' or \'standardized\' the transform parameters and their effect on the registration objective function.", argstr="--translationscale %f") initialtransform = File(desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", exists=True, argstr="--initialtransform %s") FixedImageFileName = File(position=-2, desc="Fixed image to which to register", exists=True, argstr="%s") MovingImageFileName = File(position=-1, desc="Moving image", exists=True, argstr="%s") outputtransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--outputtransform %s") resampledmovingfilename = traits.Either(traits.Bool, File(), hash_files=False, desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--resampledmovingfilename %s") class LinearRegistrationOutputSpec(TraitedSpec): outputtransform = File(desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True) resampledmovingfilename = File(desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True) class LinearRegistration(SEMLikeCommandLine): """title: Linear Registration category: Legacy.Registration description: Registers two images together using a rigid transform and mutual information. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LinearRegistration contributor: Daniel Blezek (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
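## For RigidRegistration and LinearRegistration above, 'iterations' and
## 'learningrate' are comma-separated lists of equal length, one entry per
## optimization stage. Hedged sketch of how the paired lists are set and
## rendered (hypothetical file names):
from nipype.interfaces.slicer.legacy.registration import LinearRegistration

linreg = LinearRegistration()
linreg.inputs.FixedImageFileName = 'fixed.nii'
linreg.inputs.MovingImageFileName = 'moving.nii'
linreg.inputs.outputtransform = True
linreg.inputs.iterations = [100, 100, 50, 20]               # "--iterations 100,100,50,20"
linreg.inputs.learningrate = [0.01, 0.005, 0.0005, 0.0002]  # "--learningrate 0.01,0.005,0.0005,0.0002"
print(linreg.cmdline)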
""" input_spec = LinearRegistrationInputSpec output_spec = LinearRegistrationOutputSpec _cmd = "LinearRegistration " _outputs_filenames = {'resampledmovingfilename':'resampledmovingfilename.nii','outputtransform':'outputtransform.txt'} class ExpertAutomatedRegistrationInputSpec(CommandLineInputSpec): fixedImage = File(position=-2, desc="Image which defines the space into which the moving image is registered", exists=True, argstr="%s") movingImage = File(position=-1, desc="The transform goes from the fixed image's space into the moving image's space", exists=True, argstr="%s") resampledImage = traits.Either(traits.Bool, File(), hash_files=False, desc="Registration results", argstr="--resampledImage %s") loadTransform = File(desc="Load a transform that is immediately applied to the moving image", exists=True, argstr="--loadTransform %s") saveTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Save the transform that results from registration", argstr="--saveTransform %s") initialization = traits.Enum("None", "Landmarks", "ImageCenters", "CentersOfMass", "SecondMoments", desc="Method to prime the registration process", argstr="--initialization %s") registration = traits.Enum("None", "Initial", "Rigid", "Affine", "BSpline", "PipelineRigid", "PipelineAffine", "PipelineBSpline", desc="Method for the registration process", argstr="--registration %s") metric = traits.Enum("MattesMI", "NormCorr", "MeanSqrd", desc="Method to quantify image match", argstr="--metric %s") expectedOffset = traits.Float(desc="Expected misalignment after initialization", argstr="--expectedOffset %f") expectedRotation = traits.Float(desc="Expected misalignment after initialization", argstr="--expectedRotation %f") expectedScale = traits.Float(desc="Expected misalignment after initialization", argstr="--expectedScale %f") expectedSkew = traits.Float(desc="Expected misalignment after initialization", argstr="--expectedSkew %f") verbosityLevel = traits.Enum("Silent", "Standard", "Verbose", desc="Level of detail of reporting progress", argstr="--verbosityLevel %s") sampleFromOverlap = traits.Bool(desc="Limit metric evaluation to the fixed image region overlapped by the moving image", argstr="--sampleFromOverlap ") fixedImageMask = File(desc="Image which defines a mask for the fixed image", exists=True, argstr="--fixedImageMask %s") randomNumberSeed = traits.Int(desc="Seed to generate a consistent random number sequence", argstr="--randomNumberSeed %d") numberOfThreads = traits.Int(desc="Number of CPU threads to use", argstr="--numberOfThreads %d") minimizeMemory = traits.Bool(desc="Reduce the amount of memory required at the cost of increased computation time", argstr="--minimizeMemory ") interpolation = traits.Enum("NearestNeighbor", "Linear", "BSpline", desc="Method for interpolation within the optimization process", argstr="--interpolation %s") fixedLandmarks = InputMultiPath(traits.List(traits.Float(), minlen=3, maxlen=3), desc="Ordered list of landmarks in the fixed image", argstr="--fixedLandmarks %s...") movingLandmarks = InputMultiPath(traits.List(traits.Float(), minlen=3, maxlen=3), desc="Ordered list of landmarks in the moving image", argstr="--movingLandmarks %s...") rigidMaxIterations = traits.Int(desc="Maximum number of rigid optimization iterations", argstr="--rigidMaxIterations %d") rigidSamplingRatio = traits.Float(desc="Portion of the image to use in computing the metric during rigid registration", argstr="--rigidSamplingRatio %f") affineMaxIterations = traits.Int(desc="Maximum number of 
affine optimization iterations", argstr="--affineMaxIterations %d") affineSamplingRatio = traits.Float(desc="Portion of the image to use in computing the metric during affine registration", argstr="--affineSamplingRatio %f") bsplineMaxIterations = traits.Int(desc="Maximum number of bspline optimization iterations", argstr="--bsplineMaxIterations %d") bsplineSamplingRatio = traits.Float(desc="Portion of the image to use in computing the metric during BSpline registration", argstr="--bsplineSamplingRatio %f") controlPointSpacing = traits.Int(desc="Number of pixels between control points", argstr="--controlPointSpacing %d") class ExpertAutomatedRegistrationOutputSpec(TraitedSpec): resampledImage = File(desc="Registration results", exists=True) saveTransform = File(desc="Save the transform that results from registration", exists=True) class ExpertAutomatedRegistration(SEMLikeCommandLine): """title: Expert Automated Registration category: Legacy.Registration description: Provides rigid, affine, and BSpline registration methods via a simple GUI version: 0.1.0.$Revision: 2104 $(alpha) documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ExpertAutomatedRegistration contributor: Stephen R Aylward (Kitware), Casey B Goodlett (Kitware) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = ExpertAutomatedRegistrationInputSpec output_spec = ExpertAutomatedRegistrationOutputSpec _cmd = "ExpertAutomatedRegistration " _outputs_filenames = {'resampledImage':'resampledImage.nii','saveTransform':'saveTransform.txt'} nipype-0.9.2/nipype/interfaces/slicer/legacy/segmentation.py000066400000000000000000000056321227300005300242470ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class OtsuThresholdSegmentationInputSpec(CommandLineInputSpec): brightObjects = traits.Bool(desc="Segmenting bright objects on a dark background or dark objects on a bright background.", argstr="--brightObjects ") numberOfBins = traits.Int(desc="This is an advanced parameter. The number of bins in the histogram used to model the probability mass function of the two intensity distributions. Small numbers of bins may result in a more conservative threshold. The default should suffice for most applications. Experimentation is the only way to see the effect of varying this parameter.", argstr="--numberOfBins %d") faceConnected = traits.Bool(desc="This is an advanced parameter. Adjacent voxels are face connected. This affects the connected component algorithm. If this parameter is false, more regions are likely to be identified.", argstr="--faceConnected ") minimumObjectSize = traits.Int(desc="Minimum size of object to retain. 
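## The 'faceConnected' flag above controls which neighbours count as connected
## when the segmenter labels foreground regions. Tiny 2D sketch (illustration
## only, not the ITK algorithm): with face (4-)connectivity the two diagonal
## pixels form two regions, with 8-connectivity they merge into one.
def label_regions(mask, face_connected=True):
    offsets = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    if not face_connected:
        offsets += [(-1, -1), (-1, 1), (1, -1), (1, 1)]
    seen, regions = set(), 0
    for start in [(r, c) for r, row in enumerate(mask)
                  for c, v in enumerate(row) if v]:
        if start in seen:
            continue
        regions += 1
        stack = [start]
        while stack:
            r, c = stack.pop()
            if (r, c) in seen:
                continue
            seen.add((r, c))
            for dr, dc in offsets:
                nr, nc = r + dr, c + dc
                if 0 <= nr < len(mask) and 0 <= nc < len(mask[0]) and mask[nr][nc]:
                    stack.append((nr, nc))
    return regions

mask = [[1, 0],
        [0, 1]]
print(label_regions(mask, face_connected=True))   # 2 regions
print(label_regions(mask, face_connected=False))  # 1 region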
This parameter can be used to get rid of small regions in noisy images.", argstr="--minimumObjectSize %d") inputVolume = File(position=-2, desc="Input volume to be segmented", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s") class OtsuThresholdSegmentationOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output filtered", exists=True) class OtsuThresholdSegmentation(SEMLikeCommandLine): """title: Otsu Threshold Segmentation category: Legacy.Segmentation description: This filter creates a labeled image from a grayscale image. First, it calculates an optimal threshold that separates the image into foreground and background. This threshold separates those two classes so that their intra-class variance is minimal (see http://en.wikipedia.org/wiki/Otsu%27s_method). Then the filter runs a connected component algorithm to generate unique labels for each connected region of the foreground. Finally, the resulting image is relabeled to provide consecutive numbering. version: 1.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OtsuThresholdSegmentation contributor: Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = OtsuThresholdSegmentationInputSpec output_spec = OtsuThresholdSegmentationOutputSpec _cmd = "OtsuThresholdSegmentation " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/legacy/setup.py000066400000000000000000000007171227300005300227110ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('legacy', parent_package, top_path) config.add_data_dir('diffusion') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/000077500000000000000000000000001227300005300223345ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py000066400000000000000000000033751227300005300305700ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.registration import AffineRegistration def test_AffineRegistration_inputs(): input_map = dict(FixedImageFileName=dict(argstr='%s', position=-2, ), MovingImageFileName=dict(argstr='%s', position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), histogrambins=dict(argstr='--histogrambins %d', ), ignore_exception=dict(nohash=True, usedefault=True, ), initialtransform=dict(argstr='--initialtransform %s', ), iterations=dict(argstr='--iterations %d', ), movingsmoothingfactor=dict(argstr='--movingsmoothingfactor %d', ), outputtransform=dict(argstr='--outputtransform %s', hash_files=False, ), resampledmovingfilename=dict(argstr='--resampledmovingfilename %s', hash_files=False, ), spatialsamples=dict(argstr='--spatialsamples %d', ), terminal_output=dict(mandatory=True, nohash=True, ), 
translationscale=dict(argstr='--translationscale %f', ), ) inputs = AffineRegistration.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_AffineRegistration_outputs(): output_map = dict(outputtransform=dict(), resampledmovingfilename=dict(), ) outputs = AffineRegistration.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py000066400000000000000000000036271227300005300327150ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.registration import BSplineDeformableRegistration def test_BSplineDeformableRegistration_inputs(): input_map = dict(FixedImageFileName=dict(argstr='%s', position=-2, ), MovingImageFileName=dict(argstr='%s', position=-1, ), args=dict(argstr='%s', ), constrain=dict(argstr='--constrain ', ), default=dict(argstr='--default %d', ), environ=dict(nohash=True, usedefault=True, ), gridSize=dict(argstr='--gridSize %d', ), histogrambins=dict(argstr='--histogrambins %d', ), ignore_exception=dict(nohash=True, usedefault=True, ), initialtransform=dict(argstr='--initialtransform %s', ), iterations=dict(argstr='--iterations %d', ), maximumDeformation=dict(argstr='--maximumDeformation %f', ), outputtransform=dict(argstr='--outputtransform %s', hash_files=False, ), outputwarp=dict(argstr='--outputwarp %s', hash_files=False, ), resampledmovingfilename=dict(argstr='--resampledmovingfilename %s', hash_files=False, ), spatialsamples=dict(argstr='--spatialsamples %d', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = BSplineDeformableRegistration.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BSplineDeformableRegistration_outputs(): output_map = dict(outputtransform=dict(), outputwarp=dict(), resampledmovingfilename=dict(), ) outputs = BSplineDeformableRegistration.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py000066400000000000000000000022221227300005300317660ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.converters import BSplineToDeformationField def test_BSplineToDeformationField_inputs(): input_map = dict(args=dict(argstr='%s', ), defImage=dict(argstr='--defImage %s', hash_files=False, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), refImage=dict(argstr='--refImage %s', ), terminal_output=dict(mandatory=True, nohash=True, ), tfm=dict(argstr='--tfm %s', ), ) inputs = BSplineToDeformationField.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BSplineToDeformationField_outputs(): output_map = dict(defImage=dict(), ) outputs = BSplineToDeformationField.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield 
assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py000066400000000000000000000054521227300005300325110ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.registration import ExpertAutomatedRegistration def test_ExpertAutomatedRegistration_inputs(): input_map = dict(affineMaxIterations=dict(argstr='--affineMaxIterations %d', ), affineSamplingRatio=dict(argstr='--affineSamplingRatio %f', ), args=dict(argstr='%s', ), bsplineMaxIterations=dict(argstr='--bsplineMaxIterations %d', ), bsplineSamplingRatio=dict(argstr='--bsplineSamplingRatio %f', ), controlPointSpacing=dict(argstr='--controlPointSpacing %d', ), environ=dict(nohash=True, usedefault=True, ), expectedOffset=dict(argstr='--expectedOffset %f', ), expectedRotation=dict(argstr='--expectedRotation %f', ), expectedScale=dict(argstr='--expectedScale %f', ), expectedSkew=dict(argstr='--expectedSkew %f', ), fixedImage=dict(argstr='%s', position=-2, ), fixedImageMask=dict(argstr='--fixedImageMask %s', ), fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), ignore_exception=dict(nohash=True, usedefault=True, ), initialization=dict(argstr='--initialization %s', ), interpolation=dict(argstr='--interpolation %s', ), loadTransform=dict(argstr='--loadTransform %s', ), metric=dict(argstr='--metric %s', ), minimizeMemory=dict(argstr='--minimizeMemory ', ), movingImage=dict(argstr='%s', position=-1, ), movingLandmarks=dict(argstr='--movingLandmarks %s...', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), randomNumberSeed=dict(argstr='--randomNumberSeed %d', ), registration=dict(argstr='--registration %s', ), resampledImage=dict(argstr='--resampledImage %s', hash_files=False, ), rigidMaxIterations=dict(argstr='--rigidMaxIterations %d', ), rigidSamplingRatio=dict(argstr='--rigidSamplingRatio %f', ), sampleFromOverlap=dict(argstr='--sampleFromOverlap ', ), saveTransform=dict(argstr='--saveTransform %s', hash_files=False, ), terminal_output=dict(mandatory=True, nohash=True, ), verbosityLevel=dict(argstr='--verbosityLevel %s', ), ) inputs = ExpertAutomatedRegistration.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ExpertAutomatedRegistration_outputs(): output_map = dict(resampledImage=dict(), saveTransform=dict(), ) outputs = ExpertAutomatedRegistration.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py000066400000000000000000000035201227300005300306020ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.registration import LinearRegistration def test_LinearRegistration_inputs(): input_map = dict(FixedImageFileName=dict(argstr='%s', position=-2, ), MovingImageFileName=dict(argstr='%s', position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), histogrambins=dict(argstr='--histogrambins %d', ), ignore_exception=dict(nohash=True, usedefault=True, ), initialtransform=dict(argstr='--initialtransform %s', ), 
iterations=dict(argstr='--iterations %s', sep=',', ), learningrate=dict(argstr='--learningrate %s', sep=',', ), movingsmoothingfactor=dict(argstr='--movingsmoothingfactor %d', ), outputtransform=dict(argstr='--outputtransform %s', hash_files=False, ), resampledmovingfilename=dict(argstr='--resampledmovingfilename %s', hash_files=False, ), spatialsamples=dict(argstr='--spatialsamples %d', ), terminal_output=dict(mandatory=True, nohash=True, ), translationscale=dict(argstr='--translationscale %f', ), ) inputs = LinearRegistration.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_LinearRegistration_outputs(): output_map = dict(outputtransform=dict(), resampledmovingfilename=dict(), ) outputs = LinearRegistration.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py000066400000000000000000000033651227300005300336660ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.registration import MultiResolutionAffineRegistration def test_MultiResolutionAffineRegistration_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fixedImage=dict(argstr='%s', position=-2, ), fixedImageMask=dict(argstr='--fixedImageMask %s', ), fixedImageROI=dict(argstr='--fixedImageROI %s', ), ignore_exception=dict(nohash=True, usedefault=True, ), metricTolerance=dict(argstr='--metricTolerance %f', ), movingImage=dict(argstr='%s', position=-1, ), numIterations=dict(argstr='--numIterations %d', ), numLineIterations=dict(argstr='--numLineIterations %d', ), resampledImage=dict(argstr='--resampledImage %s', hash_files=False, ), saveTransform=dict(argstr='--saveTransform %s', hash_files=False, ), stepSize=dict(argstr='--stepSize %f', ), stepTolerance=dict(argstr='--stepTolerance %f', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MultiResolutionAffineRegistration.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MultiResolutionAffineRegistration_outputs(): output_map = dict(resampledImage=dict(), saveTransform=dict(), ) outputs = MultiResolutionAffineRegistration.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py000066400000000000000000000024661227300005300317250ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.filtering import OtsuThresholdImageFilter def test_OtsuThresholdImageFilter_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), insideValue=dict(argstr='--insideValue %d', ), numberOfBins=dict(argstr='--numberOfBins %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), outsideValue=dict(argstr='--outsideValue %d', ), terminal_output=dict(mandatory=True, 
nohash=True, ), ) inputs = OtsuThresholdImageFilter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_OtsuThresholdImageFilter_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = OtsuThresholdImageFilter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py000066400000000000000000000026031227300005300321630ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.segmentation import OtsuThresholdSegmentation def test_OtsuThresholdSegmentation_inputs(): input_map = dict(args=dict(argstr='%s', ), brightObjects=dict(argstr='--brightObjects ', ), environ=dict(nohash=True, usedefault=True, ), faceConnected=dict(argstr='--faceConnected ', ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), minimumObjectSize=dict(argstr='--minimumObjectSize %d', ), numberOfBins=dict(argstr='--numberOfBins %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = OtsuThresholdSegmentation.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_OtsuThresholdSegmentation_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = OtsuThresholdSegmentation.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py000066400000000000000000000023601227300005300310640ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.filtering import ResampleScalarVolume def test_ResampleScalarVolume_inputs(): input_map = dict(InputVolume=dict(argstr='%s', position=-2, ), OutputVolume=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), interpolation=dict(argstr='--interpolation %s', ), spacing=dict(argstr='--spacing %s', sep=',', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ResampleScalarVolume.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ResampleScalarVolume_outputs(): output_map = dict(OutputVolume=dict(position=-1, ), ) outputs = ResampleScalarVolume.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py000066400000000000000000000036001227300005300304250ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.legacy.registration import RigidRegistration def test_RigidRegistration_inputs(): input_map = 
dict(FixedImageFileName=dict(argstr='%s', position=-2, ), MovingImageFileName=dict(argstr='%s', position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), histogrambins=dict(argstr='--histogrambins %d', ), ignore_exception=dict(nohash=True, usedefault=True, ), initialtransform=dict(argstr='--initialtransform %s', ), iterations=dict(argstr='--iterations %s', sep=',', ), learningrate=dict(argstr='--learningrate %s', sep=',', ), movingsmoothingfactor=dict(argstr='--movingsmoothingfactor %d', ), outputtransform=dict(argstr='--outputtransform %s', hash_files=False, ), resampledmovingfilename=dict(argstr='--resampledmovingfilename %s', hash_files=False, ), spatialsamples=dict(argstr='--spatialsamples %d', ), terminal_output=dict(mandatory=True, nohash=True, ), testingmode=dict(argstr='--testingmode ', ), translationscale=dict(argstr='--translationscale %f', ), ) inputs = RigidRegistration.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_RigidRegistration_outputs(): output_map = dict(outputtransform=dict(), resampledmovingfilename=dict(), ) outputs = RigidRegistration.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/quantification/000077500000000000000000000000001227300005300227445ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/quantification/__init__.py000066400000000000000000000002131227300005300250510ustar00rootroot00000000000000from changequantification import IntensityDifferenceMetric from petstandarduptakevaluecomputation import PETStandardUptakeValueComputation nipype-0.9.2/nipype/interfaces/slicer/quantification/changequantification.py000066400000000000000000000044141227300005300275050ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class IntensityDifferenceMetricInputSpec(CommandLineInputSpec): sensitivityThreshold = traits.Float(desc="This parameter should be between 0 and 1, and defines how sensitive the metric should be to the intensity changes.", argstr="--sensitivityThreshold %f") changingBandSize = traits.Int(desc="How far (in mm) from the boundary of the segmentation should the intensity changes be considered.", argstr="--changingBandSize %d") baselineVolume = File(position=-4, desc="Baseline volume to be compared to", exists=True, argstr="%s") baselineSegmentationVolume = File(position=-3, desc="Label volume that contains segmentation of the structure of interest in the baseline volume.", exists=True, argstr="%s") followupVolume = File(position=-2, desc="Followup volume to be compare to the baseline", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output volume to keep the results of change quantification.", argstr="%s") reportFileName = traits.Either(traits.Bool, File(), hash_files=False, desc="Report file name", argstr="--reportFileName %s") class IntensityDifferenceMetricOutputSpec(TraitedSpec): outputVolume = File(position=-1, 
desc="Output volume to keep the results of change quantification.", exists=True) reportFileName = File(desc="Report file name", exists=True) class IntensityDifferenceMetric(SEMLikeCommandLine): """title: Intensity Difference Change Detection (FAST) category: Quantification.ChangeQuantification description: Quantifies the changes between two spatially aligned images based on the pixel-wise difference of image intensities. version: 0.1 contributor: Andrey Fedorov acknowledgements: """ input_spec = IntensityDifferenceMetricInputSpec output_spec = IntensityDifferenceMetricOutputSpec _cmd = "IntensityDifferenceMetric " _outputs_filenames = {'outputVolume':'outputVolume.nii','reportFileName':'reportFileName'} nipype-0.9.2/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py000066400000000000000000000061501227300005300323630ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class PETStandardUptakeValueComputationInputSpec(CommandLineInputSpec): petDICOMPath = Directory(desc="Input path to a directory containing a PET volume containing DICOM header information for SUV computation", exists=True, argstr="--petDICOMPath %s") petVolume = File(desc="Input PET volume for SUVbw computation (must be the same volume as pointed to by the DICOM path!).", exists=True, argstr="--petVolume %s") labelMap = File(desc="Input label volume containing the volumes of interest", exists=True, argstr="--labelMap %s") color = File(desc="Color table to to map labels to colors and names", exists=True, argstr="--color %s") csvFile = traits.Either(traits.Bool, File(), hash_files=False, desc="A file holding the output SUV values in comma separated lines, one per label. Optional.", argstr="--csvFile %s") OutputLabel = traits.Str(desc="List of labels for which SUV values were computed", argstr="--OutputLabel %s") OutputLabelValue = traits.Str(desc="List of label values for which SUV values were computed", argstr="--OutputLabelValue %s") SUVMax = traits.Str(desc="SUV max for each label", argstr="--SUVMax %s") SUVMean = traits.Str(desc="SUV mean for each label", argstr="--SUVMean %s") SUVMin = traits.Str(desc="SUV minimum for each label", argstr="--SUVMin %s") class PETStandardUptakeValueComputationOutputSpec(TraitedSpec): csvFile = File(desc="A file holding the output SUV values in comma separated lines, one per label. Optional.", exists=True) class PETStandardUptakeValueComputation(SEMLikeCommandLine): """title: PET Standard Uptake Value Computation category: Quantification description: Computes the standardized uptake value based on body weight. Takes an input PET image in DICOM and NRRD format (DICOM header must contain Radiopharmaceutical parameters). Produces a CSV file that contains patientID, studyDate, dose, labelID, suvmin, suvmax, suvmean, labelName for each volume of interest. It also displays some of the information as output strings in the GUI, the CSV file is optional in that case. The CSV file is appended to on each execution of the CLI. 
version: 0.1.0.$Revision: 8595 $(alpha) documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ComputeSUVBodyWeight contributor: Wendy Plesniak (SPL, BWH), Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) acknowledgements: This work is funded by the Harvard Catalyst, and the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = PETStandardUptakeValueComputationInputSpec output_spec = PETStandardUptakeValueComputationOutputSpec _cmd = "PETStandardUptakeValueComputation " _outputs_filenames = {'csvFile':'csvFile.csv'} nipype-0.9.2/nipype/interfaces/slicer/quantification/tests/000077500000000000000000000000001227300005300241065ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py000066400000000000000000000030461227300005300336570ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.quantification.changequantification import IntensityDifferenceMetric def test_IntensityDifferenceMetric_inputs(): input_map = dict(args=dict(argstr='%s', ), baselineSegmentationVolume=dict(argstr='%s', position=-3, ), baselineVolume=dict(argstr='%s', position=-4, ), changingBandSize=dict(argstr='--changingBandSize %d', ), environ=dict(nohash=True, usedefault=True, ), followupVolume=dict(argstr='%s', position=-2, ), ignore_exception=dict(nohash=True, usedefault=True, ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), reportFileName=dict(argstr='--reportFileName %s', hash_files=False, ), sensitivityThreshold=dict(argstr='--sensitivityThreshold %f', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = IntensityDifferenceMetric.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_IntensityDifferenceMetric_outputs(): output_map = dict(outputVolume=dict(position=-1, ), reportFileName=dict(), ) outputs = IntensityDifferenceMetric.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value test_auto_PETStandardUptakeValueComputation.py000066400000000000000000000031071227300005300351740ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/quantification/tests# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.quantification.petstandarduptakevaluecomputation import PETStandardUptakeValueComputation def test_PETStandardUptakeValueComputation_inputs(): input_map = dict(OutputLabel=dict(argstr='--OutputLabel %s', ), OutputLabelValue=dict(argstr='--OutputLabelValue %s', ), SUVMax=dict(argstr='--SUVMax %s', ), SUVMean=dict(argstr='--SUVMean %s', ), SUVMin=dict(argstr='--SUVMin %s', ), args=dict(argstr='%s', ), color=dict(argstr='--color %s', ), csvFile=dict(argstr='--csvFile %s', hash_files=False, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), labelMap=dict(argstr='--labelMap %s', ), petDICOMPath=dict(argstr='--petDICOMPath %s', ), petVolume=dict(argstr='--petVolume %s', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = PETStandardUptakeValueComputation.input_spec() for key, metadata in 
input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_PETStandardUptakeValueComputation_outputs(): output_map = dict(csvFile=dict(), ) outputs = PETStandardUptakeValueComputation.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/registration/000077500000000000000000000000001227300005300224405ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/registration/__init__.py000066400000000000000000000002511227300005300245470ustar00rootroot00000000000000from specialized import ACPCTransform, FiducialRegistration, VBRAINSDemonWarp, BRAINSDemonWarp from brainsresample import BRAINSResample from brainsfit import BRAINSFit nipype-0.9.2/nipype/interfaces/slicer/registration/brainsfit.py000066400000000000000000000446241227300005300250050ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class BRAINSFitInputSpec(CommandLineInputSpec): fixedVolume = File(desc="The fixed image for registration by mutual information optimization.", exists=True, argstr="--fixedVolume %s") movingVolume = File(desc="The moving image for registration by mutual information optimization.", exists=True, argstr="--movingVolume %s") bsplineTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS BSpline", argstr="--bsplineTransform %s") linearTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS ---NOT--- BSpline", argstr="--linearTransform %s") outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="(optional) Output image for registration. NOTE: You must select either the outputTransform or the outputVolume option.", argstr="--outputVolume %s") initialTransform = File(desc="Filename of transform used to initialize the registration. This CAN NOT be used with either CenterOfHeadLAlign, MomentsAlign, GeometryAlign, or initialTransform file.", exists=True, argstr="--initialTransform %s") initializeTransformMode = traits.Enum("Off", "useMomentsAlign", "useCenterOfHeadAlign", "useGeometryAlign", "useCenterOfROIAlign", desc="Determine how to initialize the transform center. GeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. MomentsAlign assumes that the center of mass of the images represent similar structures. useCenterOfHeadAlign attempts to use the top of head and shape of neck to drive a center of mass estimate. Off assumes that the physical space of the images are close, and that centering in terms of the image Origins is a good starting point. 
This flag is mutually exclusive with the initialTransform flag.", argstr="--initializeTransformMode %s") useRigid = traits.Bool(desc="Perform a rigid registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", argstr="--useRigid ") useScaleVersor3D = traits.Bool(desc="Perform a ScaleVersor3D registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", argstr="--useScaleVersor3D ") useScaleSkewVersor3D = traits.Bool(desc="Perform a ScaleSkewVersor3D registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", argstr="--useScaleSkewVersor3D ") useAffine = traits.Bool(desc="Perform an Affine registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", argstr="--useAffine ") useBSpline = traits.Bool(desc="Perform a BSpline registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", argstr="--useBSpline ") numberOfSamples = traits.Int(desc="The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation.", argstr="--numberOfSamples %d") splineGridSize = InputMultiPath(traits.Int, desc="The number of subdivisions of the BSpline Grid to be centered on the image space. Each dimension must have at least 3 subdivisions for the BSpline to be correctly computed. ", sep=",", argstr="--splineGridSize %s") numberOfIterations = InputMultiPath(traits.Int, desc="The maximum number of iterations to try before failing to converge. Use an explicit limit like 500 or 1000 to manage risk of divergence", sep=",", argstr="--numberOfIterations %s") maskProcessingMode = traits.Enum("NOMASK", "ROIAUTO", "ROI", desc="What mode to use for using the masks. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. 
The Region Of Interest mode (choose ROI) uses the masks to define what parts of the image should be used for computing the transform.", argstr="--maskProcessingMode %s") fixedBinaryVolume = File(desc="Fixed Image binary mask volume, ONLY FOR MANUAL ROI mode.", exists=True, argstr="--fixedBinaryVolume %s") movingBinaryVolume = File(desc="Moving Image binary mask volume, ONLY FOR MANUAL ROI mode.", exists=True, argstr="--movingBinaryVolume %s") outputFixedVolumeROI = traits.Either(traits.Bool, File(), hash_files=False, desc="The ROI automatically found in fixed image, ONLY FOR ROIAUTO mode.", argstr="--outputFixedVolumeROI %s") outputMovingVolumeROI = traits.Either(traits.Bool, File(), hash_files=False, desc="The ROI automatically found in moving image, ONLY FOR ROIAUTO mode.", argstr="--outputMovingVolumeROI %s") outputVolumePixelType = traits.Enum("float", "short", "ushort", "int", "uint", "uchar", desc="The output image Pixel Type is the scalar datatype for representation of the Output Volume.", argstr="--outputVolumePixelType %s") backgroundFillValue = traits.Float(desc="Background fill value for output image.", argstr="--backgroundFillValue %f") maskInferiorCutOffFromCenter = traits.Float(desc="For use with --useCenterOfHeadAlign (and --maskProcessingMode ROIAUTO): the cut-off below the image centers, in millimeters, ", argstr="--maskInferiorCutOffFromCenter %f") scaleOutputValues = traits.Bool(desc="If true, and the voxel values do not fit within the minimum and maximum values of the desired outputVolumePixelType, then linearly scale the min/max output image voxel values to fit within the min/max range of the outputVolumePixelType.", argstr="--scaleOutputValues ") interpolationMode = traits.Enum("NearestNeighbor", "Linear", "ResampleInPlace", "BSpline", "WindowedSinc", "Hamming", "Cosine", "Welch", "Lanczos", "Blackman", desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, NearestNeighbor, BSpline, WindowedSinc, or ResampleInPlace. The ResampleInPlace option will create an image with the same discrete voxel values and will adjust the origin and direction of the physical space interpretation.", argstr="--interpolationMode %s") minimumStepLength = InputMultiPath(traits.Float, desc="Each step in the optimization takes steps at least this big. When none are possible, registration is complete.", sep=",", argstr="--minimumStepLength %s") translationScale = traits.Float(desc="How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more rotation in the search pattern.", argstr="--translationScale %f") reproportionScale = traits.Float(desc="ScaleVersor3D 'Scale' compensation factor. Increase this to put more rescaling in a ScaleVersor3D or ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", argstr="--reproportionScale %f") skewScale = traits.Float(desc="ScaleSkewVersor3D Skew compensation factor. Increase this to put more skew in a ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", argstr="--skewScale %f") maxBSplineDisplacement = traits.Float(desc=" Sets the maximum allowed displacements in image physical coordinates for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. 
This can lead to a substantial reduction in computation time in the BSpline optimizer., ", argstr="--maxBSplineDisplacement %f") histogramMatch = traits.Bool(desc="Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile. Do NOT use if registering images from different modailties.", argstr="--histogramMatch ") numberOfHistogramBins = traits.Int(desc="The number of histogram levels", argstr="--numberOfHistogramBins %d") numberOfMatchPoints = traits.Int(desc="the number of match points", argstr="--numberOfMatchPoints %d") strippedOutputTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="File name for the rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overwritten if either bsplineTransform or linearTransform is set.", argstr="--strippedOutputTransform %s") transformType = InputMultiPath(traits.Str, desc="Specifies a list of registration types to be used. The valid types are, Rigid, ScaleVersor3D, ScaleSkewVersor3D, Affine, and BSpline. Specifiying more than one in a comma separated list will initialize the next stage with the previous results. If registrationClass flag is used, it overrides this parameter setting.", sep=",", argstr="--transformType %s") outputTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="(optional) Filename to which save the (optional) estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", argstr="--outputTransform %s") fixedVolumeTimeIndex = traits.Int(desc="The index in the time series for the 3D fixed image to fit, if 4-dimensional.", argstr="--fixedVolumeTimeIndex %d") movingVolumeTimeIndex = traits.Int(desc="The index in the time series for the 3D moving image to fit, if 4-dimensional.", argstr="--movingVolumeTimeIndex %d") medianFilterSize = InputMultiPath(traits.Int, desc="The radius for the optional MedianImageFilter preprocessing in all 3 directions.", sep=",", argstr="--medianFilterSize %s") removeIntensityOutliers = traits.Float(desc="The half percentage to decide outliers of image intensities. The default value is zero, which means no outlier removal. If the value of 0.005 is given, the moduel will throw away 0.005 % of both tails, so 0.01% of intensities in total would be ignored in its statistic calculation. ", argstr="--removeIntensityOutliers %f") useCachingOfBSplineWeightsMode = traits.Enum("ON", "OFF", desc="This is a 5x speed advantage at the expense of requiring much more memory. Only relevant when transformType is BSpline.", argstr="--useCachingOfBSplineWeightsMode %s") useExplicitPDFDerivativesMode = traits.Enum("AUTO", "ON", "OFF", desc="Using mode AUTO means OFF for BSplineDeformableTransforms and ON for the linear transforms. The ON alternative uses more memory to sometimes do a better job.", argstr="--useExplicitPDFDerivativesMode %s") ROIAutoDilateSize = traits.Float(desc="This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", argstr="--ROIAutoDilateSize %f") ROIAutoClosingSize = traits.Float(desc="This flag is only relavent when using ROIAUTO mode for initializing masks. 
It defines the hole closing size in mm. It is rounded up to the nearest whole pixel size in each direction. The default is to use a closing size of 9mm. For mouse data this value may need to be reset to 0.9 or smaller.", argstr="--ROIAutoClosingSize %f") relaxationFactor = traits.Float(desc="Internal debugging parameter, and should probably never be used from the command line. This will be removed in the future.", argstr="--relaxationFactor %f") maximumStepLength = traits.Float(desc="Internal debugging parameter, and should probably never be used from the command line. This will be removed in the future.", argstr="--maximumStepLength %f") failureExitCode = traits.Int(desc="If the fit fails, exit with this status code. (It can be used to force a successfult exit status of (0) if the registration fails due to reaching the maximum number of iterations.", argstr="--failureExitCode %d") writeTransformOnFailure = traits.Bool(desc="Flag to save the final transform even if the numberOfIterations are reached without convergence. (Intended for use when --failureExitCode 0 )", argstr="--writeTransformOnFailure ") numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use. (default is auto-detected)", argstr="--numberOfThreads %d") forceMINumberOfThreads = traits.Int(desc="Force the the maximum number of threads to use for non thread safe MI metric. CAUTION: Inconsistent results my arise!", argstr="--forceMINumberOfThreads %d") debugLevel = traits.Int(desc="Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", argstr="--debugLevel %d") costFunctionConvergenceFactor = traits.Float(desc=" From itkLBFGSBOptimizer.h: Set/Get the CostFunctionConvergenceFactor. Algorithm terminates when the reduction in cost function is less than (factor * epsmcj) where epsmch is the machine precision. Typical values for factor: 1e+12 for low accuracy; 1e+7 for moderate accuracy and 1e+1 for extremely high accuracy. 1e+9 seems to work well., ", argstr="--costFunctionConvergenceFactor %f") projectedGradientTolerance = traits.Float(desc=" From itkLBFGSBOptimizer.h: Set/Get the ProjectedGradientTolerance. Algorithm terminates when the project gradient is below the tolerance. Default lbfgsb value is 1e-5, but 1e-4 seems to work well., ", argstr="--projectedGradientTolerance %f") gui = traits.Bool(desc="Display intermediate image volumes for debugging. NOTE: This is not part of the standard build sytem, and probably does nothing on your installation.", argstr="--gui ") promptUser = traits.Bool(desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer", argstr="--promptUser ") NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 = traits.Bool(desc="DO NOT USE THIS FLAG", argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 ") NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 = traits.Bool(desc="DO NOT USE THIS FLAG", argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 ") NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 = traits.Bool(desc="DO NOT USE THIS FLAG", argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 ") permitParameterVariation = InputMultiPath(traits.Int, desc="A bit vector to permit linear transform parameters to vary under optimization. The vector order corresponds with transform parameters, and beyond the end ones fill in as a default. 
For instance, you can choose to rotate only in x (pitch) with 1,0,0; this is mostly for expert use in turning on and off individual degrees of freedom in rotation, translation or scaling without multiplying the number of transform representations; this trick is probably meaningless when tried with the general affine transform.", sep=",", argstr="--permitParameterVariation %s") costMetric = traits.Enum("MMI", "MSE", "NC", "MC", desc="The cost metric to be used during fitting. Defaults to MMI. Options are MMI (Mattes Mutual Information), MSE (Mean Square Error), NC (Normalized Correlation), MC (Match Cardinality for binary images)", argstr="--costMetric %s") class BRAINSFitOutputSpec(TraitedSpec): bsplineTransform = File(desc="(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS BSpline", exists=True) linearTransform = File(desc="(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS ---NOT--- BSpline", exists=True) outputVolume = File(desc="(optional) Output image for registration. NOTE: You must select either the outputTransform or the outputVolume option.", exists=True) outputFixedVolumeROI = File(desc="The ROI automatically found in fixed image, ONLY FOR ROIAUTO mode.", exists=True) outputMovingVolumeROI = File(desc="The ROI automatically found in moving image, ONLY FOR ROIAUTO mode.", exists=True) strippedOutputTransform = File(desc="File name for the rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overwritten if either bsplineTransform or linearTransform is set.", exists=True) outputTransform = File(desc="(optional) Filename to which save the (optional) estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", exists=True) class BRAINSFit(SEMLikeCommandLine): """title: General Registration (BRAINS) category: Registration description: Register a three-dimensional volume to a reference volume (Mattes Mutual Information by default). Described in BRAINSFit: Mutual Information Registrations of Whole-Brain 3D Images, Using the Insight Toolkit, Johnson H.J., Harris G., Williams K., The Insight Journal, 2007. http://hdl.handle.net/1926/1291 version: 3.0.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSFit license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt contributor: Hans J. 
Johnson, hans-johnson -at- uiowa.edu, http://wwww.psychiatry.uiowa.edu acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5) 1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard """ input_spec = BRAINSFitInputSpec output_spec = BRAINSFitOutputSpec _cmd = "BRAINSFit " _outputs_filenames = {'outputVolume':'outputVolume.nii','bsplineTransform':'bsplineTransform.mat','outputTransform':'outputTransform.mat','outputFixedVolumeROI':'outputFixedVolumeROI.nii','strippedOutputTransform':'strippedOutputTransform.mat','outputMovingVolumeROI':'outputMovingVolumeROI.nii','linearTransform':'linearTransform.mat'} nipype-0.9.2/nipype/interfaces/slicer/registration/brainsresample.py000066400000000000000000000071421227300005300260250ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class BRAINSResampleInputSpec(CommandLineInputSpec): inputVolume = File(desc="Image To Warp", exists=True, argstr="--inputVolume %s") referenceVolume = File(desc="Reference image used only to define the output space. If not specified, the warping is done in the same space as the image to warp.", exists=True, argstr="--referenceVolume %s") outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Resulting deformed image", argstr="--outputVolume %s") pixelType = traits.Enum("float", "short", "ushort", "int", "uint", "uchar", "binary", desc="Specifies the pixel type for the input/output images. The \'binary\' pixel type uses a modified algorithm whereby the image is read in as unsigned char, a signed distance map is created, signed distance map is resampled, and then a thresholded image of type unsigned char is written to disk.", argstr="--pixelType %s") deformationVolume = File(desc="Displacement Field to be used to warp the image", exists=True, argstr="--deformationVolume %s") warpTransform = File(desc="Filename for the BRAINSFit transform used in place of the deformation field", exists=True, argstr="--warpTransform %s") interpolationMode = traits.Enum("NearestNeighbor", "Linear", "ResampleInPlace", "BSpline", "WindowedSinc", "Hamming", "Cosine", "Welch", "Lanczos", "Blackman", desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", argstr="--interpolationMode %s") inverseTransform = traits.Bool(desc="True/False is to compute inverse of given transformation. Default is false", argstr="--inverseTransform ") defaultValue = traits.Float(desc="Default voxel value", argstr="--defaultValue %f") gridSpacing = InputMultiPath(traits.Int, desc="Add warped grid to output image to help show the deformation that occured with specified spacing. A spacing of 0 in a dimension indicates that grid lines should be rendered to fall exactly (i.e. do not allow displacements off that plane). 
This is useful for makeing a 2D image of grid lines from the 3D space ", sep=",", argstr="--gridSpacing %s") numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d") class BRAINSResampleOutputSpec(TraitedSpec): outputVolume = File(desc="Resulting deformed image", exists=True) class BRAINSResample(SEMLikeCommandLine): """title: Resample Image (BRAINS) category: Registration description: This program resamples an image image using a deformation field or a transform (BSpline, Affine, Rigid, etc.). version: 3.0.0 documentation-url: http://www.slicer.org/slicerWiki/index.php/Modules:BRAINSResample license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt contributor: This tool was developed by Vincent Magnotta, Greg Harris, and Hans Johnson. acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. """ input_spec = BRAINSResampleInputSpec output_spec = BRAINSResampleOutputSpec _cmd = "BRAINSResample " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/registration/specialized.py000066400000000000000000000534671227300005300253250ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class ACPCTransformInputSpec(CommandLineInputSpec): acpc = InputMultiPath(traits.List(traits.Float(), minlen=3, maxlen=3), desc="ACPC line, two fiducial points, one at the anterior commissure and one at the posterior commissure.", argstr="--acpc %s...") midline = InputMultiPath(traits.List(traits.Float(), minlen=3, maxlen=3), desc="The midline is a series of points defining the division between the hemispheres of the brain (the mid sagittal plane).", argstr="--midline %s...") outputTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="A transform filled in from the ACPC and Midline registration calculation", argstr="--outputTransform %s") debugSwitch = traits.Bool(desc="Click if wish to see debugging output", argstr="--debugSwitch ") class ACPCTransformOutputSpec(TraitedSpec): outputTransform = File(desc="A transform filled in from the ACPC and Midline registration calculation", exists=True) class ACPCTransform(SEMLikeCommandLine): """title: ACPC Transform category: Registration.Specialized description:

Calculate a transformation from two lists of fiducial points.

The ACPC line is defined by two fiducial points, one at the anterior commissure and one at the posterior commissure. The resulting transform will bring the line connecting them horizontal to the AP axis.

The midline is a series of points defining the division between the hemispheres of the brain (the mid sagittal plane). The resulting transform will align the mid sagittal plane of the output volume with the AS plane.

Use the Filtering module Resample Scalar/Vector/DWI Volume to apply the transformation to a volume.
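example: Illustrative usage sketch only; the fiducial coordinates below are placeholders, and a Slicer build providing the ACPCTransform executable is assumed.

>>> from nipype.interfaces.slicer.registration.specialized import ACPCTransform
>>> acpc = ACPCTransform()
>>> acpc.inputs.acpc = [[0.0, 9.0, 0.0], [0.0, -18.0, 0.0]]  # placeholder AC and PC points
>>> acpc.inputs.midline = [[0.0, 9.0, 30.0], [0.0, -18.0, 30.0]]  # placeholder mid-sagittal points
>>> acpc.inputs.outputTransform = True
>>> acpc.run()  # doctest: +SKIP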

version: 1.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ACPCTransform license: slicer3 contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = ACPCTransformInputSpec output_spec = ACPCTransformOutputSpec _cmd = "ACPCTransform " _outputs_filenames = {'outputTransform':'outputTransform.mat'} class FiducialRegistrationInputSpec(CommandLineInputSpec): fixedLandmarks = InputMultiPath(traits.List(traits.Float(), minlen=3, maxlen=3), desc="Ordered list of landmarks in the fixed image", argstr="--fixedLandmarks %s...") movingLandmarks = InputMultiPath(traits.List(traits.Float(), minlen=3, maxlen=3), desc="Ordered list of landmarks in the moving image", argstr="--movingLandmarks %s...") saveTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Save the transform that results from registration", argstr="--saveTransform %s") transformType = traits.Enum("Translation", "Rigid", "Similarity", desc="Type of transform to produce", argstr="--transformType %s") rms = traits.Float(desc="Display RMS Error.", argstr="--rms %f") outputMessage = traits.Str(desc="Provides more information on the output", argstr="--outputMessage %s") class FiducialRegistrationOutputSpec(TraitedSpec): saveTransform = File(desc="Save the transform that results from registration", exists=True) class FiducialRegistration(SEMLikeCommandLine): """title: Fiducial Registration category: Registration.Specialized description: Computes a rigid, similarity or affine transform from a matched list of fiducials version: 0.1.0.$Revision$ documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/TransformFromFiducials contributor: Casey B Goodlett (Kitware), Dominik Meier (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
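example: Illustrative usage sketch only; the landmark coordinates below are placeholders, and a Slicer build providing the FiducialRegistration executable is assumed.

>>> from nipype.interfaces.slicer.registration.specialized import FiducialRegistration
>>> reg = FiducialRegistration()
>>> reg.inputs.fixedLandmarks = [[10.0, 5.0, 2.0], [30.0, -5.0, 8.0]]  # placeholder points
>>> reg.inputs.movingLandmarks = [[11.0, 4.0, 1.0], [29.0, -6.0, 9.0]]  # placeholder points
>>> reg.inputs.transformType = 'Rigid'
>>> reg.inputs.saveTransform = True
>>> reg.run()  # doctest: +SKIP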
""" input_spec = FiducialRegistrationInputSpec output_spec = FiducialRegistrationOutputSpec _cmd = "FiducialRegistration " _outputs_filenames = {'saveTransform':'saveTransform.txt'} class VBRAINSDemonWarpInputSpec(CommandLineInputSpec): movingVolume = InputMultiPath(File(exists=True), desc="Required: input moving image", argstr="--movingVolume %s...") fixedVolume = InputMultiPath(File(exists=True), desc="Required: input fixed (target) image", argstr="--fixedVolume %s...") inputPixelType = traits.Enum("float", "short", "ushort", "int", "uchar", desc="Input volumes will be typecast to this format: float|short|ushort|int|uchar", argstr="--inputPixelType %s") outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", argstr="--outputVolume %s") outputDisplacementFieldVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", argstr="--outputDisplacementFieldVolume %s") outputPixelType = traits.Enum("float", "short", "ushort", "int", "uchar", desc="outputVolume will be typecast to this format: float|short|ushort|int|uchar", argstr="--outputPixelType %s") interpolationMode = traits.Enum("NearestNeighbor", "Linear", "ResampleInPlace", "BSpline", "WindowedSinc", "Hamming", "Cosine", "Welch", "Lanczos", "Blackman", desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", argstr="--interpolationMode %s") registrationFilterType = traits.Enum("Demons", "FastSymmetricForces", "Diffeomorphic", "LogDemons", "SymmetricLogDemons", desc="Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic|LogDemons|SymmetricLogDemons", argstr="--registrationFilterType %s") smoothDisplacementFieldSigma = traits.Float(desc="A gaussian smoothing value to be applied to the deformation feild at each iteration.", argstr="--smoothDisplacementFieldSigma %f") numberOfPyramidLevels = traits.Int(desc="Number of image pyramid levels to use in the multi-resolution registration.", argstr="--numberOfPyramidLevels %d") minimumFixedPyramid = InputMultiPath(traits.Int, desc="The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", argstr="--minimumFixedPyramid %s") minimumMovingPyramid = InputMultiPath(traits.Int, desc="The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", argstr="--minimumMovingPyramid %s") arrayOfPyramidLevelIterations = InputMultiPath(traits.Int, desc="The number of iterations for each pyramid level", sep=",", argstr="--arrayOfPyramidLevelIterations %s") histogramMatch = traits.Bool(desc="Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", argstr="--histogramMatch ") numberOfHistogramBins = traits.Int(desc="The number of histogram levels", argstr="--numberOfHistogramBins %d") numberOfMatchPoints = traits.Int(desc="The number of match points for histrogramMatch", argstr="--numberOfMatchPoints %d") medianFilterSize = InputMultiPath(traits.Int, desc="Median filter radius in all 3 directions. 
When images have a lot of salt and pepper noise, this step can improve the registration.", sep=",", argstr="--medianFilterSize %s") initializeWithDisplacementField = File(desc="Initial deformation field vector image file name", exists=True, argstr="--initializeWithDisplacementField %s") initializeWithTransform = File(desc="Initial Transform filename", exists=True, argstr="--initializeWithTransform %s") makeBOBF = traits.Bool(desc="Flag to make Brain-Only Background-Filled versions of the input and target volumes.", argstr="--makeBOBF ") fixedBinaryVolume = File(desc="Mask filename for desired region of interest in the Fixed image.", exists=True, argstr="--fixedBinaryVolume %s") movingBinaryVolume = File(desc="Mask filename for desired region of interest in the Moving image.", exists=True, argstr="--movingBinaryVolume %s") lowerThresholdForBOBF = traits.Int(desc="Lower threshold for performing BOBF", argstr="--lowerThresholdForBOBF %d") upperThresholdForBOBF = traits.Int(desc="Upper threshold for performing BOBF", argstr="--upperThresholdForBOBF %d") backgroundFillValue = traits.Int(desc="Replacement value to overwrite background when performing BOBF", argstr="--backgroundFillValue %d") seedForBOBF = InputMultiPath(traits.Int, desc="coordinates in all 3 directions for Seed when performing BOBF", sep=",", argstr="--seedForBOBF %s") neighborhoodForBOBF = InputMultiPath(traits.Int, desc="neighborhood in all 3 directions to be included when performing BOBF", sep=",", argstr="--neighborhoodForBOBF %s") outputDisplacementFieldPrefix = traits.Str(desc="Displacement field filename prefix for writing separate x, y, and z component images", argstr="--outputDisplacementFieldPrefix %s") outputCheckerboardVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", argstr="--outputCheckerboardVolume %s") checkerboardPatternSubdivisions = InputMultiPath(traits.Int, desc="Number of Checkerboard subdivisions in all 3 directions", sep=",", argstr="--checkerboardPatternSubdivisions %s") outputNormalized = traits.Bool(desc="Flag to warp and write the normalized images to output. 
In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", argstr="--outputNormalized ") outputDebug = traits.Bool(desc="Flag to write debugging images after each step.", argstr="--outputDebug ") weightFactors = InputMultiPath(traits.Float, desc="Weight fatctors for each input images", sep=",", argstr="--weightFactors %s") gradient_type = traits.Enum("0", "1", "2", desc="Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", argstr="--gradient_type %s") upFieldSmoothing = traits.Float(desc="Smoothing sigma for the update field at each iteration", argstr="--upFieldSmoothing %f") max_step_length = traits.Float(desc="Maximum length of an update vector (0: no restriction)", argstr="--max_step_length %f") use_vanilla_dem = traits.Bool(desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem ") gui = traits.Bool(desc="Display intermediate image volumes for debugging", argstr="--gui ") promptUser = traits.Bool(desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer", argstr="--promptUser ") numberOfBCHApproximationTerms = traits.Int(desc="Number of terms in the BCH expansion", argstr="--numberOfBCHApproximationTerms %d") numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d") class VBRAINSDemonWarpOutputSpec(TraitedSpec): outputVolume = File(desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", exists=True) outputDisplacementFieldVolume = File(desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", exists=True) outputCheckerboardVolume = File(desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", exists=True) class VBRAINSDemonWarp(SEMLikeCommandLine): """title: Vector Demon Registration (BRAINS) category: Registration.Specialized description: This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. version: 3.0.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSDemonWarp license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt contributor: This tool was developed by Hans J. Johnson and Greg Harris. acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
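example: Illustrative usage sketch only; the image filenames below are placeholders (these traits require existing files), and a Slicer build providing the VBRAINSDemonWarp executable is assumed.

>>> from nipype.interfaces.slicer.registration.specialized import VBRAINSDemonWarp
>>> vwarp = VBRAINSDemonWarp()
>>> vwarp.inputs.fixedVolume = ['fixed_t1.nii', 'fixed_t2.nii']  # placeholder paths  # doctest: +SKIP
>>> vwarp.inputs.movingVolume = ['moving_t1.nii', 'moving_t2.nii']  # placeholder paths  # doctest: +SKIP
>>> vwarp.inputs.registrationFilterType = 'Diffeomorphic'
>>> vwarp.inputs.outputVolume = True
>>> vwarp.run()  # doctest: +SKIP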
""" input_spec = VBRAINSDemonWarpInputSpec output_spec = VBRAINSDemonWarpOutputSpec _cmd = "VBRAINSDemonWarp " _outputs_filenames = {'outputVolume':'outputVolume.nii','outputCheckerboardVolume':'outputCheckerboardVolume.nii','outputDisplacementFieldVolume':'outputDisplacementFieldVolume.nrrd'} class BRAINSDemonWarpInputSpec(CommandLineInputSpec): movingVolume = File(desc="Required: input moving image", exists=True, argstr="--movingVolume %s") fixedVolume = File(desc="Required: input fixed (target) image", exists=True, argstr="--fixedVolume %s") inputPixelType = traits.Enum("float", "short", "ushort", "int", "uchar", desc="Input volumes will be typecast to this format: float|short|ushort|int|uchar", argstr="--inputPixelType %s") outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", argstr="--outputVolume %s") outputDisplacementFieldVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", argstr="--outputDisplacementFieldVolume %s") outputPixelType = traits.Enum("float", "short", "ushort", "int", "uchar", desc="outputVolume will be typecast to this format: float|short|ushort|int|uchar", argstr="--outputPixelType %s") interpolationMode = traits.Enum("NearestNeighbor", "Linear", "ResampleInPlace", "BSpline", "WindowedSinc", "Hamming", "Cosine", "Welch", "Lanczos", "Blackman", desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", argstr="--interpolationMode %s") registrationFilterType = traits.Enum("Demons", "FastSymmetricForces", "Diffeomorphic", desc="Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic", argstr="--registrationFilterType %s") smoothDisplacementFieldSigma = traits.Float(desc="A gaussian smoothing value to be applied to the deformation feild at each iteration.", argstr="--smoothDisplacementFieldSigma %f") numberOfPyramidLevels = traits.Int(desc="Number of image pyramid levels to use in the multi-resolution registration.", argstr="--numberOfPyramidLevels %d") minimumFixedPyramid = InputMultiPath(traits.Int, desc="The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", argstr="--minimumFixedPyramid %s") minimumMovingPyramid = InputMultiPath(traits.Int, desc="The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", argstr="--minimumMovingPyramid %s") arrayOfPyramidLevelIterations = InputMultiPath(traits.Int, desc="The number of iterations for each pyramid level", sep=",", argstr="--arrayOfPyramidLevelIterations %s") histogramMatch = traits.Bool(desc="Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", argstr="--histogramMatch ") numberOfHistogramBins = traits.Int(desc="The number of histogram levels", argstr="--numberOfHistogramBins %d") numberOfMatchPoints = traits.Int(desc="The number of match points for histrogramMatch", argstr="--numberOfMatchPoints %d") medianFilterSize = InputMultiPath(traits.Int, desc="Median filter radius in all 3 directions. 
When images have a lot of salt and pepper noise, this step can improve the registration.", sep=",", argstr="--medianFilterSize %s") initializeWithDisplacementField = File(desc="Initial deformation field vector image file name", exists=True, argstr="--initializeWithDisplacementField %s") initializeWithTransform = File(desc="Initial Transform filename", exists=True, argstr="--initializeWithTransform %s") maskProcessingMode = traits.Enum("NOMASK", "ROIAUTO", "ROI", "BOBF", desc="What mode to use for using the masks: NOMASK|ROIAUTO|ROI|BOBF. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. Where the Region Of Interest mode uses the masks to define what parts of the image should be used for computing the deformation field. Brain Only Background Fill uses the masks to pre-process the input images by clipping and filling in the background with a predefined value.", argstr="--maskProcessingMode %s") fixedBinaryVolume = File(desc="Mask filename for desired region of interest in the Fixed image.", exists=True, argstr="--fixedBinaryVolume %s") movingBinaryVolume = File(desc="Mask filename for desired region of interest in the Moving image.", exists=True, argstr="--movingBinaryVolume %s") lowerThresholdForBOBF = traits.Int(desc="Lower threshold for performing BOBF", argstr="--lowerThresholdForBOBF %d") upperThresholdForBOBF = traits.Int(desc="Upper threshold for performing BOBF", argstr="--upperThresholdForBOBF %d") backgroundFillValue = traits.Int(desc="Replacement value to overwrite background when performing BOBF", argstr="--backgroundFillValue %d") seedForBOBF = InputMultiPath(traits.Int, desc="coordinates in all 3 directions for Seed when performing BOBF", sep=",", argstr="--seedForBOBF %s") neighborhoodForBOBF = InputMultiPath(traits.Int, desc="neighborhood in all 3 directions to be included when performing BOBF", sep=",", argstr="--neighborhoodForBOBF %s") outputDisplacementFieldPrefix = traits.Str(desc="Displacement field filename prefix for writing separate x, y, and z component images", argstr="--outputDisplacementFieldPrefix %s") outputCheckerboardVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", argstr="--outputCheckerboardVolume %s") checkerboardPatternSubdivisions = InputMultiPath(traits.Int, desc="Number of Checkerboard subdivisions in all 3 directions", sep=",", argstr="--checkerboardPatternSubdivisions %s") outputNormalized = traits.Bool(desc="Flag to warp and write the normalized images to output. 
In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", argstr="--outputNormalized ") outputDebug = traits.Bool(desc="Flag to write debugging images after each step.", argstr="--outputDebug ") gradient_type = traits.Enum("0", "1", "2", desc="Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", argstr="--gradient_type %s") upFieldSmoothing = traits.Float(desc="Smoothing sigma for the update field at each iteration", argstr="--upFieldSmoothing %f") max_step_length = traits.Float(desc="Maximum length of an update vector (0: no restriction)", argstr="--max_step_length %f") use_vanilla_dem = traits.Bool(desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem ") gui = traits.Bool(desc="Display intermediate image volumes for debugging", argstr="--gui ") promptUser = traits.Bool(desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer", argstr="--promptUser ") numberOfBCHApproximationTerms = traits.Int(desc="Number of terms in the BCH expansion", argstr="--numberOfBCHApproximationTerms %d") numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d") class BRAINSDemonWarpOutputSpec(TraitedSpec): outputVolume = File(desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", exists=True) outputDisplacementFieldVolume = File(desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", exists=True) outputCheckerboardVolume = File(desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", exists=True) class BRAINSDemonWarp(SEMLikeCommandLine): """title: Demon Registration (BRAINS) category: Registration.Specialized description: This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. version: 3.0.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSDemonWarp license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt contributor: This tool was developed by Hans J. Johnson and Greg Harris. acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
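example: Illustrative usage sketch only; the image filenames below are placeholders (these traits require existing files), and a Slicer build providing the BRAINSDemonWarp executable is assumed.

>>> from nipype.interfaces.slicer.registration.specialized import BRAINSDemonWarp
>>> demons = BRAINSDemonWarp()
>>> demons.inputs.fixedVolume = 'fixed.nii'    # placeholder path  # doctest: +SKIP
>>> demons.inputs.movingVolume = 'moving.nii'  # placeholder path  # doctest: +SKIP
>>> demons.inputs.outputVolume = True
>>> demons.inputs.outputDisplacementFieldVolume = True
>>> demons.run()  # doctest: +SKIP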
""" input_spec = BRAINSDemonWarpInputSpec output_spec = BRAINSDemonWarpOutputSpec _cmd = "BRAINSDemonWarp " _outputs_filenames = {'outputVolume':'outputVolume.nii','outputCheckerboardVolume':'outputCheckerboardVolume.nii','outputDisplacementFieldVolume':'outputDisplacementFieldVolume.nrrd'} nipype-0.9.2/nipype/interfaces/slicer/registration/tests/000077500000000000000000000000001227300005300236025ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py000066400000000000000000000022551227300005300306510ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.registration.specialized import ACPCTransform def test_ACPCTransform_inputs(): input_map = dict(acpc=dict(argstr='--acpc %s...', ), args=dict(argstr='%s', ), debugSwitch=dict(argstr='--debugSwitch ', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), midline=dict(argstr='--midline %s...', ), outputTransform=dict(argstr='--outputTransform %s', hash_files=False, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ACPCTransform.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ACPCTransform_outputs(): output_map = dict(outputTransform=dict(), ) outputs = ACPCTransform.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py000066400000000000000000000076441227300005300310510ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.registration.specialized import BRAINSDemonWarp def test_BRAINSDemonWarp_inputs(): input_map = dict(args=dict(argstr='%s', ), arrayOfPyramidLevelIterations=dict(argstr='--arrayOfPyramidLevelIterations %s', sep=',', ), backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), checkerboardPatternSubdivisions=dict(argstr='--checkerboardPatternSubdivisions %s', sep=',', ), environ=dict(nohash=True, usedefault=True, ), fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), fixedVolume=dict(argstr='--fixedVolume %s', ), gradient_type=dict(argstr='--gradient_type %s', ), gui=dict(argstr='--gui ', ), histogramMatch=dict(argstr='--histogramMatch ', ), ignore_exception=dict(nohash=True, usedefault=True, ), initializeWithDisplacementField=dict(argstr='--initializeWithDisplacementField %s', ), initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), inputPixelType=dict(argstr='--inputPixelType %s', ), interpolationMode=dict(argstr='--interpolationMode %s', ), lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), max_step_length=dict(argstr='--max_step_length %f', ), medianFilterSize=dict(argstr='--medianFilterSize %s', sep=',', ), minimumFixedPyramid=dict(argstr='--minimumFixedPyramid %s', sep=',', ), minimumMovingPyramid=dict(argstr='--minimumMovingPyramid %s', sep=',', ), movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), movingVolume=dict(argstr='--movingVolume %s', ), neighborhoodForBOBF=dict(argstr='--neighborhoodForBOBF %s', sep=',', ), 
numberOfBCHApproximationTerms=dict(argstr='--numberOfBCHApproximationTerms %d', ), numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputCheckerboardVolume=dict(argstr='--outputCheckerboardVolume %s', hash_files=False, ), outputDebug=dict(argstr='--outputDebug ', ), outputDisplacementFieldPrefix=dict(argstr='--outputDisplacementFieldPrefix %s', ), outputDisplacementFieldVolume=dict(argstr='--outputDisplacementFieldVolume %s', hash_files=False, ), outputNormalized=dict(argstr='--outputNormalized ', ), outputPixelType=dict(argstr='--outputPixelType %s', ), outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), promptUser=dict(argstr='--promptUser ', ), registrationFilterType=dict(argstr='--registrationFilterType %s', ), seedForBOBF=dict(argstr='--seedForBOBF %s', sep=',', ), smoothDisplacementFieldSigma=dict(argstr='--smoothDisplacementFieldSigma %f', ), terminal_output=dict(mandatory=True, nohash=True, ), upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), ) inputs = BRAINSDemonWarp.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BRAINSDemonWarp_outputs(): output_map = dict(outputCheckerboardVolume=dict(), outputDisplacementFieldVolume=dict(), outputVolume=dict(), ) outputs = BRAINSDemonWarp.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py000066400000000000000000000126161227300005300276720ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.registration.brainsfit import BRAINSFit def test_BRAINSFit_inputs(): input_map = dict(NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00=dict(argstr='--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 ', ), NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01=dict(argstr='--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 ', ), NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02=dict(argstr='--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 ', ), ROIAutoClosingSize=dict(argstr='--ROIAutoClosingSize %f', ), ROIAutoDilateSize=dict(argstr='--ROIAutoDilateSize %f', ), args=dict(argstr='%s', ), backgroundFillValue=dict(argstr='--backgroundFillValue %f', ), bsplineTransform=dict(argstr='--bsplineTransform %s', hash_files=False, ), costFunctionConvergenceFactor=dict(argstr='--costFunctionConvergenceFactor %f', ), costMetric=dict(argstr='--costMetric %s', ), debugLevel=dict(argstr='--debugLevel %d', ), environ=dict(nohash=True, usedefault=True, ), failureExitCode=dict(argstr='--failureExitCode %d', ), fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), fixedVolume=dict(argstr='--fixedVolume %s', ), fixedVolumeTimeIndex=dict(argstr='--fixedVolumeTimeIndex %d', ), forceMINumberOfThreads=dict(argstr='--forceMINumberOfThreads %d', ), gui=dict(argstr='--gui ', ), histogramMatch=dict(argstr='--histogramMatch ', ), ignore_exception=dict(nohash=True, usedefault=True, ), initialTransform=dict(argstr='--initialTransform %s', ), initializeTransformMode=dict(argstr='--initializeTransformMode %s', ), 
interpolationMode=dict(argstr='--interpolationMode %s', ), linearTransform=dict(argstr='--linearTransform %s', hash_files=False, ), maskInferiorCutOffFromCenter=dict(argstr='--maskInferiorCutOffFromCenter %f', ), maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), maxBSplineDisplacement=dict(argstr='--maxBSplineDisplacement %f', ), maximumStepLength=dict(argstr='--maximumStepLength %f', ), medianFilterSize=dict(argstr='--medianFilterSize %s', sep=',', ), minimumStepLength=dict(argstr='--minimumStepLength %s', sep=',', ), movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), movingVolume=dict(argstr='--movingVolume %s', ), movingVolumeTimeIndex=dict(argstr='--movingVolumeTimeIndex %d', ), numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), numberOfIterations=dict(argstr='--numberOfIterations %s', sep=',', ), numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), numberOfSamples=dict(argstr='--numberOfSamples %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputFixedVolumeROI=dict(argstr='--outputFixedVolumeROI %s', hash_files=False, ), outputMovingVolumeROI=dict(argstr='--outputMovingVolumeROI %s', hash_files=False, ), outputTransform=dict(argstr='--outputTransform %s', hash_files=False, ), outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), permitParameterVariation=dict(argstr='--permitParameterVariation %s', sep=',', ), projectedGradientTolerance=dict(argstr='--projectedGradientTolerance %f', ), promptUser=dict(argstr='--promptUser ', ), relaxationFactor=dict(argstr='--relaxationFactor %f', ), removeIntensityOutliers=dict(argstr='--removeIntensityOutliers %f', ), reproportionScale=dict(argstr='--reproportionScale %f', ), scaleOutputValues=dict(argstr='--scaleOutputValues ', ), skewScale=dict(argstr='--skewScale %f', ), splineGridSize=dict(argstr='--splineGridSize %s', sep=',', ), strippedOutputTransform=dict(argstr='--strippedOutputTransform %s', hash_files=False, ), terminal_output=dict(mandatory=True, nohash=True, ), transformType=dict(argstr='--transformType %s', sep=',', ), translationScale=dict(argstr='--translationScale %f', ), useAffine=dict(argstr='--useAffine ', ), useBSpline=dict(argstr='--useBSpline ', ), useCachingOfBSplineWeightsMode=dict(argstr='--useCachingOfBSplineWeightsMode %s', ), useExplicitPDFDerivativesMode=dict(argstr='--useExplicitPDFDerivativesMode %s', ), useRigid=dict(argstr='--useRigid ', ), useScaleSkewVersor3D=dict(argstr='--useScaleSkewVersor3D ', ), useScaleVersor3D=dict(argstr='--useScaleVersor3D ', ), writeTransformOnFailure=dict(argstr='--writeTransformOnFailure ', ), ) inputs = BRAINSFit.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BRAINSFit_outputs(): output_map = dict(bsplineTransform=dict(), linearTransform=dict(), outputFixedVolumeROI=dict(), outputMovingVolumeROI=dict(), outputTransform=dict(), outputVolume=dict(), strippedOutputTransform=dict(), ) outputs = BRAINSFit.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py000066400000000000000000000031761227300005300307210ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from 
nipype.interfaces.slicer.registration.brainsresample import BRAINSResample def test_BRAINSResample_inputs(): input_map = dict(args=dict(argstr='%s', ), defaultValue=dict(argstr='--defaultValue %f', ), deformationVolume=dict(argstr='--deformationVolume %s', ), environ=dict(nohash=True, usedefault=True, ), gridSpacing=dict(argstr='--gridSpacing %s', sep=',', ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', ), interpolationMode=dict(argstr='--interpolationMode %s', ), inverseTransform=dict(argstr='--inverseTransform ', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), pixelType=dict(argstr='--pixelType %s', ), referenceVolume=dict(argstr='--referenceVolume %s', ), terminal_output=dict(mandatory=True, nohash=True, ), warpTransform=dict(argstr='--warpTransform %s', ), ) inputs = BRAINSResample.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BRAINSResample_outputs(): output_map = dict(outputVolume=dict(), ) outputs = BRAINSResample.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py000066400000000000000000000025261227300005300323630ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.registration.specialized import FiducialRegistration def test_FiducialRegistration_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), ignore_exception=dict(nohash=True, usedefault=True, ), movingLandmarks=dict(argstr='--movingLandmarks %s...', ), outputMessage=dict(argstr='--outputMessage %s', ), rms=dict(argstr='--rms %f', ), saveTransform=dict(argstr='--saveTransform %s', hash_files=False, ), terminal_output=dict(mandatory=True, nohash=True, ), transformType=dict(argstr='--transformType %s', ), ) inputs = FiducialRegistration.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FiducialRegistration_outputs(): output_map = dict(saveTransform=dict(), ) outputs = FiducialRegistration.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py000066400000000000000000000077411227300005300311750ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.registration.specialized import VBRAINSDemonWarp def test_VBRAINSDemonWarp_inputs(): input_map = dict(args=dict(argstr='%s', ), arrayOfPyramidLevelIterations=dict(argstr='--arrayOfPyramidLevelIterations %s', sep=',', ), backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), checkerboardPatternSubdivisions=dict(argstr='--checkerboardPatternSubdivisions %s', sep=',', ), environ=dict(nohash=True, usedefault=True, ), fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), fixedVolume=dict(argstr='--fixedVolume %s...', ), 
gradient_type=dict(argstr='--gradient_type %s', ), gui=dict(argstr='--gui ', ), histogramMatch=dict(argstr='--histogramMatch ', ), ignore_exception=dict(nohash=True, usedefault=True, ), initializeWithDisplacementField=dict(argstr='--initializeWithDisplacementField %s', ), initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), inputPixelType=dict(argstr='--inputPixelType %s', ), interpolationMode=dict(argstr='--interpolationMode %s', ), lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), makeBOBF=dict(argstr='--makeBOBF ', ), max_step_length=dict(argstr='--max_step_length %f', ), medianFilterSize=dict(argstr='--medianFilterSize %s', sep=',', ), minimumFixedPyramid=dict(argstr='--minimumFixedPyramid %s', sep=',', ), minimumMovingPyramid=dict(argstr='--minimumMovingPyramid %s', sep=',', ), movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), movingVolume=dict(argstr='--movingVolume %s...', ), neighborhoodForBOBF=dict(argstr='--neighborhoodForBOBF %s', sep=',', ), numberOfBCHApproximationTerms=dict(argstr='--numberOfBCHApproximationTerms %d', ), numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputCheckerboardVolume=dict(argstr='--outputCheckerboardVolume %s', hash_files=False, ), outputDebug=dict(argstr='--outputDebug ', ), outputDisplacementFieldPrefix=dict(argstr='--outputDisplacementFieldPrefix %s', ), outputDisplacementFieldVolume=dict(argstr='--outputDisplacementFieldVolume %s', hash_files=False, ), outputNormalized=dict(argstr='--outputNormalized ', ), outputPixelType=dict(argstr='--outputPixelType %s', ), outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), promptUser=dict(argstr='--promptUser ', ), registrationFilterType=dict(argstr='--registrationFilterType %s', ), seedForBOBF=dict(argstr='--seedForBOBF %s', sep=',', ), smoothDisplacementFieldSigma=dict(argstr='--smoothDisplacementFieldSigma %f', ), terminal_output=dict(mandatory=True, nohash=True, ), upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), weightFactors=dict(argstr='--weightFactors %s', sep=',', ), ) inputs = VBRAINSDemonWarp.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_VBRAINSDemonWarp_outputs(): output_map = dict(outputCheckerboardVolume=dict(), outputDisplacementFieldVolume=dict(), outputVolume=dict(), ) outputs = VBRAINSDemonWarp.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/segmentation/000077500000000000000000000000001227300005300224235ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/segmentation/__init__.py000066400000000000000000000002431227300005300245330ustar00rootroot00000000000000from specialized import RobustStatisticsSegmenter, EMSegmentCommandLine, BRAINSROIAuto from simpleregiongrowingsegmentation import SimpleRegionGrowingSegmentation nipype-0.9.2/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py000066400000000000000000000053521227300005300315120ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated 
file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class SimpleRegionGrowingSegmentationInputSpec(CommandLineInputSpec): smoothingIterations = traits.Int(desc="Number of smoothing iterations", argstr="--smoothingIterations %d") timestep = traits.Float(desc="Timestep for curvature flow", argstr="--timestep %f") iterations = traits.Int(desc="Number of iterations of region growing", argstr="--iterations %d") multiplier = traits.Float(desc="Number of standard deviations to include in intensity model", argstr="--multiplier %f") neighborhood = traits.Int(desc="The radius of the neighborhood over which to calculate intensity model", argstr="--neighborhood %d") labelvalue = traits.Int(desc="The integer value (0-255) to use for the segmentation results. This will determine the color of the segmentation that will be generated by the Region growing algorithm", argstr="--labelvalue %d") seed = InputMultiPath(traits.List(traits.Float(), minlen=3, maxlen=3), desc="Seed point(s) for region growing", argstr="--seed %s...") inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s") class SimpleRegionGrowingSegmentationOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output filtered", exists=True) class SimpleRegionGrowingSegmentation(SEMLikeCommandLine): """title: Simple Region Growing Segmentation category: Segmentation description: A simple region growing segmentation algorithm based on intensity statistics. To create a list of fiducials (Seeds) for this algorithm, click on the tool bar icon of an arrow pointing to a starburst fiducial to enter the 'place a new object mode' and then use the fiducials module. This module uses the Slicer Command Line Interface (CLI) and the ITK filters CurvatureFlowImageFilter and ConfidenceConnectedImageFilter. version: 0.1.0.$Revision: 19904 $(alpha) documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/SimpleRegionGrowingSegmentation contributor: Jim Miller (GE) acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium """ input_spec = SimpleRegionGrowingSegmentationInputSpec output_spec = SimpleRegionGrowingSegmentationOutputSpec _cmd = "SimpleRegionGrowingSegmentation " _outputs_filenames = {'outputVolume':'outputVolume.nii'} nipype-0.9.2/nipype/interfaces/slicer/segmentation/specialized.py000066400000000000000000000253301227300005300252740ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class RobustStatisticsSegmenterInputSpec(CommandLineInputSpec): expectedVolume = traits.Float(desc="The approximate volume of the object, in mL.", argstr="--expectedVolume %f") intensityHomogeneity = traits.Float(desc="What is the homogeneity of intensity within the object? 
Given constant intensity at 1.0 score and extreme fluctuating intensity at 0.", argstr="--intensityHomogeneity %f") curvatureWeight = traits.Float(desc="Given sphere 1.0 score and extreme rough bounday/surface 0 score, what is the expected smoothness of the object?", argstr="--curvatureWeight %f") labelValue = traits.Int(desc="Label value of the output image", argstr="--labelValue %d") maxRunningTime = traits.Float(desc="The program will stop if this time is reached.", argstr="--maxRunningTime %f") originalImageFileName = File(position=-3, desc="Original image to be segmented", exists=True, argstr="%s") labelImageFileName = File(position=-2, desc="Label image for initialization", exists=True, argstr="%s") segmentedImageFileName = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Segmented image", argstr="%s") class RobustStatisticsSegmenterOutputSpec(TraitedSpec): segmentedImageFileName = File(position=-1, desc="Segmented image", exists=True) class RobustStatisticsSegmenter(SEMLikeCommandLine): """title: Robust Statistics Segmenter category: Segmentation.Specialized description: Active contour segmentation using robust statistic. version: 1.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RobustStatisticsSegmenter contributor: Yi Gao (gatech), Allen Tannenbaum (gatech), Ron Kikinis (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health """ input_spec = RobustStatisticsSegmenterInputSpec output_spec = RobustStatisticsSegmenterOutputSpec _cmd = "RobustStatisticsSegmenter " _outputs_filenames = {'segmentedImageFileName':'segmentedImageFileName.nii'} class EMSegmentCommandLineInputSpec(CommandLineInputSpec): mrmlSceneFileName = File(desc="Active MRML scene that contains EMSegment algorithm parameters.", exists=True, argstr="--mrmlSceneFileName %s") resultVolumeFileName = traits.Either(traits.Bool, File(), hash_files=False, desc="The file name that the segmentation result volume will be written to.", argstr="--resultVolumeFileName %s") targetVolumeFileNames = InputMultiPath(File(exists=True), desc="File names of target volumes (to be segmented). The number of target images must be equal to the number of target images specified in the parameter set, and these images must be spatially aligned.", argstr="--targetVolumeFileNames %s...") intermediateResultsDirectory = Directory(desc="Directory where EMSegmenter will write intermediate data (e.g., aligned atlas data).", exists=True, argstr="--intermediateResultsDirectory %s") parametersMRMLNodeName = traits.Str(desc="The name of the EMSegment parameters node within the active MRML scene. Leave blank for default.", argstr="--parametersMRMLNodeName %s") disableMultithreading = traits.Int(desc="Disable multithreading for the EMSegmenter algorithm only! Preprocessing might still run in multi-threaded mode. -1: Do not overwrite default value. 0: Disable. 1: Enable.", argstr="--disableMultithreading %d") dontUpdateIntermediateData = traits.Int(desc="Disable update of intermediate results. -1: Do not overwrite default value. 0: Disable. 
1: Enable.", argstr="--dontUpdateIntermediateData %d") verbose = traits.Bool(desc="Enable verbose output.", argstr="--verbose ") loadTargetCentered = traits.Bool(desc="Read target files centered.", argstr="--loadTargetCentered ") loadAtlasNonCentered = traits.Bool(desc="Read atlas files non-centered.", argstr="--loadAtlasNonCentered ") taskPreProcessingSetting = traits.Str(desc="Specifies the different task parameter. Leave blank for default.", argstr="--taskPreProcessingSetting %s") keepTempFiles = traits.Bool(desc="If flag is set then at the end of command the temporary files are not removed", argstr="--keepTempFiles ") resultStandardVolumeFileName = File(desc="Used for testing. Compare segmentation results to this image and return EXIT_FAILURE if they do not match.", exists=True, argstr="--resultStandardVolumeFileName %s") dontWriteResults = traits.Bool(desc="Used for testing. Don't actually write the resulting labelmap to disk.", argstr="--dontWriteResults ") generateEmptyMRMLSceneAndQuit = traits.Either(traits.Bool, File(), hash_files=False, desc="Used for testing. Only write a scene with default mrml parameters.", argstr="--generateEmptyMRMLSceneAndQuit %s") resultMRMLSceneFileName = traits.Either(traits.Bool, File(), hash_files=False, desc="Write out the MRML scene after command line substitutions have been made.", argstr="--resultMRMLSceneFileName %s") disableCompression = traits.Bool(desc="Don't use compression when writing result image to disk.", argstr="--disableCompression ") atlasVolumeFileNames = InputMultiPath(File(exists=True), desc="Use an alternative atlas to the one that is specified by the mrml file - note the order matters ! ", argstr="--atlasVolumeFileNames %s...") registrationPackage = traits.Str(desc="specify the registration package for preprocessing (CMTK or BRAINS or PLASTIMATCH or DEMONS)", argstr="--registrationPackage %s") registrationAffineType = traits.Int(desc="specify the accuracy of the affine registration. -2: Do not overwrite default, -1: Test, 0: Disable, 1: Fast, 2: Accurate", argstr="--registrationAffineType %d") registrationDeformableType = traits.Int(desc="specify the accuracy of the deformable registration. -2: Do not overwrite default, -1: Test, 0: Disable, 1: Fast, 2: Accurate", argstr="--registrationDeformableType %d") class EMSegmentCommandLineOutputSpec(TraitedSpec): resultVolumeFileName = File(desc="The file name that the segmentation result volume will be written to.", exists=True) generateEmptyMRMLSceneAndQuit = File(desc="Used for testing. Only write a scene with default mrml parameters.", exists=True) resultMRMLSceneFileName = File(desc="Write out the MRML scene after command line substitutions have been made.", exists=True) class EMSegmentCommandLine(SEMLikeCommandLine): """title: EMSegment Command-line category: Segmentation.Specialized description: This module is used to simplify the process of segmenting large collections of images by providing a command line interface to the EMSegment algorithm for script and batch processing. documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.0/EMSegment_Command-line contributor: Sebastien Barre, Brad Davis, Kilian Pohl, Polina Golland, Yumin Yuan, Daniel Haehn acknowledgements: Many people and organizations have contributed to the funding, design, and development of the EMSegment algorithm and its various implementations. 
""" input_spec = EMSegmentCommandLineInputSpec output_spec = EMSegmentCommandLineOutputSpec _cmd = "EMSegmentCommandLine " _outputs_filenames = {'generateEmptyMRMLSceneAndQuit':'generateEmptyMRMLSceneAndQuit','resultMRMLSceneFileName':'resultMRMLSceneFileName','resultVolumeFileName':'resultVolumeFileName.mhd'} class BRAINSROIAutoInputSpec(CommandLineInputSpec): inputVolume = File(desc="The input image for finding the largest region filled mask.", exists=True, argstr="--inputVolume %s") outputROIMaskVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="The ROI automatically found from the input image.", argstr="--outputROIMaskVolume %s") outputClippedVolumeROI = traits.Either(traits.Bool, File(), hash_files=False, desc="The inputVolume clipped to the region of the brain mask.", argstr="--outputClippedVolumeROI %s") otsuPercentileThreshold = traits.Float(desc="Parameter to the Otsu threshold algorithm.", argstr="--otsuPercentileThreshold %f") thresholdCorrectionFactor = traits.Float(desc="A factor to scale the Otsu algorithm's result threshold, in case clipping mangles the image.", argstr="--thresholdCorrectionFactor %f") closingSize = traits.Float(desc="The Closing Size (in millimeters) for largest connected filled mask. This value is divided by image spacing and rounded to the next largest voxel number.", argstr="--closingSize %f") ROIAutoDilateSize = traits.Float(desc="This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", argstr="--ROIAutoDilateSize %f") outputVolumePixelType = traits.Enum("float", "short", "ushort", "int", "uint", "uchar", desc="The output image Pixel Type is the scalar datatype for representation of the Output Volume.", argstr="--outputVolumePixelType %s") numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d") class BRAINSROIAutoOutputSpec(TraitedSpec): outputROIMaskVolume = File(desc="The ROI automatically found from the input image.", exists=True) outputClippedVolumeROI = File(desc="The inputVolume clipped to the region of the brain mask.", exists=True) class BRAINSROIAuto(SEMLikeCommandLine): """title: Foreground masking (BRAINS) category: Segmentation.Specialized description: This tool uses a combination of otsu thresholding and a closing operations to identify the most prominant foreground region in an image. version: 2.4.1 license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt contributor: Hans J. 
Johnson, hans-johnson -at- uiowa.edu, http://wwww.psychiatry.uiowa.edu acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5), fedorov -at- bwh.harvard.edu (Slicer integration); (1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard) """ input_spec = BRAINSROIAutoInputSpec output_spec = BRAINSROIAutoOutputSpec _cmd = "BRAINSROIAuto " _outputs_filenames = {'outputROIMaskVolume':'outputROIMaskVolume.nii','outputClippedVolumeROI':'outputClippedVolumeROI.nii'} nipype-0.9.2/nipype/interfaces/slicer/segmentation/tests/000077500000000000000000000000001227300005300235655ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py000066400000000000000000000032111227300005300304040ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.segmentation.specialized import BRAINSROIAuto def test_BRAINSROIAuto_inputs(): input_map = dict(ROIAutoDilateSize=dict(argstr='--ROIAutoDilateSize %f', ), args=dict(argstr='%s', ), closingSize=dict(argstr='--closingSize %f', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), outputClippedVolumeROI=dict(argstr='--outputClippedVolumeROI %s', hash_files=False, ), outputROIMaskVolume=dict(argstr='--outputROIMaskVolume %s', hash_files=False, ), outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), terminal_output=dict(mandatory=True, nohash=True, ), thresholdCorrectionFactor=dict(argstr='--thresholdCorrectionFactor %f', ), ) inputs = BRAINSROIAuto.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BRAINSROIAuto_outputs(): output_map = dict(outputClippedVolumeROI=dict(), outputROIMaskVolume=dict(), ) outputs = BRAINSROIAuto.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py000066400000000000000000000052301227300005300321610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.segmentation.specialized import EMSegmentCommandLine def test_EMSegmentCommandLine_inputs(): input_map = dict(args=dict(argstr='%s', ), atlasVolumeFileNames=dict(argstr='--atlasVolumeFileNames %s...', ), disableCompression=dict(argstr='--disableCompression ', ), disableMultithreading=dict(argstr='--disableMultithreading %d', ), dontUpdateIntermediateData=dict(argstr='--dontUpdateIntermediateData %d', ), dontWriteResults=dict(argstr='--dontWriteResults ', ), environ=dict(nohash=True, usedefault=True, ), generateEmptyMRMLSceneAndQuit=dict(argstr='--generateEmptyMRMLSceneAndQuit %s', hash_files=False, ), ignore_exception=dict(nohash=True, usedefault=True, ), intermediateResultsDirectory=dict(argstr='--intermediateResultsDirectory %s', ), 
keepTempFiles=dict(argstr='--keepTempFiles ', ), loadAtlasNonCentered=dict(argstr='--loadAtlasNonCentered ', ), loadTargetCentered=dict(argstr='--loadTargetCentered ', ), mrmlSceneFileName=dict(argstr='--mrmlSceneFileName %s', ), parametersMRMLNodeName=dict(argstr='--parametersMRMLNodeName %s', ), registrationAffineType=dict(argstr='--registrationAffineType %d', ), registrationDeformableType=dict(argstr='--registrationDeformableType %d', ), registrationPackage=dict(argstr='--registrationPackage %s', ), resultMRMLSceneFileName=dict(argstr='--resultMRMLSceneFileName %s', hash_files=False, ), resultStandardVolumeFileName=dict(argstr='--resultStandardVolumeFileName %s', ), resultVolumeFileName=dict(argstr='--resultVolumeFileName %s', hash_files=False, ), targetVolumeFileNames=dict(argstr='--targetVolumeFileNames %s...', ), taskPreProcessingSetting=dict(argstr='--taskPreProcessingSetting %s', ), terminal_output=dict(mandatory=True, nohash=True, ), verbose=dict(argstr='--verbose ', ), ) inputs = EMSegmentCommandLine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_EMSegmentCommandLine_outputs(): output_map = dict(generateEmptyMRMLSceneAndQuit=dict(), resultMRMLSceneFileName=dict(), resultVolumeFileName=dict(), ) outputs = EMSegmentCommandLine.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py000066400000000000000000000030601227300005300334300ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.segmentation.specialized import RobustStatisticsSegmenter def test_RobustStatisticsSegmenter_inputs(): input_map = dict(args=dict(argstr='%s', ), curvatureWeight=dict(argstr='--curvatureWeight %f', ), environ=dict(nohash=True, usedefault=True, ), expectedVolume=dict(argstr='--expectedVolume %f', ), ignore_exception=dict(nohash=True, usedefault=True, ), intensityHomogeneity=dict(argstr='--intensityHomogeneity %f', ), labelImageFileName=dict(argstr='%s', position=-2, ), labelValue=dict(argstr='--labelValue %d', ), maxRunningTime=dict(argstr='--maxRunningTime %f', ), originalImageFileName=dict(argstr='%s', position=-3, ), segmentedImageFileName=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = RobustStatisticsSegmenter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_RobustStatisticsSegmenter_outputs(): output_map = dict(segmentedImageFileName=dict(position=-1, ), ) outputs = RobustStatisticsSegmenter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value test_auto_SimpleRegionGrowingSegmentation.py000066400000000000000000000031101227300005300344520ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/segmentation/tests# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.segmentation.simpleregiongrowingsegmentation import SimpleRegionGrowingSegmentation def test_SimpleRegionGrowingSegmentation_inputs(): input_map = 
dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), iterations=dict(argstr='--iterations %d', ), labelvalue=dict(argstr='--labelvalue %d', ), multiplier=dict(argstr='--multiplier %f', ), neighborhood=dict(argstr='--neighborhood %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), seed=dict(argstr='--seed %s...', ), smoothingIterations=dict(argstr='--smoothingIterations %d', ), terminal_output=dict(mandatory=True, nohash=True, ), timestep=dict(argstr='--timestep %f', ), ) inputs = SimpleRegionGrowingSegmentation.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SimpleRegionGrowingSegmentation_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = SimpleRegionGrowingSegmentation.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/setup.py000066400000000000000000000012201227300005300214330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('slicer', parent_package, top_path) config.add_data_dir('diffusion') config.add_data_dir('segmentation') config.add_data_dir('filtering') config.add_data_dir('quantification') config.add_data_dir('legacy') config.add_data_dir('registration') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/slicer/surface.py000066400000000000000000000375101227300005300217360ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class MergeModelsInputSpec(CommandLineInputSpec): Model1 = File(position=-3, desc="Model", exists=True, argstr="%s") Model2 = File(position=-2, desc="Model", exists=True, argstr="%s") ModelOutput = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Model", argstr="%s") class MergeModelsOutputSpec(TraitedSpec): ModelOutput = File(position=-1, desc="Model", exists=True) class MergeModels(SEMLikeCommandLine): """title: Merge Models category: Surface Models description: Merge the polydata from two input models and output a new model with the added polydata. Uses the vtkAppendPolyData filter. Works on .vtp and .vtk surface files. version: $Revision$ documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MergeModels contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Daniel Haehn (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
""" input_spec = MergeModelsInputSpec output_spec = MergeModelsOutputSpec _cmd = "MergeModels " _outputs_filenames = {'ModelOutput':'ModelOutput.vtk'} class ModelToLabelMapInputSpec(CommandLineInputSpec): distance = traits.Float(desc="Sample distance", argstr="--distance %f") InputVolume = File(position=-3, desc="Input volume", exists=True, argstr="%s") surface = File(position=-2, desc="Model", exists=True, argstr="%s") OutputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="The label volume", argstr="%s") class ModelToLabelMapOutputSpec(TraitedSpec): OutputVolume = File(position=-1, desc="The label volume", exists=True) class ModelToLabelMap(SEMLikeCommandLine): """title: Model To Label Map category: Surface Models description: Intersects an input model with an reference volume and produces an output label map. version: 0.1.0.$Revision: 8643 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/PolyDataToLabelMap contributor: Nicole Aucoin (SPL, BWH), Xiaodong Tao (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = ModelToLabelMapInputSpec output_spec = ModelToLabelMapOutputSpec _cmd = "ModelToLabelMap " _outputs_filenames = {'OutputVolume':'OutputVolume.nii'} class GrayscaleModelMakerInputSpec(CommandLineInputSpec): InputVolume = File(position=-2, desc="Volume containing the input grayscale data.", exists=True, argstr="%s") OutputGeometry = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output that contains geometry model.", argstr="%s") threshold = traits.Float(desc="Grayscale threshold of isosurface. The resulting surface of triangles separates the volume into voxels that lie above (inside) and below (outside) the threshold.", argstr="--threshold %f") name = traits.Str(desc="Name to use for this model.", argstr="--name %s") smooth = traits.Int(desc="Number of smoothing iterations. If 0, no smoothing will be done.", argstr="--smooth %d") decimate = traits.Float(desc="Target reduction during decimation, as a decimal percentage reduction in the number of polygons. If 0, no decimation will be done.", argstr="--decimate %f") splitnormals = traits.Bool(desc="Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affect measurements", argstr="--splitnormals ") pointnormals = traits.Bool(desc="Calculate the point normals? Calculated point normals make the surface appear smooth. Without point normals, the surface will appear faceted.", argstr="--pointnormals ") class GrayscaleModelMakerOutputSpec(TraitedSpec): OutputGeometry = File(position=-1, desc="Output that contains geometry model.", exists=True) class GrayscaleModelMaker(SEMLikeCommandLine): """title: Grayscale Model Maker category: Surface Models description: Create 3D surface models from grayscale data. This module uses Marching Cubes to create an isosurface at a given threshold. The resulting surface consists of triangles that separate a volume into regions below and above the threshold. The resulting surface can be smoothed and decimated. This model works on continuous data while the module Model Maker works on labeled (or discrete) data. 
version: 3.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleModelMaker license: slicer3 contributor: Nicole Aucoin (SPL, BWH), Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = GrayscaleModelMakerInputSpec output_spec = GrayscaleModelMakerOutputSpec _cmd = "GrayscaleModelMaker " _outputs_filenames = {'OutputGeometry':'OutputGeometry.vtk'} class ProbeVolumeWithModelInputSpec(CommandLineInputSpec): InputVolume = File(position=-3, desc="Volume to use to 'paint' the model", exists=True, argstr="%s") InputModel = File(position=-2, desc="Input model", exists=True, argstr="%s") OutputModel = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output 'painted' model", argstr="%s") class ProbeVolumeWithModelOutputSpec(TraitedSpec): OutputModel = File(position=-1, desc="Output 'painted' model", exists=True) class ProbeVolumeWithModel(SEMLikeCommandLine): """title: Probe Volume With Model category: Surface Models description: Paint a model by a volume (using vtkProbeFilter). version: 0.1.0.$Revision: 1892 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ProbeVolumeWithModel contributor: Lauren O'Donnell (SPL, BWH) acknowledgements: BWH, NCIGT/LMI """ input_spec = ProbeVolumeWithModelInputSpec output_spec = ProbeVolumeWithModelOutputSpec _cmd = "ProbeVolumeWithModel " _outputs_filenames = {'OutputModel':'OutputModel.vtk'} class LabelMapSmoothingInputSpec(CommandLineInputSpec): labelToSmooth = traits.Int(desc="The label to smooth. All others will be ignored. If no label is selected by the user, the maximum label in the image is chosen by default.", argstr="--labelToSmooth %d") numberOfIterations = traits.Int(desc="The number of iterations of the level set AntiAliasing algorithm", argstr="--numberOfIterations %d") maxRMSError = traits.Float(desc="The maximum RMS error.", argstr="--maxRMSError %f") gaussianSigma = traits.Float(desc="The standard deviation of the Gaussian kernel", argstr="--gaussianSigma %f") inputVolume = File(position=-2, desc="Input label map to smooth", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Smoothed label map", argstr="%s") class LabelMapSmoothingOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Smoothed label map", exists=True) class LabelMapSmoothing(SEMLikeCommandLine): """title: Label Map Smoothing category: Surface Models description: This filter smoothes a binary label map. With a label map as input, this filter runs an anti-alising algorithm followed by a Gaussian smoothing algorithm. The output is a smoothed label map. version: 1.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LabelMapSmoothing contributor: Dirk Padfield (GE), Josh Cates (Utah), Ross Whitaker (Utah) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. This filter is based on work developed at the University of Utah, and implemented at GE Research. 
""" input_spec = LabelMapSmoothingInputSpec output_spec = LabelMapSmoothingOutputSpec _cmd = "LabelMapSmoothing " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class ModelMakerInputSpec(CommandLineInputSpec): InputVolume = File(position=-1, desc="Input label map. The Input Volume drop down menu is populated with the label map volumes that are present in the scene, select one from which to generate models.", exists=True, argstr="%s") color = File(desc="Color table to make labels to colors and objects", exists=True, argstr="--color %s") modelSceneFile = traits.Either(traits.Bool, InputMultiPath(File(), ), hash_files=False, desc="Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.", argstr="--modelSceneFile %s...") name = traits.Str(desc="Name to use for this model. Any text entered in the entry box will be the starting string for the created model file names. The label number and the color name will also be part of the file name. If making multiple models, use this as a prefix to the label and color name.", argstr="--name %s") generateAll = traits.Bool(desc="Generate models for all labels in the input volume. select this option if you want to create all models that correspond to all values in a labelmap volume (using the Joint Smoothing option below is useful with this option). Ignores Labels, Start Label, End Label settings. Skips label 0.", argstr="--generateAll ") labels = InputMultiPath(traits.Int, desc="A comma separated list of label values from which to make models. f you specify a list of Labels, it will override any start/end label settings. If you click Generate All Models it will override the list of labels and any start/end label settings.", sep=",", argstr="--labels %s") start = traits.Int(desc="If you want to specify a continuous range of labels from which to generate models, enter the lower label here. Voxel value from which to start making models. Used instead of the label list to specify a range (make sure the label list is empty or it will over ride this).", argstr="--start %d") end = traits.Int(desc="If you want to specify a continuous range of labels from which to generate models, enter the higher label here. Voxel value up to which to continue making models. Skip any values with zero voxels.", argstr="--end %d") skipUnNamed = traits.Bool(desc="Select this to not generate models from labels that do not have names defined in the color look up table associated with the input label map. If true, only models which have an entry in the color table will be generated. If false, generate all models that exist within the label range.", argstr="--skipUnNamed ") jointsmooth = traits.Bool(desc="This will ensure that all resulting models fit together smoothly, like jigsaw puzzle pieces. Otherwise the models will be smoothed independently and may overlap.", argstr="--jointsmooth ") smooth = traits.Int(desc="Here you can set the number of smoothing iterations for Laplacian smoothing, or the degree of the polynomial approximating the windowed Sinc function. Use 0 if you wish no smoothing. 
", argstr="--smooth %d") filtertype = traits.Enum("Sinc", "Laplacian", desc="You can control the type of smoothing done on the models by selecting a filter type of either Sinc or Laplacian.", argstr="--filtertype %s") decimate = traits.Float(desc="Chose the target reduction in number of polygons as a decimal percentage (between 0 and 1) of the number of polygons. Specifies the percentage of triangles to be removed. For example, 0.1 means 10% reduction and 0.9 means 90% reduction.", argstr="--decimate %f") splitnormals = traits.Bool(desc="Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affects measurements.", argstr="--splitnormals ") pointnormals = traits.Bool(desc="Turn this flag on if you wish to calculate the normal vectors for the points.", argstr="--pointnormals ") pad = traits.Bool(desc="Pad the input volume with zero value voxels on all 6 faces in order to ensure the production of closed surfaces. Sets the origin translation and extent translation so that the models still line up with the unpadded input volume.", argstr="--pad ") saveIntermediateModels = traits.Bool(desc="You can save a copy of the models after each of the intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation). These intermediate models are not saved in the mrml file, you have to load them manually after turning off deleting temporary files in they python console (View ->Python Interactor) using the following command slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff().", argstr="--saveIntermediateModels ") debug = traits.Bool(desc="turn this flag on in order to see debugging output (look in the Error Log window that is accessed via the View menu)", argstr="--debug ") class ModelMakerOutputSpec(TraitedSpec): modelSceneFile = OutputMultiPath(File(exists=True), desc="Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.", exists=True) class ModelMaker(SEMLikeCommandLine): """title: Model Maker category: Surface Models description: Create 3D surface models from segmented data.

Models are imported into Slicer under a model hierarchy node in a MRML scene. The model colors are set by the color table associated with the input volume (these colours will only be visible if you load the model scene file).

Create Multiple:

If you specify a list of Labels, it will over ride any start/end label settings.

If you clickGenerate Allit will over ride the list of lables and any start/end label settings.

Model Maker Settings:

You can set the number of smoothing iterations, target reduction in number of polygons (decimal percentage). Use 0 and 1 if you wish no smoothing nor decimation.
You can set the flags to split normals or generate point normals in this pane as well.
You can save a copy of the models after intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation); these models are not saved in the mrml file, turn off deleting temporary files first in the python window:
slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff()

version: 4.1 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ModelMaker license: slicer4 contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Bill Lorensen (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = ModelMakerInputSpec output_spec = ModelMakerOutputSpec _cmd = "ModelMaker " _outputs_filenames = {'modelSceneFile':'modelSceneFile.mrml'} nipype-0.9.2/nipype/interfaces/slicer/tests/000077500000000000000000000000001227300005300210705ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py000066400000000000000000000027751227300005300276200ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.converters import DicomToNrrdConverter def test_DicomToNrrdConverter_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputDicomDirectory=dict(argstr='--inputDicomDirectory %s', ), outputDirectory=dict(argstr='--outputDirectory %s', hash_files=False, ), outputVolume=dict(argstr='--outputVolume %s', ), smallGradientThreshold=dict(argstr='--smallGradientThreshold %f', ), terminal_output=dict(mandatory=True, nohash=True, ), useBMatrixGradientDirections=dict(argstr='--useBMatrixGradientDirections ', ), useIdentityMeaseurementFrame=dict(argstr='--useIdentityMeaseurementFrame ', ), writeProtocolGradientsFile=dict(argstr='--writeProtocolGradientsFile ', ), ) inputs = DicomToNrrdConverter.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DicomToNrrdConverter_outputs(): output_map = dict(outputDirectory=dict(), ) outputs = DicomToNrrdConverter.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py000066400000000000000000000023361227300005300314030ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.utilities import EMSegmentTransformToNewFormat def test_EMSegmentTransformToNewFormat_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputMRMLFileName=dict(argstr='--inputMRMLFileName %s', ), outputMRMLFileName=dict(argstr='--outputMRMLFileName %s', hash_files=False, ), templateFlag=dict(argstr='--templateFlag ', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = EMSegmentTransformToNewFormat.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_EMSegmentTransformToNewFormat_outputs(): output_map = dict(outputMRMLFileName=dict(), ) outputs = EMSegmentTransformToNewFormat.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
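# --------------------------------------------------------------------------
# Illustrative usage sketch -- not part of the generated nipype sources.
# All of the auto-generated SEM-like interfaces above are driven the same way:
# instantiate the class, assign the traits declared in its InputSpec, and
# inspect or run the resulting Slicer command line.  The file names below are
# hypothetical examples; nipype validates that File(exists=True) inputs point
# at real files at assignment time, and running the node requires the Slicer
# CLI module to be installed on the PATH.
from nipype.interfaces.slicer.registration.specialized import BRAINSDemonWarp

BRAINSDemonWarp.help()                        # print the trait documentation
warp = BRAINSDemonWarp()
warp.inputs.fixedVolume = 'fixed_T1.nii'      # hypothetical existing file
warp.inputs.movingVolume = 'moving_T1.nii'    # hypothetical existing file
# Passing True to an output trait lets SEMLikeCommandLine substitute the
# default name from _outputs_filenames (outputVolume.nii in this case).
warp.inputs.outputVolume = True
print(warp.cmdline)   # approximate: "BRAINSDemonWarp --fixedVolume fixed_T1.nii ..."
# warp.run()          # would execute the Slicer CLI module if it is installed
# --------------------------------------------------------------------------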
nipype-0.9.2/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py000066400000000000000000000026271227300005300274130ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.surface import GrayscaleModelMaker def test_GrayscaleModelMaker_inputs(): input_map = dict(InputVolume=dict(argstr='%s', position=-2, ), OutputGeometry=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), decimate=dict(argstr='--decimate %f', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), name=dict(argstr='--name %s', ), pointnormals=dict(argstr='--pointnormals ', ), smooth=dict(argstr='--smooth %d', ), splitnormals=dict(argstr='--splitnormals ', ), terminal_output=dict(mandatory=True, nohash=True, ), threshold=dict(argstr='--threshold %f', ), ) inputs = GrayscaleModelMaker.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_GrayscaleModelMaker_outputs(): output_map = dict(OutputGeometry=dict(position=-1, ), ) outputs = GrayscaleModelMaker.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py000066400000000000000000000025231227300005300271000ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.surface import LabelMapSmoothing def test_LabelMapSmoothing_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), gaussianSigma=dict(argstr='--gaussianSigma %f', ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', position=-2, ), labelToSmooth=dict(argstr='--labelToSmooth %d', ), maxRMSError=dict(argstr='--maxRMSError %f', ), numberOfIterations=dict(argstr='--numberOfIterations %d', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = LabelMapSmoothing.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_LabelMapSmoothing_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = LabelMapSmoothing.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/tests/test_auto_MergeModels.py000066400000000000000000000021611227300005300257340ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.surface import MergeModels def test_MergeModels_inputs(): input_map = dict(Model1=dict(argstr='%s', position=-3, ), Model2=dict(argstr='%s', position=-2, ), ModelOutput=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = MergeModels.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, 
getattr(inputs.traits()[key], metakey), value def test_MergeModels_outputs(): output_map = dict(ModelOutput=dict(position=-1, ), ) outputs = MergeModels.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py000066400000000000000000000035361227300005300255600ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.surface import ModelMaker def test_ModelMaker_inputs(): input_map = dict(InputVolume=dict(argstr='%s', position=-1, ), args=dict(argstr='%s', ), color=dict(argstr='--color %s', ), debug=dict(argstr='--debug ', ), decimate=dict(argstr='--decimate %f', ), end=dict(argstr='--end %d', ), environ=dict(nohash=True, usedefault=True, ), filtertype=dict(argstr='--filtertype %s', ), generateAll=dict(argstr='--generateAll ', ), ignore_exception=dict(nohash=True, usedefault=True, ), jointsmooth=dict(argstr='--jointsmooth ', ), labels=dict(argstr='--labels %s', sep=',', ), modelSceneFile=dict(argstr='--modelSceneFile %s...', hash_files=False, ), name=dict(argstr='--name %s', ), pad=dict(argstr='--pad ', ), pointnormals=dict(argstr='--pointnormals ', ), saveIntermediateModels=dict(argstr='--saveIntermediateModels ', ), skipUnNamed=dict(argstr='--skipUnNamed ', ), smooth=dict(argstr='--smooth %d', ), splitnormals=dict(argstr='--splitnormals ', ), start=dict(argstr='--start %d', ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ModelMaker.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ModelMaker_outputs(): output_map = dict(modelSceneFile=dict(exists=True, ), ) outputs = ModelMaker.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py000066400000000000000000000022761227300005300265010ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.surface import ModelToLabelMap def test_ModelToLabelMap_inputs(): input_map = dict(InputVolume=dict(argstr='%s', position=-3, ), OutputVolume=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), distance=dict(argstr='--distance %f', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), surface=dict(argstr='%s', position=-2, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ModelToLabelMap.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ModelToLabelMap_outputs(): output_map = dict(OutputVolume=dict(position=-1, ), ) outputs = ModelToLabelMap.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py000066400000000000000000000022411227300005300273060ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from 
nipype.interfaces.slicer.converters import OrientScalarVolume def test_OrientScalarVolume_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', position=-2, ), orientation=dict(argstr='--orientation %s', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = OrientScalarVolume.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_OrientScalarVolume_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = OrientScalarVolume.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py000066400000000000000000000022471227300005300276120ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.surface import ProbeVolumeWithModel def test_ProbeVolumeWithModel_inputs(): input_map = dict(InputModel=dict(argstr='%s', position=-2, ), InputVolume=dict(argstr='%s', position=-3, ), OutputModel=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = ProbeVolumeWithModel.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ProbeVolumeWithModel_outputs(): output_map = dict(OutputModel=dict(position=-1, ), ) outputs = ProbeVolumeWithModel.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py000066400000000000000000000012171227300005300270620ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.base import SlicerCommandLine def test_SlicerCommandLine_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SlicerCommandLine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/slicer/utilities.py000066400000000000000000000034731227300005300223220ustar00rootroot00000000000000# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class EMSegmentTransformToNewFormatInputSpec(CommandLineInputSpec): inputMRMLFileName = File(desc="Active MRML scene that contains EMSegment algorithm parameters in the format before 3.6.3 - please include absolute file name in path.", exists=True, 
argstr="--inputMRMLFileName %s") outputMRMLFileName = traits.Either(traits.Bool, File(), hash_files=False, desc="Write out the MRML scene after transformation to format 3.6.3 has been made. - has to be in the same directory as the input MRML file due to Slicer Core bug - please include absolute file name in path ", argstr="--outputMRMLFileName %s") templateFlag = traits.Bool(desc="Set to true if the transformed mrml file should be used as template file ", argstr="--templateFlag ") class EMSegmentTransformToNewFormatOutputSpec(TraitedSpec): outputMRMLFileName = File(desc="Write out the MRML scene after transformation to format 3.6.3 has been made. - has to be in the same directory as the input MRML file due to Slicer Core bug - please include absolute file name in path ", exists=True) class EMSegmentTransformToNewFormat(SEMLikeCommandLine): """title: Transform MRML Files to New EMSegmenter Standard category: Utilities description: Transform MRML Files to New EMSegmenter Standard """ input_spec = EMSegmentTransformToNewFormatInputSpec output_spec = EMSegmentTransformToNewFormatOutputSpec _cmd = "EMSegmentTransformToNewFormat " _outputs_filenames = {'outputMRMLFileName':'outputMRMLFileName.mrml'} nipype-0.9.2/nipype/interfaces/spm/000077500000000000000000000000001227300005300172445ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/spm/__init__.py000066400000000000000000000014571227300005300213640ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Top-level namespace for spm.""" from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname, scans_for_fnames) from .preprocess import (SliceTiming, Realign, Coregister, Normalize, Segment, Smooth, NewSegment, DARTEL, DARTELNorm2MNI, CreateWarped, VBMSegment) from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold, OneSampleTTestDesign, TwoSampleTTestDesign, PairedTTestDesign, MultipleRegressionDesign) from .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice, ApplyInverseDeformation,ResliceToReference,DicomImport) nipype-0.9.2/nipype/interfaces/spm/base.py000066400000000000000000000432461227300005300205410ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The spm module provides basic functions for interfacing with SPM tools. In order to use the standalone MCR version of spm, you need to ensure that the following commands are executed at the beginning of your script:: from nipype import spm matlab_cmd = '/path/to/run_spm8.sh /path/to/Compiler_Runtime/v713/ script' spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True) you can test by calling:: spm.SPMCommand().version """ __docformat__ = 'restructuredtext' # Standard library imports import os from copy import deepcopy # Third-party imports from nibabel import load import numpy as np from scipy.io import savemat # Local imports from ..base import (BaseInterface, traits, isdefined, InputMultiPath, BaseInterfaceInputSpec, Directory, Undefined) from ..matlab import MatlabCommand from ...utils import spm_docs as sd from ... 
import logging logger = logging.getLogger('interface') def func_is_3d(in_file): """Checks if input functional files are 3d.""" if isinstance(in_file, list): return func_is_3d(in_file[0]) else: img = load(in_file) shape = img.get_shape() if len(shape) == 3 or (len(shape) == 4 and shape[3] == 1): return True else: return False def get_first_3dfile(in_files): if not func_is_3d(in_files): return None if isinstance(in_files[0], list): return in_files[0] return in_files def scans_for_fname(fname): """Reads a nifti file and converts it to a numpy array storing individual nifti volumes. Opens images so will fail if they are not found. """ if isinstance(fname, list): scans = np.zeros((len(fname),), dtype=object) for sno, f in enumerate(fname): scans[sno] = '%s,1' % f return scans img = load(fname) if len(img.get_shape()) == 3: return np.array(('%s,1' % fname,), dtype=object) else: n_scans = img.get_shape()[3] scans = np.zeros((n_scans,), dtype=object) for sno in range(n_scans): scans[sno] = '%s,%d' % (fname, sno + 1) return scans def scans_for_fnames(fnames, keep4d=False, separate_sessions=False): """Converts a list of files to a concatenated numpy array for each volume. keep4d : boolean keeps the entries of the numpy array as 4d files instead of extracting the individual volumes. separate_sessions: boolean if 4d nifti files are being used, then separate_sessions ensures a cell array per session is created in the structure. """ flist = None if not isinstance(fnames[0], list): if func_is_3d(fnames[0]): fnames = [fnames] if separate_sessions or keep4d: flist = np.zeros((len(fnames),), dtype=object) for i, f in enumerate(fnames): if separate_sessions: if keep4d: if isinstance(f, list): flist[i] = np.array(f, dtype=object) else: flist[i] = np.array([f], dtype=object) else: flist[i] = scans_for_fname(f) else: if keep4d: flist[i] = f else: scans = scans_for_fname(f) if flist is None: flist = scans else: flist = np.concatenate((flist, scans)) return flist class Info(object): """Handles SPM version information """ @staticmethod def version(matlab_cmd=None, paths=None, use_mcr=None): """Returns the path to the SPM directory in the Matlab path If path not found, returns None. Parameters ---------- matlab_cmd: str Sets the default matlab command. If None, the value of the environment variable SPMMCRCMD will be used if set and use_mcr is True or the environment variable FORCE_SPMMCR is set. If one of FORCE_SPMMCR or SPMMCRCMD is not set, the existence of the environment variable MATLABCMD is checked and its value is used as the matlab command if possible. If none of the above was successful, the fallback value of 'matlab -nodesktop -nosplash' will be used. 
paths : str use_mcr : bool Returns ------- spm_path : string representing path to SPM directory returns None of path not found """ if use_mcr or 'FORCE_SPMMCR' in os.environ: use_mcr = True if matlab_cmd is None: try: matlab_cmd = os.environ['SPMMCRCMD'] except KeyError: pass if matlab_cmd is None: try: matlab_cmd = os.environ['MATLABCMD'] except KeyError: matlab_cmd = 'matlab -nodesktop -nosplash' mlab = MatlabCommand(matlab_cmd=matlab_cmd) mlab.inputs.mfile = False if paths: mlab.inputs.paths = paths if use_mcr: mlab.inputs.nodesktop = Undefined mlab.inputs.nosplash = Undefined mlab.inputs.single_comp_thread = Undefined mlab.inputs.mfile = True mlab.inputs.uses_mcr = True mlab.inputs.script = """ if isempty(which('spm')), throw(MException('SPMCheck:NotFound','SPM not in matlab path')); end; spm_path = spm('dir'); [name, version] = spm('ver'); fprintf(1, 'NIPYPE path:%s|name:%s|release:%s', spm_path, name, version); exit; """ try: out = mlab.run() except (IOError, RuntimeError), e: # if no Matlab at all -- exception could be raised # No Matlab -- no spm logger.debug(str(e)) return None else: out = sd._strip_header(out.runtime.stdout) out_dict = {} for part in out.split('|'): key, val = part.split(':') out_dict[key] = val return out_dict def no_spm(): """ Checks if SPM is NOT installed used with nosetests skipif to skip tests that will fail if spm is not installed""" if Info.version() is None or 'NIPYPE_NO_MATLAB' in os.environ: return True else: return False class SPMCommandInputSpec(BaseInterfaceInputSpec): matlab_cmd = traits.Str(desc='matlab command to use') paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath') mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True) use_mcr = traits.Bool(desc='Run m-code using SPM MCR') use_v8struct = traits.Bool(True, min_ver='8', usedefault=True, desc=('Generate SPM8 and higher compatible jobs') ) class SPMCommand(BaseInterface): """Extends `BaseInterface` class to implement SPM specific interfaces. 
WARNING: Pseudo prototype class, meant to be subclassed """ input_spec = SPMCommandInputSpec _additional_metadata = ['field'] _jobtype = 'basetype' _jobname = 'basename' _matlab_cmd = None _paths = None _use_mcr = None def __init__(self, **inputs): super(SPMCommand, self).__init__(**inputs) self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd', 'mfile', 'paths', 'use_mcr']) self._find_mlab_cmd_defaults() self._check_mlab_inputs() self._matlab_cmd_update() @classmethod def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None): cls._matlab_cmd = matlab_cmd cls._paths = paths cls._use_mcr = use_mcr def _find_mlab_cmd_defaults(self): # check if the user has set environment variables to enforce # the standalone (MCR) version of SPM if self._use_mcr or 'FORCE_SPMMCR' in os.environ: self._use_mcr = True if self._matlab_cmd is None: try: self._matlab_cmd = os.environ['SPMMCRCMD'] except KeyError: pass def _matlab_cmd_update(self): # MatlabCommand has to be created here, # because matlab_cmb is not a proper input # and can be set only during init self.mlab = MatlabCommand(matlab_cmd=self.inputs.matlab_cmd, mfile=self.inputs.mfile, paths=self.inputs.paths) self.mlab.inputs.script_file = 'pyscript_%s.m' % \ self.__class__.__name__.split('.')[-1].lower() if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr: self.mlab.inputs.nodesktop = Undefined self.mlab.inputs.nosplash = Undefined self.mlab.inputs.single_comp_thread = Undefined self.mlab.inputs.uses_mcr = True self.mlab.inputs.mfile = True @property def version(self): version_dict = Info.version(matlab_cmd=self._matlab_cmd, paths=self._paths, use_mcr=self._use_mcr) if version_dict: return '.'.join((version_dict['name'].split('SPM')[-1], version_dict['release'])) return version_dict @property def jobtype(self): return self._jobtype @property def jobname(self): return self._jobname def _check_mlab_inputs(self): if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd: self.inputs.matlab_cmd = self._matlab_cmd if not isdefined(self.inputs.paths) and self._paths: self.inputs.paths = self._paths if not isdefined(self.inputs.use_mcr) and self._use_mcr: self.inputs.use_mcr = self._use_mcr def _run_interface(self, runtime): """Executes the SPM function using MATLAB.""" self.mlab.inputs.script = self._make_matlab_command( deepcopy(self._parse_inputs())) results = self.mlab.run() runtime.returncode = results.runtime.returncode if self.mlab.inputs.uses_mcr: if 'Skipped' in results.runtime.stdout: self.raise_exception(runtime) runtime.stdout = results.runtime.stdout runtime.stderr = results.runtime.stderr runtime.merged = results.runtime.merged return runtime def _list_outputs(self): """Determine the expected outputs based on inputs.""" raise NotImplementedError def _format_arg(self, opt, spec, val): """Convert input to appropriate format for SPM.""" if spec.is_trait_type(traits.Bool): return int(val) else: return val def _parse_inputs(self, skip=()): spmdict = {} metadata = dict(field=lambda t: t is not None) for name, spec in self.inputs.traits(**metadata).items(): if skip and name in skip: continue value = getattr(self.inputs, name) if not isdefined(value): continue field = spec.field if '.' 
in field: fields = field.split('.') dictref = spmdict for f in fields[:-1]: if f not in dictref.keys(): dictref[f] = {} dictref = dictref[f] dictref[fields[-1]] = self._format_arg(name, spec, value) else: spmdict[field] = self._format_arg(name, spec, value) return [spmdict] def _reformat_dict_for_savemat(self, contents): """Encloses a dict representation within hierarchical lists. In order to create an appropriate SPM job structure, a Python dict storing the job needs to be modified so that each dict embedded in dict needs to be enclosed as a list element. Examples -------- >>> a = SPMCommand()._reformat_dict_for_savemat(dict(a=1, ... b=dict(c=2, d=3))) >>> a == [{'a': 1, 'b': [{'c': 2, 'd': 3}]}] True """ newdict = {} try: for key, value in contents.items(): if isinstance(value, dict): if value: newdict[key] = self._reformat_dict_for_savemat(value) # if value is None, skip else: newdict[key] = value return [newdict] except TypeError: print 'Requires dict input' def _generate_job(self, prefix='', contents=None): """Recursive function to generate spm job specification as a string Parameters ---------- prefix : string A string that needs to get contents : dict A non-tuple Python structure containing spm job information gets converted to an appropriate sequence of matlab commands. """ jobstring = '' if contents is None: return jobstring if isinstance(contents, list): for i, value in enumerate(contents): if prefix.endswith(")"): newprefix = "%s,%d)" % (prefix[:-1], i + 1) else: newprefix = "%s(%d)" % (prefix, i + 1) jobstring += self._generate_job(newprefix, value) return jobstring if isinstance(contents, dict): for key, value in contents.items(): newprefix = "%s.%s" % (prefix, key) jobstring += self._generate_job(newprefix, value) return jobstring if isinstance(contents, np.ndarray): if contents.dtype == np.dtype(object): if prefix: jobstring += "%s = {...\n" % (prefix) else: jobstring += "{...\n" for i, val in enumerate(contents): if isinstance(val, np.ndarray): jobstring += self._generate_job(prefix=None, contents=val) elif isinstance(val, str): jobstring += '\'%s\';...\n' % (val) else: jobstring += '%s;...\n' % str(val) jobstring += '};\n' else: for i, val in enumerate(contents): for field in val.dtype.fields: if prefix: newprefix = "%s(%d).%s" % (prefix, i + 1, field) else: newprefix = "(%d).%s" % (i + 1, field) jobstring += self._generate_job(newprefix, val[field]) return jobstring if isinstance(contents, str): jobstring += "%s = '%s';\n" % (prefix, contents) return jobstring jobstring += "%s = %s;\n" % (prefix, str(contents)) return jobstring def _make_matlab_command(self, contents, postscript=None): """Generates a mfile to build job structure Parameters ---------- contents : list a list of dicts generated by _parse_inputs in each subclass cwd : string default os.getcwd() Returns ------- mscript : string contents of a script called by matlab """ cwd = os.getcwd() mscript = """ %% Generated by nipype.interfaces.spm if isempty(which('spm')), throw(MException('SPMCheck:NotFound', 'SPM not in matlab path')); end [name, version] = spm('ver'); fprintf('SPM version: %s Release: %s\\n',name, version); fprintf('SPM path: %s\\n', which('spm')); spm('Defaults','fMRI'); if strcmp(name, 'SPM8') || strcmp(name, 'SPM12b'), spm_jobman('initcfg'); spm_get_defaults('CmdLine', 1); end\n """ if self.mlab.inputs.mfile: if isdefined(self.inputs.use_v8struct) and self.inputs.use_v8struct: mscript += self._generate_job('jobs{1}.spm.%s.%s' % (self.jobtype, self.jobname), contents[0]) else: if self.jobname in 
['st', 'smooth', 'preproc', 'preproc8', 'fmri_spec', 'fmri_est', 'factorial_design', 'defs']: # parentheses mscript += self._generate_job('jobs{1}.%s{1}.%s(1)' % (self.jobtype, self.jobname), contents[0]) else: #curly brackets mscript += self._generate_job('jobs{1}.%s{1}.%s{1}' % (self.jobtype, self.jobname), contents[0]) else: jobdef = {'jobs': [{self.jobtype: [{self.jobname: self.reformat_dict_for_savemat(contents[0])}] }]} savemat(os.path.join(cwd, 'pyjobs_%s.mat' % self.jobname), jobdef) mscript += "load pyjobs_%s;\n\n" % self.jobname mscript += """ spm_jobman(\'run\', jobs);\n """ if postscript is not None: mscript += postscript return mscript nipype-0.9.2/nipype/interfaces/spm/model.py000066400000000000000000001144441227300005300207260ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The spm module provides basic functions for interfacing with matlab and spm to access spm tools. Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ __docformat__ = 'restructuredtext' # Standard library imports import os from glob import glob # Third-party imports import numpy as np import scipy.io as sio # Local imports from nipype.interfaces.base import (Bunch, traits, TraitedSpec, File, Directory, OutputMultiPath, InputMultiPath, isdefined) from nipype.interfaces.spm.base import (SPMCommand, SPMCommandInputSpec, scans_for_fnames) from nipype.utils.filemanip import (filename_to_list, list_to_filename, split_filename) from ... import logging logger = logging.getLogger('interface') class Level1DesignInputSpec(SPMCommandInputSpec): spm_mat_dir = Directory(exists=True, field='dir', desc='directory to store SPM.mat file (opt)') timing_units = traits.Enum('secs', 'scans', field='timing.units', desc='units for specification of onsets', mandatory=True) interscan_interval = traits.Float(field='timing.RT', desc='Interscan interval in secs', mandatory=True) microtime_resolution = traits.Int(field='timing.fmri_t', desc='Number of time-bins per scan in secs (opt)') microtime_onset = traits.Float(field='timing.fmri_t0', desc='The onset/time-bin in seconds for alignment (opt)') session_info = traits.Any(field='sess', desc='Session specific information generated by ``modelgen.SpecifyModel``', mandatory=True) factor_info = traits.List(traits.Dict(traits.Enum('name', 'levels')), field='fact', desc='Factor specific information file (opt)') bases = traits.Dict(traits.Enum('hrf', 'fourier', 'fourier_han', 'gamma', 'fir'), field='bases', desc=""" dict {'name':{'basesparam1':val,...}} name : string Name of basis function (hrf, fourier, fourier_han, gamma, fir) hrf : derivs : 2-element list Model HRF Derivatives. 
No derivatives: [0,0], Time derivatives : [1,0], Time and Dispersion derivatives: [1,1] fourier, fourier_han, gamma, fir: length : int Post-stimulus window length (in seconds) order : int Number of basis functions """, mandatory=True) volterra_expansion_order = traits.Enum(1, 2, field='volt', desc='Model interactions - yes:1, no:2 (opt)') global_intensity_normalization = traits.Enum('none', 'scaling', field='global', desc='Global intensity normalization - scaling or none (opt)') mask_image = File(exists=True, field='mask', desc='Image for explicitly masking the analysis (opt)') mask_threshold = traits.Either(traits.Enum('-Inf'), traits.Float(), desc="Thresholding for the mask (opt, '-Inf')", default='-Inf', usedefault=True) model_serial_correlations = traits.Enum('AR(1)', 'none', field='cvi', desc='Model serial correlations AR(1) or none (opt)') class Level1DesignOutputSpec(TraitedSpec): spm_mat_file = File(exists=True, desc='SPM mat file') class Level1Design(SPMCommand): """Generate an SPM design matrix http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=61 Examples -------- >>> level1design = Level1Design() >>> level1design.inputs.timing_units = 'secs' >>> level1design.inputs.interscan_interval = 2.5 >>> level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} >>> level1design.inputs.session_info = 'session_info.npz' >>> level1design.run() # doctest: +SKIP """ input_spec = Level1DesignInputSpec output_spec = Level1DesignOutputSpec _jobtype = 'stats' _jobname = 'fmri_spec' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['spm_mat_dir', 'mask_image']: return np.array([str(val)], dtype=object) if opt in ['session_info']: #, 'factor_info']: if isinstance(val, dict): return [val] else: return val return super(Level1Design, self)._format_arg(opt, spec, val) def _parse_inputs(self): """validate spm realign options if set to None ignore """ einputs = super(Level1Design, self)._parse_inputs(skip=('mask_threshold')) for sessinfo in einputs[0]['sess']: sessinfo['scans'] = scans_for_fnames(filename_to_list(sessinfo['scans']), keep4d=False) if not isdefined(self.inputs.spm_mat_dir): einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object) return einputs def _make_matlab_command(self, content): """validates spm options and generates job structure if mfile is True uses matlab .m file else generates a job structure and saves in .mat """ if isdefined(self.inputs.mask_image): # SPM doesn't handle explicit masking properly, especially # when you want to use the entire mask image postscript = "load SPM;\n" postscript += "SPM.xM.VM = spm_vol('%s');\n" % list_to_filename(self.inputs.mask_image) postscript += "SPM.xM.I = 0;\n" postscript += "SPM.xM.T = [];\n" postscript += "SPM.xM.TH = ones(size(SPM.xM.TH))*(%s);\n" % self.inputs.mask_threshold postscript += "SPM.xM.xs = struct('Masking', 'explicit masking only');\n" postscript += "save SPM SPM;\n" else: postscript = None return super(Level1Design, self)._make_matlab_command(content, postscript=postscript) def _list_outputs(self): outputs = self._outputs().get() spm = os.path.join(os.getcwd(), 'SPM.mat') outputs['spm_mat_file'] = spm return outputs class EstimateModelInputSpec(SPMCommandInputSpec): spm_mat_file = File(exists=True, field='spmmat', desc='absolute path to SPM.mat', copyfile=True, mandatory=True) estimation_method = traits.Dict(traits.Enum('Classical', 'Bayesian2', 'Bayesian'), field='method', desc='Classical, Bayesian2, Bayesian (dict)', mandatory=True) flags = traits.Str(desc='optional 
arguments (opt)') class EstimateModelOutputSpec(TraitedSpec): mask_image = File(exists=True, desc='binary mask to constrain estimation') beta_images = OutputMultiPath(File(exists=True), desc='design parameter estimates') residual_image = File(exists=True, desc='Mean-squared image of the residuals') RPVimage = File(exists=True, desc='Resels per voxel image') spm_mat_file = File(exists=True, desc='Updated SPM mat file') class EstimateModel(SPMCommand): """Use spm_spm to estimate the parameters of a model http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=71 Examples -------- >>> est = EstimateModel() >>> est.inputs.spm_mat_file = 'SPM.mat' >>> est.run() # doctest: +SKIP """ input_spec = EstimateModelInputSpec output_spec = EstimateModelOutputSpec _jobtype = 'stats' _jobname = 'fmri_est' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt == 'spm_mat_file': return np.array([str(val)], dtype=object) if opt == 'estimation_method': if isinstance(val, str): return {'%s' % val: 1} else: return val return super(EstimateModel, self)._format_arg(opt, spec, val) def _parse_inputs(self): """validate spm realign options if set to None ignore """ einputs = super(EstimateModel, self)._parse_inputs(skip=('flags')) if isdefined(self.inputs.flags): einputs[0].update(self.inputs.flags) return einputs def _list_outputs(self): outputs = self._outputs().get() pth, _ = os.path.split(self.inputs.spm_mat_file) mask = os.path.join(pth, 'mask.img') outputs['mask_image'] = mask spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False) betas = [] for vbeta in spm['SPM'][0, 0].Vbeta[0]: betas.append(str(os.path.join(pth, vbeta.fname[0]))) if betas: outputs['beta_images'] = betas resms = os.path.join(pth, 'ResMS.img') outputs['residual_image'] = resms rpv = os.path.join(pth, 'RPV.img') outputs['RPVimage'] = rpv spm = os.path.join(pth, 'SPM.mat') outputs['spm_mat_file'] = spm return outputs class EstimateContrastInputSpec(SPMCommandInputSpec): spm_mat_file = File(exists=True, field='spmmat', desc='Absolute path to SPM.mat', copyfile=True, mandatory=True) contrasts = traits.List( traits.Either(traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('F'), traits.List(traits.Either(traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float), traits.List(traits.Float)))))), desc="""List of contrasts with each contrast being a list of the form: [('name', 'stat', [condition list], [weight list], [session list])]. if session list is None or not provided, all sessions are used. 
For F contrasts, the condition list should contain previously defined T-contrasts.""", mandatory=True) beta_images = InputMultiPath(File(exists=True), desc='Parameter estimates of the design matrix', copyfile=False, mandatory=True) residual_image = File(exists=True, desc='Mean-squared image of the residuals', copyfile=False, mandatory=True) use_derivs = traits.Bool(desc='use derivatives for estimation', xor=['group_contrast']) group_contrast = traits.Bool(desc='higher level contrast', xor=['use_derivs']) class EstimateContrastOutputSpec(TraitedSpec): con_images = OutputMultiPath(File(exists=True), desc='contrast images from a t-contrast') spmT_images = OutputMultiPath(File(exists=True), desc='stat images from a t-contrast') ess_images = OutputMultiPath(File(exists=True), desc='contrast images from an F-contrast') spmF_images = OutputMultiPath(File(exists=True), desc='stat images from an F-contrast') spm_mat_file = File(exists=True, desc='Updated SPM mat file') class EstimateContrast(SPMCommand): """use spm_contrasts to estimate contrasts of interest Examples -------- >>> import nipype.interfaces.spm as spm >>> est = spm.EstimateContrast() >>> est.inputs.spm_mat_file = 'SPM.mat' >>> cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]) >>> cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]) >>> contrasts = [cont1,cont2] >>> est.inputs.contrasts = contrasts >>> est.run() # doctest: +SKIP """ input_spec = EstimateContrastInputSpec output_spec = EstimateContrastOutputSpec _jobtype = 'stats' _jobname = 'con' def _make_matlab_command(self, _): """validates spm options and generates job structure """ contrasts = [] cname = [] for i, cont in enumerate(self.inputs.contrasts): cname.insert(i, cont[0]) contrasts.insert(i, Bunch(name=cont[0], stat=cont[1], conditions=cont[2], weights=None, sessions=None)) if len(cont) >= 4: contrasts[i].weights = cont[3] if len(cont) >= 5: contrasts[i].sessions = cont[4] script = "% generated by nipype.interfaces.spm\n" script += "spm_defaults;\n" script += "jobs{1}.stats{1}.con.spmmat = {'%s'};\n" % self.inputs.spm_mat_file script += "load(jobs{1}.stats{1}.con.spmmat{:});\n" script += "SPM.swd = '%s';\n" % os.getcwd() script += "save(jobs{1}.stats{1}.con.spmmat{:},'SPM');\n" script += "names = SPM.xX.name;\n" # get names for columns if isdefined(self.inputs.group_contrast) and self.inputs.group_contrast: script += "condnames=names;\n" else: if self.inputs.use_derivs: script += "pat = 'Sn\([0-9]*\) (.*)';\n" else: script += "pat = 'Sn\([0-9]*\) (.*)\*bf\(1\)|Sn\([0-9]*\) .*\*bf\([2-9]\)|Sn\([0-9]*\) (.*)';\n" script += "t = regexp(names,pat,'tokens');\n" # get sessidx for columns script += "pat1 = 'Sn\(([0-9].*)\)\s.*';\n" script += "t1 = regexp(names,pat1,'tokens');\n" script += "for i0=1:numel(t),condnames{i0}='';condsess(i0)=0;if ~isempty(t{i0}{1}),condnames{i0} = t{i0}{1}{1};condsess(i0)=str2num(t1{i0}{1}{1});end;end;\n" # BUILD CONTRAST SESSION STRUCTURE for i, contrast in enumerate(contrasts): if contrast.stat == 'T': script += "consess{%d}.tcon.name = '%s';\n" % (i + 1, contrast.name) script += "consess{%d}.tcon.convec = zeros(1,numel(names));\n" % (i + 1) for c0, cond in enumerate(contrast.conditions): script += "idx = strmatch('%s',condnames,'exact');\n" % (cond) script += "if isempty(idx), throw(MException('CondName:Chk', sprintf('Condition %%s not found in design','%s'))); end;\n" % cond if contrast.sessions: for sno, sw in enumerate(contrast.sessions): script += "sidx = find(condsess(idx)==%d);\n" % (sno + 1) script += 
"consess{%d}.tcon.convec(idx(sidx)) = %f;\n" % (i + 1, sw * contrast.weights[c0]) else: script += "consess{%d}.tcon.convec(idx) = %f;\n" % (i + 1, contrast.weights[c0]) for i, contrast in enumerate(contrasts): if contrast.stat == 'F': script += "consess{%d}.fcon.name = '%s';\n" % (i + 1, contrast.name) for cl0, fcont in enumerate(contrast.conditions): try: tidx = cname.index(fcont[0]) except: Exception("Contrast Estimate: could not get index of" \ " T contrast. probably not defined prior " \ "to the F contrasts") script += "consess{%d}.fcon.convec{%d} = consess{%d}.tcon.convec;\n" % (i + 1, cl0 + 1, tidx + 1) script += "jobs{1}.stats{1}.con.consess = consess;\n" script += "if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');jobs=spm_jobman('spm5tospm8',{jobs});end\n" script += "spm_jobman('run',jobs);" return script def _list_outputs(self): outputs = self._outputs().get() pth, _ = os.path.split(self.inputs.spm_mat_file) spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False) con_images = [] spmT_images = [] for con in spm['SPM'][0, 0].xCon[0]: con_images.append(str(os.path.join(pth, con.Vcon[0, 0].fname[0]))) spmT_images.append(str(os.path.join(pth, con.Vspm[0, 0].fname[0]))) if con_images: outputs['con_images'] = con_images outputs['spmT_images'] = spmT_images ess = glob(os.path.join(pth, 'ess*.img')) if len(ess) > 0: outputs['ess_images'] = sorted(ess) spmf = glob(os.path.join(pth, 'spmF*.img')) if len(spmf) > 0: outputs['spmF_images'] = sorted(spmf) outputs['spm_mat_file'] = self.inputs.spm_mat_file return outputs class ThresholdInputSpec(SPMCommandInputSpec): spm_mat_file = File(exists=True, desc='absolute path to SPM.mat', copyfile=True, mandatory=True) stat_image = File(exists=True, desc='stat image', copyfile=False, mandatory=True) contrast_index = traits.Int(mandatory=True, desc='which contrast in the SPM.mat to use') use_fwe_correction = traits.Bool(True, usedefault=True, desc="whether to use FWE (Bonferroni) correction for initial threshold (height_threshold_type has to be set to p-value)") use_topo_fdr = traits.Bool(True, usedefault=True, desc="whether to use FDR over cluster extent probabilities") height_threshold = traits.Float(0.05, usedefault=True, desc="value for initial thresholding (defining clusters)") height_threshold_type = traits.Enum('p-value', 'stat', usedefault=True, desc="Is the cluster forming threshold a stat value or p-value?") extent_fdr_p_threshold = traits.Float(0.05, usedefault=True, desc='p threshold on FDR corrected cluster size probabilities') extent_threshold = traits.Int(0, usedefault=True, desc="Minimum cluster size in voxels") force_activation = traits.Bool(False, usedefault=True, desc="In case no clusters survive the topological inference step this will pick a culster with the highes sum of t-values. Use with care.") class ThresholdOutputSpec(TraitedSpec): thresholded_map = File(exists=True) n_clusters = traits.Int() pre_topo_fdr_map = File(exists=True) pre_topo_n_clusters = traits.Int() activation_forced = traits.Bool() cluster_forming_thr = traits.Float() class Threshold(SPMCommand): '''Topological FDR thresholding based on cluster extent/size. Smoothness is estimated from GLM residuals but is assumed to be the same for all of the voxels. 
Examples -------- >>> thresh = Threshold() >>> thresh.inputs.spm_mat_file = 'SPM.mat' >>> thresh.inputs.stat_image = 'spmT_0001.img' >>> thresh.inputs.contrast_index = 1 >>> thresh.inputs.extent_fdr_p_threshold = 0.05 >>> thresh.run() # doctest: +SKIP ''' input_spec = ThresholdInputSpec output_spec = ThresholdOutputSpec def _gen_thresholded_map_filename(self): _, fname, ext = split_filename(self.inputs.stat_image) return os.path.abspath(fname + "_thr" + ext) def _gen_pre_topo_map_filename(self): _, fname, ext = split_filename(self.inputs.stat_image) return os.path.abspath(fname + "_pre_topo_thr" + ext) def _make_matlab_command(self, _): script = "con_index = %d;\n" % self.inputs.contrast_index script += "cluster_forming_thr = %f;\n" % self.inputs.height_threshold if self.inputs.use_fwe_correction: script += "thresDesc = 'FWE';\n" else: script += "thresDesc = 'none';\n" if self.inputs.use_topo_fdr: script += "use_topo_fdr = 1;\n" else: script += "use_topo_fdr = 0;\n" if self.inputs.force_activation: script += "force_activation = 1;\n" else: script += "force_activation = 0;\n" script += "cluster_extent_p_fdr_thr = %f;\n" % self.inputs.extent_fdr_p_threshold script += "stat_filename = '%s';\n" % self.inputs.stat_image script += "height_threshold_type = '%s';\n" % self.inputs.height_threshold_type script += "extent_threshold = %d;\n" % self.inputs.extent_threshold script += "load %s;\n" % self.inputs.spm_mat_file script += """ FWHM = SPM.xVol.FWHM; df = [SPM.xCon(con_index).eidf SPM.xX.erdf]; STAT = SPM.xCon(con_index).STAT; R = SPM.xVol.R; S = SPM.xVol.S; n = 1; switch thresDesc case 'FWE' cluster_forming_thr = spm_uc(cluster_forming_thr,df,STAT,R,n,S); case 'none' if strcmp(height_threshold_type, 'p-value') cluster_forming_thr = spm_u(cluster_forming_thr^(1/n),df,STAT); end end stat_map_vol = spm_vol(stat_filename); [stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol); Z = stat_map_data(:)'; [x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))'); XYZ = cat(1, x', y', z'); XYZth = XYZ(:, Z >= cluster_forming_thr); Zth = Z(Z >= cluster_forming_thr); """ script += "spm_write_filtered(Zth,XYZth,stat_map_vol.dim',stat_map_vol.mat,'thresholded map', '%s');\n" % self._gen_pre_topo_map_filename() script += """ max_size = 0; max_size_index = 0; th_nclusters = 0; nclusters = 0; if isempty(XYZth) thresholded_XYZ = []; thresholded_Z = []; else if use_topo_fdr V2R = 1/prod(FWHM(stat_map_vol.dim > 1)); [uc,Pc,ue] = spm_uc_clusterFDR(cluster_extent_p_fdr_thr,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr); end voxel_labels = spm_clusters(XYZth); nclusters = max(voxel_labels); thresholded_XYZ = []; thresholded_Z = []; for i = 1:nclusters cluster_size = sum(voxel_labels==i); if cluster_size > extent_threshold && (~use_topo_fdr || (cluster_size - uc) > -1) thresholded_XYZ = cat(2, thresholded_XYZ, XYZth(:,voxel_labels == i)); thresholded_Z = cat(2, thresholded_Z, Zth(voxel_labels == i)); th_nclusters = th_nclusters + 1; end if force_activation cluster_sum = sum(Zth(voxel_labels == i)); if cluster_sum > max_size max_size = cluster_sum; max_size_index = i; end end end end activation_forced = 0; if isempty(thresholded_XYZ) if force_activation && max_size ~= 0 thresholded_XYZ = XYZth(:,voxel_labels == max_size_index); thresholded_Z = Zth(voxel_labels == max_size_index); th_nclusters = 1; activation_forced = 1; else thresholded_Z = [0]; thresholded_XYZ = [1 1 1]'; th_nclusters = 0; end end fprintf('activation_forced = %d\\n',activation_forced); fprintf('pre_topo_n_clusters = %d\\n',nclusters); 
fprintf('n_clusters = %d\\n',th_nclusters); fprintf('cluster_forming_thr = %f\\n',cluster_forming_thr); """ script += "spm_write_filtered(thresholded_Z,thresholded_XYZ,stat_map_vol.dim',stat_map_vol.mat,'thresholded map', '%s');\n" % self._gen_thresholded_map_filename() return script def aggregate_outputs(self, runtime=None): outputs = self._outputs() setattr(outputs, 'thresholded_map', self._gen_thresholded_map_filename()) setattr(outputs, 'pre_topo_fdr_map', self._gen_pre_topo_map_filename()) for line in runtime.stdout.split('\n'): if line.startswith("activation_forced = "): setattr(outputs, 'activation_forced', line[len("activation_forced = "):].strip() == "1") elif line.startswith("n_clusters = "): setattr(outputs, 'n_clusters', int(line[len("n_clusters = "):].strip())) elif line.startswith("pre_topo_n_clusters = "): setattr(outputs, 'pre_topo_n_clusters', int(line[len("pre_topo_n_clusters = "):].strip())) elif line.startswith("cluster_forming_thr = "): setattr(outputs, 'cluster_forming_thr', float(line[len("cluster_forming_thr = "):].strip())) return outputs def _list_outputs(self): outputs = self._outputs().get() outputs['thresholded_map'] = self._gen_thresholded_map_filename() outputs['pre_topo_fdr_map'] = self._gen_pre_topo_map_filename() return outputs class ThresholdStatisticsInputSpec(SPMCommandInputSpec): spm_mat_file = File(exists=True, desc='absolute path to SPM.mat', copyfile=True, mandatory=True) stat_image = File(exists=True, desc='stat image', copyfile=False, mandatory=True) contrast_index = traits.Int(mandatory=True, desc='which contrast in the SPM.mat to use') height_threshold = traits.Float(desc="stat value for initial thresholding (defining clusters)", mandatory=True) extent_threshold = traits.Int(0, usedefault=True, desc="Minimum cluster size in voxels") class ThresholdStatisticsOutputSpec(TraitedSpec): voxelwise_P_Bonf = traits.Float() voxelwise_P_RF = traits.Float() voxelwise_P_uncor = traits.Float() voxelwise_P_FDR = traits.Float() clusterwise_P_RF = traits.Float() clusterwise_P_FDR = traits.Float() class ThresholdStatistics(SPMCommand): '''Given height and cluster size threshold calculate theoretical probabilities concerning false positives Examples -------- >>> thresh = ThresholdStatistics() >>> thresh.inputs.spm_mat_file = 'SPM.mat' >>> thresh.inputs.stat_image = 'spmT_0001.img' >>> thresh.inputs.contrast_index = 1 >>> thresh.inputs.height_threshold = 4.56 >>> thresh.run() # doctest: +SKIP ''' input_spec = ThresholdStatisticsInputSpec output_spec = ThresholdStatisticsOutputSpec def _make_matlab_command(self, _): script = "con_index = %d;\n" % self.inputs.contrast_index script += "cluster_forming_thr = %f;\n" % self.inputs.height_threshold script += "stat_filename = '%s';\n" % self.inputs.stat_image script += "extent_threshold = %d;\n" % self.inputs.extent_threshold script += "load '%s'\n" % self.inputs.spm_mat_file script += """ FWHM = SPM.xVol.FWHM; df = [SPM.xCon(con_index).eidf SPM.xX.erdf]; STAT = SPM.xCon(con_index).STAT; R = SPM.xVol.R; S = SPM.xVol.S; n = 1; voxelwise_P_Bonf = spm_P_Bonf(cluster_forming_thr,df,STAT,S,n) voxelwise_P_RF = spm_P_RF(1,0,cluster_forming_thr,df,STAT,R,n) stat_map_vol = spm_vol(stat_filename); [stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol); Z = stat_map_data(:); Zum = Z; switch STAT case 'Z' VPs = (1-spm_Ncdf(Zum)).^n; voxelwise_P_uncor = (1-spm_Ncdf(cluster_forming_thr)).^n case 'T' VPs = (1 - spm_Tcdf(Zum,df(2))).^n; voxelwise_P_uncor = (1 - spm_Tcdf(cluster_forming_thr,df(2))).^n case 'X' VPs = 
(1-spm_Xcdf(Zum,df(2))).^n; voxelwise_P_uncor = (1-spm_Xcdf(cluster_forming_thr,df(2))).^n case 'F' VPs = (1 - spm_Fcdf(Zum,df)).^n; voxelwise_P_uncor = (1 - spm_Fcdf(cluster_forming_thr,df)).^n end VPs = sort(VPs); voxelwise_P_FDR = spm_P_FDR(cluster_forming_thr,df,STAT,n,VPs) V2R = 1/prod(FWHM(stat_map_vol.dim > 1)); clusterwise_P_RF = spm_P_RF(1,extent_threshold*V2R,cluster_forming_thr,df,STAT,R,n) [x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))'); XYZ = cat(1, x', y', z'); [u, CPs, ue] = spm_uc_clusterFDR(0.05,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr); clusterwise_P_FDR = spm_P_clusterFDR(extent_threshold*V2R,df,STAT,R,n,cluster_forming_thr,CPs') """ return script def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() cur_output = "" for line in runtime.stdout.split('\n'): if cur_output != "" and len(line.split()) != 0: setattr(outputs, cur_output, float(line)) cur_output = "" continue if len(line.split()) != 0 and line.split()[0] in ["clusterwise_P_FDR", "clusterwise_P_RF", "voxelwise_P_Bonf", "voxelwise_P_FDR", "voxelwise_P_RF", "voxelwise_P_uncor"]: cur_output = line.split()[0] continue return outputs class FactorialDesignInputSpec(SPMCommandInputSpec): spm_mat_dir = Directory(exists=True, field='dir', desc='directory to store SPM.mat file (opt)') # really need to make an alias of InputMultiPath because the inputs below are not Path covariates = InputMultiPath(traits.Dict(key_trait=traits.Enum('vector', 'name', 'interaction', 'centering')), field='cov', desc='covariate dictionary {vector, name, interaction, centering}') threshold_mask_none = traits.Bool(field='masking.tm.tm_none', xor=['threshold_mask_absolute', 'threshold_mask_relative'], desc='do not use threshold masking') threshold_mask_absolute = traits.Float(field='masking.tm.tma.athresh', xor=['threshold_mask_none', 'threshold_mask_relative'], desc='use an absolute threshold') threshold_mask_relative = traits.Float(field='masking.tm.tmr.rthresh', xor=['threshold_mask_absolute', 'threshold_mask_none'], desc='threshold using a proportion of the global value') use_implicit_threshold = traits.Bool(field='masking.im', desc='use implicit mask NaNs or zeros to threshold') explicit_mask_file = File(field='masking.em', #requires cell desc='use an implicit mask file to threshold') global_calc_omit = traits.Bool(field='globalc.g_omit', xor=['global_calc_mean', 'global_calc_values'], desc='omit global calculation') global_calc_mean = traits.Bool(field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], desc='use mean for global calculation') global_calc_values = traits.List(traits.Float, field='globalc.g_user.global_uval', xor=['global_calc_mean', 'global_calc_omit'], desc='omit global calculation') no_grand_mean_scaling = traits.Bool(field='globalm.gmsca.gmsca_no', desc='do not perform grand mean scaling') global_normalization = traits.Enum(1, 2, 3, field='globalm.glonorm', desc='global normalization None-1, Proportional-2, ANCOVA-3') class FactorialDesignOutputSpec(TraitedSpec): spm_mat_file = File(exists=True, desc='SPM mat file') class FactorialDesign(SPMCommand): """Base class for factorial designs http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=79 """ input_spec = FactorialDesignInputSpec output_spec = FactorialDesignOutputSpec _jobtype = 'stats' _jobname = 'factorial_design' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['spm_mat_dir', 'explicit_mask_file']: return np.array([str(val)], dtype=object) if opt in 
['covariates']: outlist = [] mapping = {'name': 'cname', 'vector': 'c', 'interaction': 'iCFI', 'centering': 'iCC'} for dictitem in val: outdict = {} for key, keyval in dictitem.items(): outdict[mapping[key]] = keyval outlist.append(outdict) return outlist return super(FactorialDesign, self)._format_arg(opt, spec, val) def _parse_inputs(self): """validate spm realign options if set to None ignore """ einputs = super(FactorialDesign, self)._parse_inputs() if not isdefined(self.inputs.spm_mat_dir): einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object) return einputs def _list_outputs(self): outputs = self._outputs().get() spm = os.path.join(os.getcwd(), 'SPM.mat') outputs['spm_mat_file'] = spm return outputs class OneSampleTTestDesignInputSpec(FactorialDesignInputSpec): in_files = traits.List(File(exists=True), field='des.t1.scans', mandatory=True, minlen=2, desc='input files') class OneSampleTTestDesign(FactorialDesign): """Create SPM design for one sample t-test Examples -------- >>> ttest = OneSampleTTestDesign() >>> ttest.inputs.in_files = ['cont1.nii', 'cont2.nii'] >>> ttest.run() # doctest: +SKIP """ input_spec = OneSampleTTestDesignInputSpec def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['in_files']: return np.array(val, dtype=object) return super(OneSampleTTestDesign, self)._format_arg(opt, spec, val) class TwoSampleTTestDesignInputSpec(FactorialDesignInputSpec): # very unlikely that you will have a single image in one group, so setting # parameters to require at least two files in each group [SG] group1_files = traits.List(File(exists=True), field='des.t2.scans1', mandatory=True, minlen=2, desc='Group 1 input files') group2_files = traits.List(File(exists=True), field='des.t2.scans2', mandatory=True, minlen=2, desc='Group 2 input files') dependent = traits.Bool(field='des.t2.dept', desc='Are the measurements dependent between levels') unequal_variance = traits.Bool(field='des.t2.variance', desc='Are the variances equal or unequal between groups') class TwoSampleTTestDesign(FactorialDesign): """Create SPM design for two sample t-test Examples -------- >>> ttest = TwoSampleTTestDesign() >>> ttest.inputs.group1_files = ['cont1.nii', 'cont2.nii'] >>> ttest.inputs.group2_files = ['cont1a.nii', 'cont2a.nii'] >>> ttest.run() # doctest: +SKIP """ input_spec = TwoSampleTTestDesignInputSpec def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['group1_files', 'group2_files']: return np.array(val, dtype=object) return super(TwoSampleTTestDesign, self)._format_arg(opt, spec, val) class PairedTTestDesignInputSpec(FactorialDesignInputSpec): paired_files = traits.List(traits.List(File(exists=True), minlen=2, maxlen=2), field='des.pt.pair', mandatory=True, minlen=2, desc='List of paired files') grand_mean_scaling = traits.Bool(field='des.pt.gmsca', desc='Perform grand mean scaling') ancova = traits.Bool(field='des.pt.ancova', desc='Specify ancova-by-factor regressors') class PairedTTestDesign(FactorialDesign): """Create SPM design for paired t-test Examples -------- >>> pttest = PairedTTestDesign() >>> pttest.inputs.paired_files = [['cont1.nii','cont1a.nii'],['cont2.nii','cont2a.nii']] >>> pttest.run() # doctest: +SKIP """ input_spec = PairedTTestDesignInputSpec def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['paired_files']: return [dict(scans=np.array(files, dtype=object)) for files in val] return super(PairedTTestDesign, 
self)._format_arg(opt, spec, val) class MultipleRegressionDesignInputSpec(FactorialDesignInputSpec): in_files = traits.List(File(exists=True), field='des.mreg.scans', mandatory=True, minlen=2, desc='List of files') include_intercept = traits.Bool(True, field='des.mreg.incint', usedefault=True, desc='Include intercept in design') user_covariates = InputMultiPath(traits.Dict(key_trait=traits.Enum('vector', 'name', 'centering')), field='des.mreg.mcov', desc='covariate dictionary {vector, name, centering}') class MultipleRegressionDesign(FactorialDesign): """Create SPM design for multiple regression Examples -------- >>> mreg = MultipleRegressionDesign() >>> mreg.inputs.in_files = ['cont1.nii','cont2.nii'] >>> mreg.run() # doctest: +SKIP """ input_spec = MultipleRegressionDesignInputSpec def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['in_files']: return np.array(val, dtype=object) if opt in ['user_covariates']: outlist = [] mapping = {'name': 'cname', 'vector': 'c', 'centering': 'iCC'} for dictitem in val: outdict = {} for key, keyval in dictitem.items(): outdict[mapping[key]] = keyval outlist.append(outdict) return outlist return super(MultipleRegressionDesign, self)._format_arg(opt, spec, val) nipype-0.9.2/nipype/interfaces/spm/preprocess.py000066400000000000000000002043011227300005300220030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """SPM wrappers for preprocessing data Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ __docformat__ = 'restructuredtext' # Standard library imports from copy import deepcopy import os # Third-party imports import numpy as np # Local imports from nipype.interfaces.base import (OutputMultiPath, TraitedSpec, isdefined, traits, InputMultiPath, File) from nipype.interfaces.spm.base import (SPMCommand, scans_for_fname, func_is_3d, scans_for_fnames, SPMCommandInputSpec) from nipype.utils.filemanip import (fname_presuffix, filename_to_list, list_to_filename, split_filename) class SliceTimingInputSpec(SPMCommandInputSpec): in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)), File(exists=True)), field='scans', desc='list of filenames to apply slice timing', mandatory=True, copyfile=False) num_slices = traits.Int(field='nslices', desc='number of slices in a volume', mandatory=True) time_repetition = traits.Float(field='tr', desc=('time between volume acquisitions ' '(start to start time)'), mandatory=True) time_acquisition = traits.Float(field='ta', desc=('time of volume acquisition. usually ' 'calculated as TR-(TR/num_slices)'), mandatory=True) slice_order = traits.List(traits.Int(), field='so', desc='1-based order in which slices are acquired', mandatory=True) ref_slice = traits.Int(field='refslice', desc='1-based Number of the reference slice', mandatory=True) out_prefix = traits.String('a', field='prefix', usedefault=True, desc='slicetimed output prefix') class SliceTimingOutputSpec(TraitedSpec): timecorrected_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)), File(exists=True)), desc='slice time corrected files') class SliceTiming(SPMCommand): """Use spm to perform slice timing correction. 
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=19 Examples -------- >>> from nipype.interfaces.spm import SliceTiming >>> st = SliceTiming() >>> st.inputs.in_files = 'functional.nii' >>> st.inputs.num_slices = 32 >>> st.inputs.time_repetition = 6.0 >>> st.inputs.time_acquisition = 6. - 6./32. >>> st.inputs.slice_order = range(32,0,-1) >>> st.inputs.ref_slice = 1 >>> st.run() # doctest: +SKIP """ input_spec = SliceTimingInputSpec output_spec = SliceTimingOutputSpec _jobtype = 'temporal' _jobname = 'st' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt == 'in_files': return scans_for_fnames(filename_to_list(val), keep4d=False, separate_sessions=True) return super(SliceTiming, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['timecorrected_files'] = [] filelist = filename_to_list(self.inputs.in_files) for f in filelist: if isinstance(f, list): run = [fname_presuffix(in_f, prefix=self.inputs.out_prefix) for in_f in f] else: run = fname_presuffix(f, prefix=self.inputs.out_prefix) outputs['timecorrected_files'].append(run) return outputs class RealignInputSpec(SPMCommandInputSpec): in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)), File(exists=True)), field='data', mandatory=True, copyfile=True, desc='list of filenames to realign') jobtype = traits.Enum('estwrite', 'estimate', 'write', desc='one of: estimate, write, estwrite', usedefault=True) quality = traits.Range(low=0.0, high=1.0, field='eoptions.quality', desc='0.1 = fast, 1.0 = precise') fwhm = traits.Range(low=0.0, field='eoptions.fwhm', desc='gaussian smoothing kernel width') separation = traits.Range(low=0.0, field='eoptions.sep', desc='sampling separation in mm') register_to_mean = traits.Bool(True, field='eoptions.rtm', mandatory=True, usedefault=True, desc='Indicate whether realignment is done to the mean image') weight_img = File(exists=True, field='eoptions.weight', desc='filename of weighting image') interp = traits.Range(low=0, high=7, field='eoptions.interp', desc='degree of b-spline used for interpolation') wrap = traits.List(traits.Int(), minlen=3, maxlen=3, field='eoptions.wrap', desc='Check if interpolation should wrap in [x,y,z]') write_which = traits.ListInt([2, 1], field='roptions.which', minlen=2, maxlen=2, usedefault=True, desc='determines which images to reslice') write_interp = traits.Range(low=0, high=7, field='roptions.interp', desc='degree of b-spline used for interpolation') write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3, field='roptions.wrap', desc='Check if interpolation should wrap in [x,y,z]') write_mask = traits.Bool(field='roptions.mask', desc='True/False mask output image') out_prefix = traits.String('r', field='roptions.prefix', usedefault=True, desc='realigned output prefix') class RealignOutputSpec(TraitedSpec): mean_image = File(exists=True, desc='Mean image file from the realignment') modified_in_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)), File(exists=True)), desc='Copies of all files passed to in_files.\ Headers will have been modified to align all\ images with the first, or optionally to first\ do that, extract a mean image, and re-align to\ that mean image.') realigned_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)), File(exists=True)), desc='If jobtype is write or estwrite, these will be the\ resliced files. 
Otherwise, they will be copies of\ in_files that have had their headers rewritten.') realignment_parameters = OutputMultiPath(File(exists=True), desc='Estimated translation and rotation parameters') class Realign(SPMCommand): """Use spm_realign for estimating within modality rigid body alignment http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=25 Examples -------- >>> import nipype.interfaces.spm as spm >>> realign = spm.Realign() >>> realign.inputs.in_files = 'functional.nii' >>> realign.inputs.register_to_mean = True >>> realign.run() # doctest: +SKIP """ input_spec = RealignInputSpec output_spec = RealignOutputSpec _jobtype = 'spatial' _jobname = 'realign' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt == 'in_files': return scans_for_fnames(val, keep4d=True, separate_sessions=True) return super(Realign, self)._format_arg(opt, spec, val) def _parse_inputs(self): """validate spm realign options if set to None ignore """ einputs = super(Realign, self)._parse_inputs() return [{'%s' % (self.inputs.jobtype): einputs[0]}] def _list_outputs(self): outputs = self._outputs().get() resliced_all = self.inputs.write_which[0] > 0 resliced_mean = self.inputs.write_which[1] > 0 if isdefined(self.inputs.in_files): outputs['realignment_parameters'] = [] for imgf in self.inputs.in_files: if isinstance(imgf, list): tmp_imgf = imgf[0] else: tmp_imgf = imgf outputs['realignment_parameters'].append(fname_presuffix(tmp_imgf, prefix='rp_', suffix='.txt', use_ext=False)) if not isinstance(imgf, list) and func_is_3d(imgf): break if self.inputs.jobtype == "estimate": outputs['realigned_files'] = self.inputs.in_files if self.inputs.jobtype == "estimate" or self.inputs.jobtype == "estwrite": outputs['modified_in_files'] = self.inputs.in_files if self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite": if isinstance(self.inputs.in_files[0], list): first_image = self.inputs.in_files[0][0] else: first_image = self.inputs.in_files[0] if resliced_mean: outputs['mean_image'] = fname_presuffix(first_image, prefix='mean') if resliced_all: outputs['realigned_files'] = [] for idx, imgf in enumerate(filename_to_list(self.inputs.in_files)): realigned_run = [] if isinstance(imgf, list): for i, inner_imgf in enumerate(filename_to_list(imgf)): newfile = fname_presuffix(inner_imgf, prefix=self.inputs.out_prefix) if os.path.exists(newfile): realigned_run.append(newfile) continue if (idx == 0) and (i == 0) and \ func_is_3d(inner_imgf): realigned_run.append(fname_presuffix(inner_imgf, prefix='')) else: realigned_run = fname_presuffix(imgf, prefix=self.inputs.out_prefix) if (idx == 0) and func_is_3d(imgf): realigned_run = fname_presuffix(imgf, prefix='') outputs['realigned_files'].append(realigned_run) return outputs class CoregisterInputSpec(SPMCommandInputSpec): target = File(exists=True, field='ref', mandatory=True, desc='reference file to register to', copyfile=False) source = InputMultiPath(File(exists=True), field='source', desc='file to register to target', copyfile=True, mandatory=True) jobtype = traits.Enum('estwrite', 'estimate', 'write', desc='one of: estimate, write, estwrite', usedefault=True) apply_to_files = InputMultiPath(File(exists=True), field='other', desc='files to apply transformation to', copyfile=True) cost_function = traits.Enum('mi', 'nmi', 'ecc', 'ncc', field='eoptions.cost_fun', desc="""cost function, one of: 'mi' - Mutual Information, 'nmi' - Normalised Mutual Information, 'ecc' - Entropy Correlation Coefficient, 'ncc' - Normalised Cross 
Correlation""") fwhm = traits.List(traits.Float(), minlen=2, maxlen=2, field='eoptions.fwhm', desc='gaussian smoothing kernel width (mm)') separation = traits.List(traits.Float(), field='eoptions.sep', desc='sampling separation in mm') tolerance = traits.List(traits.Float(), field='eoptions.tol', desc='acceptable tolerance for each of 12 params') write_interp = traits.Range(low=0, high=7, field='roptions.interp', desc='degree of b-spline used for interpolation') write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3, field='roptions.wrap', desc='Check if interpolation should wrap in [x,y,z]') write_mask = traits.Bool(field='roptions.mask', desc='True/False mask output image') out_prefix = traits.String('r', field='roptions.prefix', usedefault=True, desc='coregistered output prefix') class CoregisterOutputSpec(TraitedSpec): coregistered_source = OutputMultiPath(File(exists=True), desc='Coregistered source files') coregistered_files = OutputMultiPath(File(exists=True), desc='Coregistered other files') class Coregister(SPMCommand): """Use spm_coreg for estimating cross-modality rigid body alignment http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=39 Examples -------- >>> import nipype.interfaces.spm as spm >>> coreg = spm.Coregister() >>> coreg.inputs.target = 'functional.nii' >>> coreg.inputs.source = 'structural.nii' >>> coreg.run() # doctest: +SKIP """ input_spec = CoregisterInputSpec output_spec = CoregisterOutputSpec _jobtype = 'spatial' _jobname = 'coreg' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt == 'target' or (opt == 'source' and self.inputs.jobtype != "write"): return scans_for_fnames(filename_to_list(val), keep4d=True) if opt == 'apply_to_files': return np.array(filename_to_list(val), dtype=object) if opt == 'source' and self.inputs.jobtype == "write": if isdefined(self.inputs.apply_to_files): return scans_for_fnames(val+self.inputs.apply_to_files) else: return scans_for_fnames(val) return super(Coregister, self)._format_arg(opt, spec, val) def _parse_inputs(self): """validate spm coregister options if set to None ignore """ if self.inputs.jobtype == "write": einputs = super(Coregister, self)._parse_inputs(skip=('jobtype', 'apply_to_files')) else: einputs = super(Coregister, self)._parse_inputs(skip=('jobtype')) jobtype = self.inputs.jobtype return [{'%s' % (jobtype): einputs[0]}] def _list_outputs(self): outputs = self._outputs().get() if self.inputs.jobtype == "estimate": if isdefined(self.inputs.apply_to_files): outputs['coregistered_files'] = self.inputs.apply_to_files outputs['coregistered_source'] = self.inputs.source elif self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite": if isdefined(self.inputs.apply_to_files): outputs['coregistered_files'] = [] for imgf in filename_to_list(self.inputs.apply_to_files): outputs['coregistered_files'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix)) outputs['coregistered_source'] = [] for imgf in filename_to_list(self.inputs.source): outputs['coregistered_source'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix)) return outputs class NormalizeInputSpec(SPMCommandInputSpec): template = File(exists=True, field='eoptions.template', desc='template file to normalize to', mandatory=True, xor=['parameter_file'], copyfile=False) source = InputMultiPath(File(exists=True), field='subj.source', desc='file to normalize to template', xor=['parameter_file'], mandatory=True, copyfile=True) jobtype = traits.Enum('estwrite', 'est', 'write', desc='one of: est, 
write, estwrite (opt, estwrite)', usedefault=True) apply_to_files = InputMultiPath(traits.Either(File(exists=True), traits.List(File(exists=True))), field='subj.resample', desc='files to apply transformation to (opt)', copyfile=True) parameter_file = File(field='subj.matname', mandatory=True, xor=['source', 'template'], desc='normalization parameter file*_sn.mat', copyfile=False) source_weight = File(field='subj.wtsrc', desc='name of weighting image for source (opt)', copyfile=False) template_weight = File(field='eoptions.weight', desc='name of weighting image for template (opt)', copyfile=False) source_image_smoothing = traits.Float(field='eoptions.smosrc', desc='source smoothing (opt)') template_image_smoothing = traits.Float(field='eoptions.smoref', desc='template smoothing (opt)') affine_regularization_type = traits.Enum('mni', 'size', 'none', field='eoptions.regype', desc='mni, size, none (opt)') DCT_period_cutoff = traits.Float(field='eoptions.cutoff', desc='Cutoff of for DCT bases (opt)') nonlinear_iterations = traits.Int(field='eoptions.nits', desc='Number of iterations of nonlinear warping (opt)') nonlinear_regularization = traits.Float(field='eoptions.reg', desc='the amount of the regularization for the nonlinear part of the normalization (opt)') write_preserve = traits.Bool(field='roptions.preserve', desc='True/False warped images are modulated (opt,)') write_bounding_box = traits.List(traits.List(traits.Float(), minlen=3, maxlen=3), field='roptions.bb', minlen=2, maxlen=2, desc='3x2-element list of lists (opt)') write_voxel_sizes = traits.List(traits.Float(), field='roptions.vox', minlen=3, maxlen=3, desc='3-element list (opt)') write_interp = traits.Range(low=0, high=7, field='roptions.interp', desc='degree of b-spline used for interpolation') write_wrap = traits.List(traits.Int(), field='roptions.wrap', desc=('Check if interpolation should wrap in [x,y,z] ' '- list of bools (opt)')) out_prefix = traits.String('w', field='roptions.prefix', usedefault=True, desc='normalized output prefix') class NormalizeOutputSpec(TraitedSpec): normalization_parameters = OutputMultiPath(File(exists=True), desc='MAT files containing the normalization parameters') normalized_source = OutputMultiPath(File(exists=True), desc='Normalized source files') normalized_files = OutputMultiPath(File(exists=True), desc='Normalized other files') class Normalize(SPMCommand): """use spm_normalise for warping an image to a template http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=51 Examples -------- >>> import nipype.interfaces.spm as spm >>> norm = spm.Normalize() >>> norm.inputs.source = 'functional.nii' >>> norm.run() # doctest: +SKIP """ input_spec = NormalizeInputSpec output_spec = NormalizeOutputSpec _jobtype = 'spatial' _jobname = 'normalise' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt == 'template': return scans_for_fname(filename_to_list(val)) if opt == 'source': return scans_for_fname(filename_to_list(val)) if opt == 'apply_to_files': return scans_for_fnames(filename_to_list(val)) if opt == 'parameter_file': return np.array([list_to_filename(val)], dtype=object) if opt in ['write_wrap']: if len(val) != 3: raise ValueError('%s must have 3 elements' % opt) return super(Normalize, self)._format_arg(opt, spec, val) def _parse_inputs(self): """validate spm realign options if set to None ignore """ einputs = super(Normalize, self)._parse_inputs(skip=('jobtype', 'apply_to_files')) if isdefined(self.inputs.apply_to_files): inputfiles = 
deepcopy(self.inputs.apply_to_files) if isdefined(self.inputs.source): inputfiles.extend(self.inputs.source) einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles) jobtype = self.inputs.jobtype if jobtype in ['estwrite', 'write']: if not isdefined(self.inputs.apply_to_files): if isdefined(self.inputs.source): einputs[0]['subj']['resample'] = scans_for_fname(self.inputs.source) return [{'%s' % (jobtype): einputs[0]}] def _list_outputs(self): outputs = self._outputs().get() jobtype = self.inputs.jobtype if jobtype.startswith('est'): outputs['normalization_parameters'] = [] for imgf in filename_to_list(self.inputs.source): outputs['normalization_parameters'].append(fname_presuffix(imgf, suffix='_sn.mat', use_ext=False)) outputs['normalization_parameters'] = list_to_filename(outputs['normalization_parameters']) if self.inputs.jobtype == "estimate": if isdefined(self.inputs.apply_to_files): outputs['normalized_files'] = self.inputs.apply_to_files outputs['normalized_source'] = self.inputs.source elif 'write' in self.inputs.jobtype: outputs['normalized_files'] = [] if isdefined(self.inputs.apply_to_files): filelist = filename_to_list(self.inputs.apply_to_files) for f in filelist: if isinstance(f, list): run = [fname_presuffix(in_f, prefix=self.inputs.out_prefix) for in_f in f] else: run = [fname_presuffix(f, prefix=self.inputs.out_prefix)] outputs['normalized_files'].extend(run) if isdefined(self.inputs.source): outputs['normalized_source'] = [] for imgf in filename_to_list(self.inputs.source): outputs['normalized_source'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix)) return outputs class SegmentInputSpec(SPMCommandInputSpec): data = InputMultiPath(File(exists=True), field='data', desc='one scan per subject', copyfile=False, mandatory=True) gm_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.GM', desc="""Options to produce grey matter images: c1*.img, wc1*.img and mwc1*.img. None: [False,False,False], Native Space: [False,False,True], Unmodulated Normalised: [False,True,False], Modulated Normalised: [True,False,False], Native + Unmodulated Normalised: [False,True,True], Native + Modulated Normalised: [True,False,True], Native + Modulated + Unmodulated: [True,True,True], Modulated + Unmodulated Normalised: [True,True,False]""") wm_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.WM', desc="""Options to produce white matter images: c2*.img, wc2*.img and mwc2*.img. None: [False,False,False], Native Space: [False,False,True], Unmodulated Normalised: [False,True,False], Modulated Normalised: [True,False,False], Native + Unmodulated Normalised: [False,True,True], Native + Modulated Normalised: [True,False,True], Native + Modulated + Unmodulated: [True,True,True], Modulated + Unmodulated Normalised: [True,True,False]""") csf_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.CSF', desc="""Options to produce CSF images: c3*.img, wc3*.img and mwc3*.img. 
None: [False,False,False], Native Space: [False,False,True], Unmodulated Normalised: [False,True,False], Modulated Normalised: [True,False,False], Native + Unmodulated Normalised: [False,True,True], Native + Modulated Normalised: [True,False,True], Native + Modulated + Unmodulated: [True,True,True], Modulated + Unmodulated Normalised: [True,True,False]""") save_bias_corrected = traits.Bool(field='output.biascor', desc='True/False produce a bias corrected image') clean_masks = traits.Enum('no', 'light', 'thorough', field='output.cleanup', desc="clean using estimated brain mask ('no','light','thorough')") tissue_prob_maps = traits.List(File(exists=True), field='opts.tpm', desc='list of gray, white & csf prob. (opt,)') gaussians_per_class = traits.List(traits.Int(), field='opts.ngaus', desc='num Gaussians capture intensity distribution') affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none', '', field='opts.regtype', desc='Possible options: "mni", "eastern", "subj", "none" (no reguralisation), "" (no affine registration)') warping_regularization = traits.Float(field='opts.warpreg', desc='Controls balance between parameters and data') warp_frequency_cutoff = traits.Float(field='opts.warpco', desc='Cutoff of DCT bases') bias_regularization = traits.Enum(0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, field='opts.biasreg', desc='no(0) - extremely heavy (10)') bias_fwhm = traits.Enum(30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 'Inf', field='opts.biasfwhm', desc='FWHM of Gaussian smoothness of bias') sampling_distance = traits.Float(field='opts.samp', desc='Sampling distance on data for parameter estimation') mask_image = File(exists=True, field='opts.msk', desc='Binary image to restrict parameter estimation ') class SegmentOutputSpec(TraitedSpec): native_gm_image = File(desc='native space grey probability map') normalized_gm_image = File(desc='normalized grey probability map',) modulated_gm_image = File(desc='modulated, normalized grey probability map') native_wm_image = File(desc='native space white probability map') normalized_wm_image = File(desc='normalized white probability map') modulated_wm_image = File(desc='modulated, normalized white probability map') native_csf_image = File(desc='native space csf probability map') normalized_csf_image = File(desc='normalized csf probability map') modulated_csf_image = File(desc='modulated, normalized csf probability map') modulated_input_image = File(deprecated='0.10', new_name='bias_corrected_image', desc='bias-corrected version of input image') bias_corrected_image = File(desc='bias-corrected version of input image') transformation_mat = File(exists=True, desc='Normalization transformation') inverse_transformation_mat = File(exists=True, desc='Inverse normalization info') class Segment(SPMCommand): """use spm_segment to separate structural images into different tissue classes. 
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=43 Examples -------- >>> import nipype.interfaces.spm as spm >>> seg = spm.Segment() >>> seg.inputs.data = 'structural.nii' >>> seg.run() # doctest: +SKIP """ _jobtype = 'spatial' _jobname = 'preproc' input_spec = SegmentInputSpec output_spec = SegmentOutputSpec def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ clean_masks_dict = {'no': 0, 'light': 1, 'thorough': 2} if opt in ['data', 'tissue_prob_maps']: if isinstance(val, list): return scans_for_fnames(val) else: return scans_for_fname(val) if 'output_type' in opt: return [int(v) for v in val] if opt == 'mask_image': return scans_for_fname(val) if opt == 'clean_masks': return clean_masks_dict[val] return super(Segment, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() f = self.inputs.data[0] for tidx, tissue in enumerate(['gm', 'wm', 'csf']): outtype = '%s_output_type' % tissue if isdefined(getattr(self.inputs, outtype)): for idx, (image, prefix) in enumerate([('modulated', 'mw'), ('normalized', 'w'), ('native', '')]): if getattr(self.inputs, outtype)[idx]: outfield = '%s_%s_image' % (image, tissue) outputs[outfield] = fname_presuffix(f, prefix='%sc%d' % (prefix, tidx+1)) if isdefined(self.inputs.save_bias_corrected) and \ self.inputs.save_bias_corrected: outputs['bias_corrected_image'] = fname_presuffix(f, prefix='m') t_mat = fname_presuffix(f, suffix='_seg_sn.mat', use_ext=False) outputs['transformation_mat'] = t_mat invt_mat = fname_presuffix(f, suffix='_seg_inv_sn.mat', use_ext=False) outputs['inverse_transformation_mat'] = invt_mat return outputs class NewSegmentInputSpec(SPMCommandInputSpec): channel_files = InputMultiPath(File(exists=True), desc="A list of files to be segmented", field='channel', copyfile=False, mandatory=True) channel_info = traits.Tuple(traits.Float(), traits.Float(), traits.Tuple(traits.Bool, traits.Bool), desc="""A tuple with the following fields: - bias reguralisation (0-10) - FWHM of Gaussian smoothness of bias - which maps to save (Corrected, Field) - a tuple of two boolean values""", field='channel') tissues = traits.List(traits.Tuple(traits.Tuple(File(exists=True), traits.Int()), traits.Int(), traits.Tuple(traits.Bool, traits.Bool), traits.Tuple(traits.Bool, traits.Bool)), desc="""A list of tuples (one per tissue) with the following fields: - tissue probability map (4D), 1-based index to frame - number of gaussians - which maps to save [Native, DARTEL] - a tuple of two boolean values - which maps to save [Modulated, Unmodualted] - a tuple of two boolean values""", field='tissue') affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none', field='warp.affreg', desc='mni, eastern, subj, none ') warping_regularization = traits.Float(field='warp.reg', desc='Aproximate distance between sampling points.') sampling_distance = traits.Float(field='warp.samp', desc='Sampling distance on data for parameter estimation') write_deformation_fields = traits.List(traits.Bool(), minlen=2, maxlen=2, field='warp.write', desc="Which deformation fields to write:[Inverse, Forward]") class NewSegmentOutputSpec(TraitedSpec): native_class_images = traits.List(traits.List(File(exists=True)), desc='native space probability maps') dartel_input_images = traits.List(traits.List(File(exists=True)), desc='dartel imported class images') normalized_class_images = traits.List(traits.List(File(exists=True)), desc='normalized class images') modulated_class_images = 
traits.List(traits.List(File(exists=True)), desc='modulated+normalized class images') transformation_mat = OutputMultiPath(File(exists=True), desc='Normalization transformation') bias_corrected_images = OutputMultiPath(File(exists=True), desc='bias corrected images') bias_field_images = OutputMultiPath(File(exists=True), desc='bias field images') forward_deformation_field = OutputMultiPath(File(exists=True)) inverse_deformation_field = OutputMultiPath(File(exists=True)) class NewSegment(SPMCommand): """Use spm_preproc8 (New Segment) to separate structural images into different tissue classes. Supports multiple modalities. NOTE: This interface currently supports single channel input only http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=185 Examples -------- >>> import nipype.interfaces.spm as spm >>> seg = spm.NewSegment() >>> seg.inputs.channel_files = 'structural.nii' >>> seg.inputs.channel_info = (0.0001, 60, (True, True)) >>> seg.run() # doctest: +SKIP For VBM pre-processing [http://www.fil.ion.ucl.ac.uk/~john/misc/VBMclass10.pdf], TPM.nii should be replaced by /path/to/spm8/toolbox/Seg/TPM.nii >>> seg = NewSegment() >>> seg.inputs.channel_files = 'structural.nii' >>> tissue1 = (('TPM.nii', 1), 2, (True,True), (False, False)) >>> tissue2 = (('TPM.nii', 2), 2, (True,True), (False, False)) >>> tissue3 = (('TPM.nii', 3), 2, (True,False), (False, False)) >>> tissue4 = (('TPM.nii', 4), 2, (False,False), (False, False)) >>> tissue5 = (('TPM.nii', 5), 2, (False,False), (False, False)) >>> seg.inputs.tissues = [tissue1, tissue2, tissue3, tissue4, tissue5] >>> seg.run() # doctest: +SKIP """ input_spec = NewSegmentInputSpec output_spec = NewSegmentOutputSpec _jobtype = 'tools' _jobname = 'preproc8' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['channel_files', 'channel_info']: # structure have to be recreated, because of some weird traits error new_channel = {} new_channel['vols'] = scans_for_fnames(self.inputs.channel_files) if isdefined(self.inputs.channel_info): info = self.inputs.channel_info new_channel['biasreg'] = info[0] new_channel['biasfwhm'] = info[1] new_channel['write'] = [int(info[2][0]), int(info[2][1])] return [new_channel] elif opt == 'tissues': new_tissues = [] for tissue in val: new_tissue = {} new_tissue['tpm'] = np.array([','.join([tissue[0][0], str(tissue[0][1])])], dtype=object) new_tissue['ngaus'] = tissue[1] new_tissue['native'] = [int(tissue[2][0]), int(tissue[2][1])] new_tissue['warped'] = [int(tissue[3][0]), int(tissue[3][1])] new_tissues.append(new_tissue) return new_tissues elif opt == 'write_deformation_fields': return super(NewSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])]) else: return super(NewSegment, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['native_class_images'] = [] outputs['dartel_input_images'] = [] outputs['normalized_class_images'] = [] outputs['modulated_class_images'] = [] outputs['transformation_mat'] = [] outputs['bias_corrected_images'] = [] outputs['bias_field_images'] = [] outputs['inverse_deformation_field'] = [] outputs['forward_deformation_field'] = [] n_classes = 5 if isdefined(self.inputs.tissues): n_classes = len(self.inputs.tissues) for i in range(n_classes): outputs['native_class_images'].append([]) outputs['dartel_input_images'].append([]) outputs['normalized_class_images'].append([]) outputs['modulated_class_images'].append([]) for filename in self.inputs.channel_files: pth, base, ext = 
split_filename(filename) if isdefined(self.inputs.tissues): for i, tissue in enumerate(self.inputs.tissues): if tissue[2][0]: outputs['native_class_images'][i].append(os.path.join(pth, "c%d%s.nii" % (i+1, base))) if tissue[2][1]: outputs['dartel_input_images'][i].append(os.path.join(pth, "rc%d%s.nii" % (i+1, base))) if tissue[3][0]: outputs['normalized_class_images'][i].append(os.path.join(pth, "wc%d%s.nii" % (i+1, base))) if tissue[3][1]: outputs['modulated_class_images'][i].append(os.path.join(pth, "mwc%d%s.nii" % (i+1, base))) else: for i in range(n_classes): outputs['native_class_images'][i].append(os.path.join(pth, "c%d%s.nii" % (i+1, base))) outputs['transformation_mat'].append(os.path.join(pth, "%s_seg8.mat" % base)) if isdefined(self.inputs.write_deformation_fields): if self.inputs.write_deformation_fields[0]: outputs['inverse_deformation_field'].append(os.path.join(pth, "iy_%s.nii" % base)) if self.inputs.write_deformation_fields[1]: outputs['forward_deformation_field'].append(os.path.join(pth, "y_%s.nii" % base)) if isdefined(self.inputs.channel_info): if self.inputs.channel_info[2][0]: outputs['bias_corrected_images'].append(os.path.join(pth, "m%s.nii" % (base))) if self.inputs.channel_info[2][1]: outputs['bias_field_images'].append(os.path.join(pth, "BiasField_%s.nii" % (base))) return outputs class SmoothInputSpec(SPMCommandInputSpec): in_files = InputMultiPath(File(exists=True), field='data', desc='list of files to smooth', mandatory=True, copyfile=False) fwhm = traits.Either(traits.List(traits.Float(), minlen=3, maxlen=3), traits.Float(), field='fwhm', desc='3-list of fwhm for each dimension (opt)') data_type = traits.Int(field='dtype', desc='Data type of the output images (opt)') implicit_masking = traits.Bool(field='im', desc=('A mask implied by a particular ' 'voxel value')) out_prefix = traits.String('s', field='prefix', usedefault=True, desc='smoothed output prefix') class SmoothOutputSpec(TraitedSpec): smoothed_files = OutputMultiPath(File(exists=True), desc='smoothed files') class Smooth(SPMCommand): """Use spm_smooth for 3D Gaussian smoothing of image volumes. 
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=57 Examples -------- >>> import nipype.interfaces.spm as spm >>> smooth = spm.Smooth() >>> smooth.inputs.in_files = 'functional.nii' >>> smooth.inputs.fwhm = [4, 4, 4] >>> smooth.run() # doctest: +SKIP """ input_spec = SmoothInputSpec output_spec = SmoothOutputSpec _jobtype = 'spatial' _jobname = 'smooth' def _format_arg(self, opt, spec, val): if opt in ['in_files']: return scans_for_fnames(filename_to_list(val)) if opt == 'fwhm': if not isinstance(val, list): return [val, val, val] if isinstance(val, list): if len(val) == 1: return [val[0], val[0], val[0]] else: return val return super(Smooth, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['smoothed_files'] = [] for imgf in filename_to_list(self.inputs.in_files): outputs['smoothed_files'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix)) return outputs class DARTELInputSpec(SPMCommandInputSpec): image_files = traits.List(traits.List(File(exists=True)), desc="A list of files to be segmented", field='warp.images', copyfile=False, mandatory=True) template_prefix = traits.Str('Template', usedefault=True, field='warp.settings.template', desc='Prefix for template') regularization_form = traits.Enum('Linear', 'Membrane', 'Bending', field='warp.settings.rform', desc='Form of regularization energy term') iteration_parameters = traits.List(traits.Tuple(traits.Range(1, 10), traits.Tuple(traits.Float, traits.Float, traits.Float), traits.Enum(1, 2, 4, 8, 16, 32, 64, 128, 256, 512), traits.Enum(0, 0.5, 1, 2, 4, 8, 16, 32)), minlen=3, maxlen=12, field='warp.settings.param', desc="""List of tuples for each iteration - Inner iterations - Regularization parameters - Time points for deformation model - smoothing parameter """) optimization_parameters = traits.Tuple(traits.Float, traits.Range(1, 8), traits.Range(1, 8), field='warp.settings.optim', desc="""Optimization settings a tuple - LM regularization - cycles of multigrid solver - relaxation iterations """) class DARTELOutputSpec(TraitedSpec): final_template_file = File(exists=True, desc='final DARTEL template') template_files = traits.List(File(exists=True), desc='Templates from different stages of iteration') dartel_flow_fields = traits.List(File(exists=True), desc='DARTEL flow fields') class DARTEL(SPMCommand): """Use spm DARTEL to create a template and flow fields http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=197 Examples -------- >>> import nipype.interfaces.spm as spm >>> dartel = spm.DARTEL() >>> dartel.inputs.image_files = [['rc1s1.nii','rc1s2.nii'],['rc2s1.nii', 'rc2s2.nii']] >>> dartel.run() # doctest: +SKIP """ input_spec = DARTELInputSpec output_spec = DARTELOutputSpec _jobtype = 'tools' _jobname = 'dartel' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['image_files']: return scans_for_fnames(val, keep4d=True, separate_sessions=True) elif opt == 'regularization_form': mapper = {'Linear': 0, 'Membrane': 1, 'Bending': 2} return mapper[val] elif opt == 'iteration_parameters': params = [] for param in val: new_param = {} new_param['its'] = param[0] new_param['rparam'] = list(param[1]) new_param['K'] = param[2] new_param['slam'] = param[3] params.append(new_param) return params elif opt == 'optimization_parameters': new_param = {} new_param['lmreg'] = val[0] new_param['cyc'] = val[1] new_param['its'] = val[2] return [new_param] else: return super(DARTEL, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = 
self._outputs().get() outputs['template_files'] = [] for i in range(6): outputs['template_files'].append(os.path.realpath('%s_%d.nii' % (self.inputs.template_prefix, i+1))) outputs['final_template_file'] = os.path.realpath('%s_6.nii' % self.inputs.template_prefix) outputs['dartel_flow_fields'] = [] for filename in self.inputs.image_files[0]: pth, base, ext = split_filename(filename) outputs['dartel_flow_fields'].append(os.path.realpath('u_%s_%s%s' % (base, self.inputs.template_prefix, ext))) return outputs class DARTELNorm2MNIInputSpec(SPMCommandInputSpec): template_file = File(exists=True, desc="DARTEL template", field='mni_norm.template', copyfile=False, mandatory=True) flowfield_files = InputMultiPath(File(exists=True), desc="DARTEL flow fields u_rc1*", field='mni_norm.data.subjs.flowfields', mandatory=True) apply_to_files = InputMultiPath(File(exists=True), desc="Files to apply the transform to", field='mni_norm.data.subjs.images', mandatory=True, copyfile=False) voxel_size = traits.Tuple(traits.Float, traits.Float, traits.Float, desc="Voxel sizes for output file", field='mni_norm.vox') bounding_box = traits.Tuple(traits.Float, traits.Float, traits.Float, traits.Float, traits.Float, traits.Float, desc="Voxel sizes for output file", field='mni_norm.bb') modulate = traits.Bool(field='mni_norm.preserve', desc="Modulate out images - no modulation preserves concentrations") fwhm = traits.Either(traits.List(traits.Float(), minlen=3, maxlen=3), traits.Float(), field='mni_norm.fwhm', desc='3-list of fwhm for each dimension') class DARTELNorm2MNIOutputSpec(TraitedSpec): normalized_files = OutputMultiPath(File(exists=True), desc='Normalized files in MNI space') normalization_parameter_file = File(exists=True, desc='Transform parameters to MNI space') class DARTELNorm2MNI(SPMCommand): """Use spm DARTEL to normalize data to MNI space http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=200 Examples -------- >>> import nipype.interfaces.spm as spm >>> nm = spm.DARTELNorm2MNI() >>> nm.inputs.template_file = 'Template_6.nii' >>> nm.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s3_Template.nii'] >>> nm.inputs.apply_to_files = ['c1s1.nii', 'c1s3.nii'] >>> nm.inputs.modulate = True >>> nm.run() # doctest: +SKIP """ input_spec = DARTELNorm2MNIInputSpec output_spec = DARTELNorm2MNIOutputSpec _jobtype = 'tools' _jobname = 'dartel' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['template_file']: return np.array([val], dtype=object) elif opt in ['flowfield_files']: return scans_for_fnames(val, keep4d=True) elif opt in ['apply_to_files']: return scans_for_fnames(val, keep4d=True, separate_sessions=True) elif opt == 'voxel_size': return list(val) elif opt == 'bounding_box': return list(val) elif opt == 'fwhm': if isinstance(val, list): return val else: return [val, val, val] else: return super(DARTELNorm2MNI, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() pth, base, ext = split_filename(self.inputs.template_file) outputs['normalization_parameter_file'] = os.path.realpath(base+'_2mni.mat') outputs['normalized_files'] = [] prefix = "w" if isdefined(self.inputs.modulate) and self.inputs.modulate: prefix = 'm' + prefix if not isdefined(self.inputs.fwhm) or self.inputs.fwhm > 0: prefix = 's' + prefix for filename in self.inputs.apply_to_files: pth, base, ext = split_filename(filename) outputs['normalized_files'].append(os.path.realpath('%s%s%s' % (prefix, base, ext))) return outputs class 
CreateWarpedInputSpec(SPMCommandInputSpec): image_files = InputMultiPath(File(exists=True), desc="A list of files to be warped", field='crt_warped.images', copyfile=False, mandatory=True) flowfield_files = InputMultiPath(File(exists=True), desc="DARTEL flow fields u_rc1*", field='crt_warped.flowfields', copyfile=False, mandatory=True) iterations = traits.Range(low=0, high=9, desc=("The number of iterations: log2(number of " "time steps)"), field='crt_warped.K') interp = traits.Range(low=0, high=7, field='crt_warped.interp', desc='degree of b-spline used for interpolation') class CreateWarpedOutputSpec(TraitedSpec): warped_files = traits.List(File(exists=True, desc='final warped files')) class CreateWarped(SPMCommand): """Apply a flow field estimated by DARTEL to create warped images http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=202 Examples -------- >>> import nipype.interfaces.spm as spm >>> create_warped = spm.CreateWarped() >>> create_warped.inputs.image_files = ['rc1s1.nii', 'rc1s2.nii'] >>> create_warped.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s2_Template.nii'] >>> create_warped.run() # doctest: +SKIP """ input_spec = CreateWarpedInputSpec output_spec = CreateWarpedOutputSpec _jobtype = 'tools' _jobname = 'dartel' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['image_files']: return scans_for_fnames(val, keep4d=True, separate_sessions=True) if opt in ['flowfield_files']: return scans_for_fnames(val, keep4d=True) else: return super(CreateWarped, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['warped_files'] = [] for filename in self.inputs.image_files: pth, base, ext = split_filename(filename) outputs['warped_files'].append(os.path.realpath('w%s%s' % (base, ext))) return outputs class ApplyDeformationFieldInputSpec(SPMCommandInputSpec): in_files = InputMultiPath(File(exists=True), mandatory=True, field='fnames') deformation_field = File(exists=True, mandatory=True, field='comp{1}.def') reference_volume = File(exists=True, mandatory=True, field='comp{2}.id.space') interp = traits.Range(low=0, high=7, field='interp', desc='degree of b-spline used for interpolation') class ApplyDeformationFieldOutputSpec(TraitedSpec): out_files = OutputMultiPath(File(exists=True)) class ApplyDeformations(SPMCommand): input_spec = ApplyDeformationFieldInputSpec output_spec = ApplyDeformationFieldOutputSpec _jobtype = 'util' _jobname = 'defs' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['deformation_field', 'reference_volume']: val = [val] if opt in ['deformation_field']: return scans_for_fnames(val, keep4d=True, separate_sessions=False) if opt in ['in_files', 'reference_volume']: return scans_for_fnames(val, keep4d=False, separate_sessions=False) else: return super(ApplyDeformations, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['out_files'] = [] for filename in self.inputs.in_files: _, fname = os.path.split(filename) outputs['out_files'].append(os.path.realpath('w%s' % fname)) return outputs class VBMSegmentInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( File(exists=True), desc="A list of files to be segmented", field='estwrite.data', copyfile=False, mandatory=True) tissues = File( exists=True, field='estwrite.tpm', desc='tissue probability map') gaussians_per_class = traits.Tuple( (2, 2, 2, 3, 4, 2), *([traits.Int()]*6), usedefault=True, desc='number of 
gaussians for each tissue class') bias_regularization = traits.Enum( 0.0001, (0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10), field='estwrite.opts.biasreg', usedefault=True, desc='no(0) - extremely heavy (10)') bias_fwhm = traits.Enum( 60, (30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 'Inf'), field='estwrite.opts.biasfwhm', usedefault=True, desc='FWHM of Gaussian smoothness of bias') sampling_distance = traits.Float( 3, usedefault=True, field='estwrite.opts.samp', desc='Sampling distance on data for parameter estimation') warping_regularization = traits.Float( 4, usedefault=True, field='estwrite.opts.warpreg', desc='Controls balance between parameters and data') spatial_normalization = traits.Enum( 'high', 'low', usedefault=True,) dartel_template = File( exists=True, field='estwrite.extopts.dartelwarp.normhigh.darteltpm') use_sanlm_denoising_filter = traits.Range( 0, 2, 2, usedefault=True, field='estwrite.extopts.sanlm', desc="0=No denoising, 1=denoising,2=denoising multi-threaded") mrf_weighting = traits.Float( 0.15, usedefault=True, field='estwrite.extopts.mrf') cleanup_partitions = traits.Int( 1, usedefault=True, field='estwrite.extopts.cleanup', desc="0=None,1=light,2=thorough") display_results = traits.Bool( True, usedefault=True, field='estwrite.extopts.print') gm_native = traits.Bool( False, usedefault=True, field='estwrite.output.GM.native',) gm_normalized = traits.Bool( False, usedefault=True, field='estwrite.output.GM.warped',) gm_modulated_normalized = traits.Range( 0, 2, 2, usedefault=True, field='estwrite.output.GM.modulated', desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only') gm_dartel = traits.Range( 0, 2, 0, usedefault=True, field='estwrite.output.GM.dartel', desc="0=None,1=rigid(SPM8 default),2=affine") wm_native = traits.Bool( False, usedefault=True, field='estwrite.output.WM.native',) wm_normalized = traits.Bool( False, usedefault=True, field='estwrite.output.WM.warped',) wm_modulated_normalized = traits.Range( 0, 2, 2, usedefault=True, field='estwrite.output.WM.modulated', desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only') wm_dartel = traits.Range( 0, 2, 0, usedefault=True, field='estwrite.output.WM.dartel', desc="0=None,1=rigid(SPM8 default),2=affine") csf_native = traits.Bool( False, usedefault=True, field='estwrite.output.CSF.native',) csf_normalized = traits.Bool( False, usedefault=True, field='estwrite.output.CSF.warped',) csf_modulated_normalized = traits.Range( 0, 2, 2, usedefault=True, field='estwrite.output.CSF.modulated', desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only') csf_dartel = traits.Range( 0, 2, 0, usedefault=True, field='estwrite.output.CSF.dartel', desc="0=None,1=rigid(SPM8 default),2=affine") bias_corrected_native = traits.Bool( False, usedefault=True, field='estwrite.output.bias.native',) bias_corrected_normalized = traits.Bool( True, usedefault=True, field='estwrite.output.bias.warped',) bias_corrected_affine = traits.Bool( False, usedefault=True, field='estwrite.output.bias.affine',) pve_label_native = traits.Bool( False, usedefault=True, field='estwrite.output.label.native') pve_label_normalized = traits.Bool( False, usedefault=True, field='estwrite.output.label.warped') pve_label_dartel = traits.Range( 0, 2, 0, usedefault=True, field='estwrite.output.label.dartel', desc="0=None,1=rigid(SPM8 default),2=affine") jacobian_determinant = traits.Bool( False, usedefault=True, field='estwrite.jacobian.warped') deformation_field = traits.Tuple( (0, 0), traits.Bool, traits.Bool, usedefault=True, 
field='estwrite.output.warps', desc='forward and inverse field') class VBMSegmentOuputSpec(TraitedSpec): native_class_images = traits.List(traits.List(File(exists=True)), desc='native space probability maps') dartel_input_images = traits.List(traits.List(File(exists=True)), desc='dartel imported class images') normalized_class_images = traits.List(traits.List(File(exists=True)), desc='normalized class images') modulated_class_images = traits.List(traits.List(File(exists=True)), desc='modulated+normalized class images') transformation_mat = OutputMultiPath(File(exists=True), desc='Normalization transformation') bias_corrected_images = OutputMultiPath( File(exists=True), desc='bias corrected images') normalized_bias_corrected_images = OutputMultiPath( File(exists=True), desc='bias corrected images') pve_label_native_images = OutputMultiPath(File(exists=True)) pve_label_normalized_images = OutputMultiPath(File(exists=True)) pve_label_registered_images = OutputMultiPath(File(exists=True)) forward_deformation_field = OutputMultiPath(File(exists=True)) inverse_deformation_field = OutputMultiPath(File(exists=True)) jacobian_determinant_images = OutputMultiPath(File(exists=True)) class VBMSegment(SPMCommand): """Use VBM8 toolbox to separate structural images into different tissue classes. Example ------- >>> import nipype.interfaces.spm as spm >>> seg = spm.VBMSegment() >>> seg.inputs.tissues = 'TPM.nii' >>> seg.inputs.dartel_template = 'Template_1_IXI550_MNI152.nii' >>> seg.inputs.bias_corrected_native = True >>> seg.inputs.gm_native = True >>> seg.inputs.wm_native = True >>> seg.inputs.csf_native = True >>> seg.inputs.pve_label_native = True >>> seg.inputs.deformation_field = (True, False) >>> seg.run() # doctest: +SKIP """ input_spec = VBMSegmentInputSpec output_spec = VBMSegmentOuputSpec _jobtype = 'tools' _jobname = 'vbm8' def _list_outputs(self): outputs = self._outputs().get() do_dartel = self.inputs.spatial_normalization dartel_px = '' if do_dartel: dartel_px = 'r' outputs['native_class_images'] = [[], [], []] outputs['dartel_input_images'] = [[], [], []] outputs['normalized_class_images'] = [[], [], []] outputs['modulated_class_images'] = [[], [], []] outputs['transformation_mat'] = [] outputs['bias_corrected_images'] = [] outputs['normalized_bias_corrected_images'] = [] outputs['inverse_deformation_field'] = [] outputs['forward_deformation_field'] = [] outputs['jacobian_determinant_images'] = [] outputs['pve_label_native_images'] = [] outputs['pve_label_normalized_images'] = [] outputs['pve_label_registered_images'] = [] for filename in self.inputs.in_files: pth, base, ext = split_filename(filename) outputs['transformation_mat'].append( os.path.join(pth, "%s_seg8.mat" % base)) for i, tis in enumerate(['gm', 'wm', 'csf']): # native space if getattr(self.inputs, '%s_native' % tis): outputs['native_class_images'][i].append( os.path.join(pth, "p%d%s.nii" % (i+1, base))) if getattr(self.inputs, '%s_dartel' % tis) == 1: outputs['dartel_input_images'][i].append( os.path.join(pth, "rp%d%s.nii" % (i+1, base))) elif getattr(self.inputs, '%s_dartel' % tis) == 2: outputs['dartel_input_images'][i].append( os.path.join(pth, "rp%d%s_affine.nii" % (i+1, base))) # normalized space if getattr(self.inputs, '%s_normalized' % tis): outputs['normalized_class_images'][i].append( os.path.join(pth, "w%sp%d%s.nii" % (dartel_px, i+1, base))) if getattr(self.inputs, '%s_modulated_normalized' % tis) == 1: outputs['modulated_class_images'][i].append(os.path.join( pth, "mw%sp%d%s.nii" % (dartel_px, i+1, base))) elif 
getattr(self.inputs, '%s_modulated_normalized' % tis) == 2: outputs['normalized_class_images'][i].append(os.path.join( pth, "m0w%sp%d%s.nii" % (dartel_px, i+1, base))) if self.inputs.pve_label_native: outputs['pve_label_native_images'].append( os.path.join(pth, "p0%s.nii" % (base))) if self.inputs.pve_label_normalized: outputs['pve_label_normalized_images'].append( os.path.join(pth, "w%sp0%s.nii" % (dartel_px, base))) if self.inputs.pve_label_dartel == 1: outputs['pve_label_registered_images'].append( os.path.join(pth, "rp0%s.nii" % (base))) elif self.inputs.pve_label_dartel == 2: outputs['pve_label_registered_images'].append( os.path.join(pth, "rp0%s_affine.nii" % (base))) if self.inputs.bias_corrected_native: outputs['bias_corrected_images'].append( os.path.join(pth, "m%s.nii" % (base))) if self.inputs.bias_corrected_normalized: outputs['normalized_bias_corrected_images'].append( os.path.join(pth, "wm%s%s.nii" % (dartel_px, base))) if self.inputs.deformation_field[0]: outputs['forward_deformation_field'].append( os.path.join(pth, "y_%s%s.nii" % (dartel_px, base))) if self.inputs.deformation_field[1]: outputs['inverse_deformation_field'].append( os.path.join(pth, "iy_%s%s.nii" % (dartel_px, base))) if self.inputs.jacobian_determinant and do_dartel: outputs['jacobian_determinant_images'].append( os.path.join(pth, "jac_wrp1%s.nii" % (base))) return outputs def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['in_files']: return scans_for_fnames(val, keep4d=True) elif opt in ['spatial_normalization']: if val == 'low': return {'normlow': []} elif opt in ['dartel_template']: return np.array([val], dtype=object) elif opt in ['deformation_field']: return super(VBMSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])]) else: return super(VBMSegment, self)._format_arg(opt, spec, val) def _parse_inputs(self): if self.inputs.spatial_normalization == 'low': einputs = super(VBMSegment, self)._parse_inputs( skip=('spatial_normalization', 'dartel_template')) einputs[0]['estwrite']['extopts']['dartelwarp'] = {'normlow': 1} return einputs else: return super(VBMSegment, self)._parse_inputs(skip=('spatial_normalization')) nipype-0.9.2/nipype/interfaces/spm/setup.py000066400000000000000000000007101227300005300207540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('spm', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/interfaces/spm/tests/000077500000000000000000000000001227300005300204065ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py000066400000000000000000000022711227300005300252360ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.utils import Analyze2nii def test_Analyze2nii_inputs(): input_map = dict(analyze_file=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = Analyze2nii.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): 
yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Analyze2nii_outputs(): output_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), nifti_file=dict(), paths=dict(), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) outputs = Analyze2nii.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py000066400000000000000000000023061227300005300265100ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.preprocess import ApplyDeformations def test_ApplyDeformations_inputs(): input_map = dict(deformation_field=dict(field='comp{1}.def', mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(field='fnames', mandatory=True, ), interp=dict(field='interp', ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), reference_volume=dict(field='comp{2}.id.space', mandatory=True, ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = ApplyDeformations.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ApplyDeformations_outputs(): output_map = dict(out_files=dict(), ) outputs = ApplyDeformations.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py000066400000000000000000000027111227300005300276610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.utils import ApplyInverseDeformation def test_ApplyInverseDeformation_inputs(): input_map = dict(bounding_box=dict(field='comp{1}.inv.comp{1}.sn2def.bb', ), deformation=dict(field='comp{1}.inv.comp{1}.sn2def.matname', xor=['deformation_field'], ), deformation_field=dict(field='comp{1}.inv.comp{1}.def', xor=['deformation'], ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(field='fnames', mandatory=True, ), interpolation=dict(field='interp', ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), target=dict(field='comp{1}.inv.space', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), voxel_sizes=dict(field='comp{1}.inv.comp{1}.sn2def.vox', ), ) inputs = ApplyInverseDeformation.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ApplyInverseDeformation_outputs(): output_map = dict(out_files=dict(), ) outputs = ApplyInverseDeformation.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py000066400000000000000000000020221227300005300260240ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.utils import ApplyTransform def test_ApplyTransform_inputs(): input_map = dict(ignore_exception=dict(nohash=True, 
usedefault=True, ), in_file=dict(copyfile=True, mandatory=True, ), mat=dict(mandatory=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = ApplyTransform.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ApplyTransform_outputs(): output_map = dict(out_file=dict(), ) outputs = ApplyTransform.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py000066400000000000000000000021131227300005300257770ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.utils import CalcCoregAffine def test_CalcCoregAffine_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), invmat=dict(), mat=dict(), matlab_cmd=dict(), mfile=dict(usedefault=True, ), moving=dict(copyfile=False, mandatory=True, ), paths=dict(), target=dict(mandatory=True, ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = CalcCoregAffine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CalcCoregAffine_outputs(): output_map = dict(invmat=dict(), mat=dict(), ) outputs = CalcCoregAffine.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_Coregister.py000066400000000000000000000031731227300005300251610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.preprocess import Coregister def test_Coregister_inputs(): input_map = dict(apply_to_files=dict(copyfile=True, field='other', ), cost_function=dict(field='eoptions.cost_fun', ), fwhm=dict(field='eoptions.fwhm', ), ignore_exception=dict(nohash=True, usedefault=True, ), jobtype=dict(usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), out_prefix=dict(field='roptions.prefix', usedefault=True, ), paths=dict(), separation=dict(field='eoptions.sep', ), source=dict(copyfile=True, field='source', mandatory=True, ), target=dict(copyfile=False, field='ref', mandatory=True, ), tolerance=dict(field='eoptions.tol', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), write_interp=dict(field='roptions.interp', ), write_mask=dict(field='roptions.mask', ), write_wrap=dict(field='roptions.wrap', ), ) inputs = Coregister.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Coregister_outputs(): output_map = dict(coregistered_files=dict(), coregistered_source=dict(), ) outputs = Coregister.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_CreateWarped.py000066400000000000000000000023331227300005300254160ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing 
import assert_equal from nipype.interfaces.spm.preprocess import CreateWarped def test_CreateWarped_inputs(): input_map = dict(flowfield_files=dict(copyfile=False, field='crt_warped.flowfields', mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), image_files=dict(copyfile=False, field='crt_warped.images', mandatory=True, ), interp=dict(field='crt_warped.interp', ), iterations=dict(field='crt_warped.K', ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = CreateWarped.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CreateWarped_outputs(): output_map = dict(warped_files=dict(), ) outputs = CreateWarped.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_DARTEL.py000066400000000000000000000025131227300005300240230ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.preprocess import DARTEL def test_DARTEL_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), image_files=dict(copyfile=False, field='warp.images', mandatory=True, ), iteration_parameters=dict(field='warp.settings.param', ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), optimization_parameters=dict(field='warp.settings.optim', ), paths=dict(), regularization_form=dict(field='warp.settings.rform', ), template_prefix=dict(field='warp.settings.template', usedefault=True, ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = DARTEL.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DARTEL_outputs(): output_map = dict(dartel_flow_fields=dict(), final_template_file=dict(), template_files=dict(), ) outputs = DARTEL.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py000066400000000000000000000027241227300005300253110ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.preprocess import DARTELNorm2MNI def test_DARTELNorm2MNI_inputs(): input_map = dict(apply_to_files=dict(copyfile=False, field='mni_norm.data.subjs.images', mandatory=True, ), bounding_box=dict(field='mni_norm.bb', ), flowfield_files=dict(field='mni_norm.data.subjs.flowfields', mandatory=True, ), fwhm=dict(field='mni_norm.fwhm', ), ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), modulate=dict(field='mni_norm.preserve', ), paths=dict(), template_file=dict(copyfile=False, field='mni_norm.template', mandatory=True, ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), voxel_size=dict(field='mni_norm.vox', ), ) inputs = DARTELNorm2MNI.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DARTELNorm2MNI_outputs(): output_map = dict(normalization_parameter_file=dict(), 
normalized_files=dict(), ) outputs = DARTELNorm2MNI.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_DicomImport.py000066400000000000000000000023611227300005300252770ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.utils import DicomImport def test_DicomImport_inputs(): input_map = dict(format=dict(field='convopts.format', usedefault=True, ), icedims=dict(field='convopts.icedims', usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(field='data', mandatory=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), output_dir=dict(field='outdir', usedefault=True, ), output_dir_struct=dict(field='root', usedefault=True, ), paths=dict(), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = DicomImport.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DicomImport_outputs(): output_map = dict(out_files=dict(), ) outputs = DicomImport.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py000066400000000000000000000026061227300005300263440ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.model import EstimateContrast def test_EstimateContrast_inputs(): input_map = dict(beta_images=dict(copyfile=False, mandatory=True, ), contrasts=dict(mandatory=True, ), group_contrast=dict(xor=['use_derivs'], ), ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), residual_image=dict(copyfile=False, mandatory=True, ), spm_mat_file=dict(copyfile=True, field='spmmat', mandatory=True, ), use_derivs=dict(xor=['group_contrast'], ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = EstimateContrast.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_EstimateContrast_outputs(): output_map = dict(con_images=dict(), ess_images=dict(), spmF_images=dict(), spmT_images=dict(), spm_mat_file=dict(), ) outputs = EstimateContrast.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_EstimateModel.py000066400000000000000000000022751227300005300256110ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.model import EstimateModel def test_EstimateModel_inputs(): input_map = dict(estimation_method=dict(field='method', mandatory=True, ), flags=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), spm_mat_file=dict(copyfile=True, field='spmmat', mandatory=True, ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = EstimateModel.input_spec() for key, metadata in 
input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_EstimateModel_outputs(): output_map = dict(RPVimage=dict(), beta_images=dict(), mask_image=dict(), residual_image=dict(), spm_mat_file=dict(), ) outputs = EstimateModel.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py000066400000000000000000000037361227300005300261160ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.model import FactorialDesign def test_FactorialDesign_inputs(): input_map = dict(covariates=dict(field='cov', ), explicit_mask_file=dict(field='masking.em', ), global_calc_mean=dict(field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], ), global_calc_omit=dict(field='globalc.g_omit', xor=['global_calc_mean', 'global_calc_values'], ), global_calc_values=dict(field='globalc.g_user.global_uval', xor=['global_calc_mean', 'global_calc_omit'], ), global_normalization=dict(field='globalm.glonorm', ), ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), paths=dict(), spm_mat_dir=dict(field='dir', ), threshold_mask_absolute=dict(field='masking.tm.tma.athresh', xor=['threshold_mask_none', 'threshold_mask_relative'], ), threshold_mask_none=dict(field='masking.tm.tm_none', xor=['threshold_mask_absolute', 'threshold_mask_relative'], ), threshold_mask_relative=dict(field='masking.tm.tmr.rthresh', xor=['threshold_mask_absolute', 'threshold_mask_none'], ), use_implicit_threshold=dict(field='masking.im', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = FactorialDesign.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FactorialDesign_outputs(): output_map = dict(spm_mat_file=dict(), ) outputs = FactorialDesign.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_Level1Design.py000066400000000000000000000032031227300005300253270ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.model import Level1Design def test_Level1Design_inputs(): input_map = dict(bases=dict(field='bases', mandatory=True, ), factor_info=dict(field='fact', ), global_intensity_normalization=dict(field='global', ), ignore_exception=dict(nohash=True, usedefault=True, ), interscan_interval=dict(field='timing.RT', mandatory=True, ), mask_image=dict(field='mask', ), mask_threshold=dict(usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), microtime_onset=dict(field='timing.fmri_t0', ), microtime_resolution=dict(field='timing.fmri_t', ), model_serial_correlations=dict(field='cvi', ), paths=dict(), session_info=dict(field='sess', mandatory=True, ), spm_mat_dir=dict(field='dir', ), timing_units=dict(field='timing.units', mandatory=True, ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), volterra_expansion_order=dict(field='volt', ), ) inputs = 
Level1Design.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Level1Design_outputs(): output_map = dict(spm_mat_file=dict(), ) outputs = Level1Design.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py000066400000000000000000000043271227300005300300430ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.model import MultipleRegressionDesign def test_MultipleRegressionDesign_inputs(): input_map = dict(covariates=dict(field='cov', ), explicit_mask_file=dict(field='masking.em', ), global_calc_mean=dict(field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], ), global_calc_omit=dict(field='globalc.g_omit', xor=['global_calc_mean', 'global_calc_values'], ), global_calc_values=dict(field='globalc.g_user.global_uval', xor=['global_calc_mean', 'global_calc_omit'], ), global_normalization=dict(field='globalm.glonorm', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(field='des.mreg.scans', mandatory=True, ), include_intercept=dict(field='des.mreg.incint', usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), paths=dict(), spm_mat_dir=dict(field='dir', ), threshold_mask_absolute=dict(field='masking.tm.tma.athresh', xor=['threshold_mask_none', 'threshold_mask_relative'], ), threshold_mask_none=dict(field='masking.tm.tm_none', xor=['threshold_mask_absolute', 'threshold_mask_relative'], ), threshold_mask_relative=dict(field='masking.tm.tmr.rthresh', xor=['threshold_mask_absolute', 'threshold_mask_none'], ), use_implicit_threshold=dict(field='masking.im', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), user_covariates=dict(field='des.mreg.mcov', ), ) inputs = MultipleRegressionDesign.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MultipleRegressionDesign_outputs(): output_map = dict(spm_mat_file=dict(), ) outputs = MultipleRegressionDesign.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_NewSegment.py000066400000000000000000000031241227300005300251230ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.preprocess import NewSegment def test_NewSegment_inputs(): input_map = dict(affine_regularization=dict(field='warp.affreg', ), channel_files=dict(copyfile=False, field='channel', mandatory=True, ), channel_info=dict(field='channel', ), ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), sampling_distance=dict(field='warp.samp', ), tissues=dict(field='tissue', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), warping_regularization=dict(field='warp.reg', ), write_deformation_fields=dict(field='warp.write', ), ) inputs = NewSegment.input_spec() for key, metadata in input_map.items(): for metakey, value in 
metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_NewSegment_outputs(): output_map = dict(bias_corrected_images=dict(), bias_field_images=dict(), dartel_input_images=dict(), forward_deformation_field=dict(), inverse_deformation_field=dict(), modulated_class_images=dict(), native_class_images=dict(), normalized_class_images=dict(), transformation_mat=dict(), ) outputs = NewSegment.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_Normalize.py000066400000000000000000000044521227300005300250140ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.preprocess import Normalize def test_Normalize_inputs(): input_map = dict(DCT_period_cutoff=dict(field='eoptions.cutoff', ), affine_regularization_type=dict(field='eoptions.regype', ), apply_to_files=dict(copyfile=True, field='subj.resample', ), ignore_exception=dict(nohash=True, usedefault=True, ), jobtype=dict(usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), nonlinear_iterations=dict(field='eoptions.nits', ), nonlinear_regularization=dict(field='eoptions.reg', ), out_prefix=dict(field='roptions.prefix', usedefault=True, ), parameter_file=dict(copyfile=False, field='subj.matname', mandatory=True, xor=['source', 'template'], ), paths=dict(), source=dict(copyfile=True, field='subj.source', mandatory=True, xor=['parameter_file'], ), source_image_smoothing=dict(field='eoptions.smosrc', ), source_weight=dict(copyfile=False, field='subj.wtsrc', ), template=dict(copyfile=False, field='eoptions.template', mandatory=True, xor=['parameter_file'], ), template_image_smoothing=dict(field='eoptions.smoref', ), template_weight=dict(copyfile=False, field='eoptions.weight', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), write_bounding_box=dict(field='roptions.bb', ), write_interp=dict(field='roptions.interp', ), write_preserve=dict(field='roptions.preserve', ), write_voxel_sizes=dict(field='roptions.vox', ), write_wrap=dict(field='roptions.wrap', ), ) inputs = Normalize.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Normalize_outputs(): output_map = dict(normalization_parameters=dict(), normalized_files=dict(), normalized_source=dict(), ) outputs = Normalize.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py000066400000000000000000000040721227300005300270530ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.model import OneSampleTTestDesign def test_OneSampleTTestDesign_inputs(): input_map = dict(covariates=dict(field='cov', ), explicit_mask_file=dict(field='masking.em', ), global_calc_mean=dict(field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], ), global_calc_omit=dict(field='globalc.g_omit', xor=['global_calc_mean', 'global_calc_values'], ), global_calc_values=dict(field='globalc.g_user.global_uval', xor=['global_calc_mean', 'global_calc_omit'], ), global_normalization=dict(field='globalm.glonorm', ), 
ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(field='des.t1.scans', mandatory=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), paths=dict(), spm_mat_dir=dict(field='dir', ), threshold_mask_absolute=dict(field='masking.tm.tma.athresh', xor=['threshold_mask_none', 'threshold_mask_relative'], ), threshold_mask_none=dict(field='masking.tm.tm_none', xor=['threshold_mask_absolute', 'threshold_mask_relative'], ), threshold_mask_relative=dict(field='masking.tm.tmr.rthresh', xor=['threshold_mask_absolute', 'threshold_mask_none'], ), use_implicit_threshold=dict(field='masking.im', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = OneSampleTTestDesign.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_OneSampleTTestDesign_outputs(): output_map = dict(spm_mat_file=dict(), ) outputs = OneSampleTTestDesign.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py000066400000000000000000000042251227300005300263740ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.model import PairedTTestDesign def test_PairedTTestDesign_inputs(): input_map = dict(ancova=dict(field='des.pt.ancova', ), covariates=dict(field='cov', ), explicit_mask_file=dict(field='masking.em', ), global_calc_mean=dict(field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], ), global_calc_omit=dict(field='globalc.g_omit', xor=['global_calc_mean', 'global_calc_values'], ), global_calc_values=dict(field='globalc.g_user.global_uval', xor=['global_calc_mean', 'global_calc_omit'], ), global_normalization=dict(field='globalm.glonorm', ), grand_mean_scaling=dict(field='des.pt.gmsca', ), ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), paired_files=dict(field='des.pt.pair', mandatory=True, ), paths=dict(), spm_mat_dir=dict(field='dir', ), threshold_mask_absolute=dict(field='masking.tm.tma.athresh', xor=['threshold_mask_none', 'threshold_mask_relative'], ), threshold_mask_none=dict(field='masking.tm.tm_none', xor=['threshold_mask_absolute', 'threshold_mask_relative'], ), threshold_mask_relative=dict(field='masking.tm.tmr.rthresh', xor=['threshold_mask_absolute', 'threshold_mask_none'], ), use_implicit_threshold=dict(field='masking.im', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = PairedTTestDesign.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_PairedTTestDesign_outputs(): output_map = dict(spm_mat_file=dict(), ) outputs = PairedTTestDesign.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_Realign.py000066400000000000000000000034621227300005300244350ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from 
nipype.interfaces.spm.preprocess import Realign def test_Realign_inputs(): input_map = dict(fwhm=dict(field='eoptions.fwhm', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(copyfile=True, field='data', mandatory=True, ), interp=dict(field='eoptions.interp', ), jobtype=dict(usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), out_prefix=dict(field='roptions.prefix', usedefault=True, ), paths=dict(), quality=dict(field='eoptions.quality', ), register_to_mean=dict(field='eoptions.rtm', mandatory=True, usedefault=True, ), separation=dict(field='eoptions.sep', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), weight_img=dict(field='eoptions.weight', ), wrap=dict(field='eoptions.wrap', ), write_interp=dict(field='roptions.interp', ), write_mask=dict(field='roptions.mask', ), write_which=dict(field='roptions.which', maxlen=2, minlen=2, usedefault=True, ), write_wrap=dict(field='roptions.wrap', ), ) inputs = Realign.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Realign_outputs(): output_map = dict(mean_image=dict(), modified_in_files=dict(), realigned_files=dict(), realignment_parameters=dict(), ) outputs = Realign.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_Reslice.py000066400000000000000000000020441227300005300244350ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.utils import Reslice def test_Reslice_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(mandatory=True, ), interp=dict(usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), out_file=dict(), paths=dict(), space_defining=dict(mandatory=True, ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = Reslice.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Reslice_outputs(): output_map = dict(out_file=dict(), ) outputs = Reslice.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py000066400000000000000000000023261227300005300265620ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.utils import ResliceToReference def test_ResliceToReference_inputs(): input_map = dict(bounding_box=dict(field='comp{2}.idbbvox.bb', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(field='fnames', mandatory=True, ), interpolation=dict(field='interp', ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), target=dict(field='comp{1}.id.space', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), voxel_sizes=dict(field='comp{2}.idbbvox.vox', ), ) inputs = ResliceToReference.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ResliceToReference_outputs(): output_map = 
dict(out_files=dict(), ) outputs = ResliceToReference.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_SPMCommand.py000066400000000000000000000011751227300005300250110ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.base import SPMCommand def test_SPMCommand_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = SPMCommand.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_Segment.py000066400000000000000000000042301227300005300244500ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.preprocess import Segment def test_Segment_inputs(): input_map = dict(affine_regularization=dict(field='opts.regtype', ), bias_fwhm=dict(field='opts.biasfwhm', ), bias_regularization=dict(field='opts.biasreg', ), clean_masks=dict(field='output.cleanup', ), csf_output_type=dict(field='output.CSF', ), data=dict(copyfile=False, field='data', mandatory=True, ), gaussians_per_class=dict(field='opts.ngaus', ), gm_output_type=dict(field='output.GM', ), ignore_exception=dict(nohash=True, usedefault=True, ), mask_image=dict(field='opts.msk', ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), sampling_distance=dict(field='opts.samp', ), save_bias_corrected=dict(field='output.biascor', ), tissue_prob_maps=dict(field='opts.tpm', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), warp_frequency_cutoff=dict(field='opts.warpco', ), warping_regularization=dict(field='opts.warpreg', ), wm_output_type=dict(field='output.WM', ), ) inputs = Segment.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Segment_outputs(): output_map = dict(bias_corrected_image=dict(), inverse_transformation_mat=dict(), modulated_csf_image=dict(), modulated_gm_image=dict(), modulated_input_image=dict(deprecated='0.10', new_name='bias_corrected_image', ), modulated_wm_image=dict(), native_csf_image=dict(), native_gm_image=dict(), native_wm_image=dict(), normalized_csf_image=dict(), normalized_gm_image=dict(), normalized_wm_image=dict(), transformation_mat=dict(), ) outputs = Segment.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_SliceTiming.py000066400000000000000000000026011227300005300252550ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.preprocess import SliceTiming def test_SliceTiming_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(copyfile=False, field='scans', mandatory=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), num_slices=dict(field='nslices', mandatory=True, ), out_prefix=dict(field='prefix', 
usedefault=True, ), paths=dict(), ref_slice=dict(field='refslice', mandatory=True, ), slice_order=dict(field='so', mandatory=True, ), time_acquisition=dict(field='ta', mandatory=True, ), time_repetition=dict(field='tr', mandatory=True, ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = SliceTiming.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SliceTiming_outputs(): output_map = dict(timecorrected_files=dict(), ) outputs = SliceTiming.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_Smooth.py000066400000000000000000000022361227300005300243230ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.preprocess import Smooth def test_Smooth_inputs(): input_map = dict(data_type=dict(field='dtype', ), fwhm=dict(field='fwhm', ), ignore_exception=dict(nohash=True, usedefault=True, ), implicit_masking=dict(field='im', ), in_files=dict(copyfile=False, field='data', mandatory=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), out_prefix=dict(field='prefix', usedefault=True, ), paths=dict(), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = Smooth.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Smooth_outputs(): output_map = dict(smoothed_files=dict(), ) outputs = Smooth.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_Threshold.py000066400000000000000000000031101227300005300247760ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.model import Threshold def test_Threshold_inputs(): input_map = dict(contrast_index=dict(mandatory=True, ), extent_fdr_p_threshold=dict(usedefault=True, ), extent_threshold=dict(usedefault=True, ), force_activation=dict(usedefault=True, ), height_threshold=dict(usedefault=True, ), height_threshold_type=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), spm_mat_file=dict(copyfile=True, mandatory=True, ), stat_image=dict(copyfile=False, mandatory=True, ), use_fwe_correction=dict(usedefault=True, ), use_mcr=dict(), use_topo_fdr=dict(usedefault=True, ), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = Threshold.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Threshold_outputs(): output_map = dict(activation_forced=dict(), cluster_forming_thr=dict(), n_clusters=dict(), pre_topo_fdr_map=dict(), pre_topo_n_clusters=dict(), thresholded_map=dict(), ) outputs = Threshold.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py000066400000000000000000000025651227300005300270660ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.model import ThresholdStatistics def test_ThresholdStatistics_inputs(): input_map = dict(contrast_index=dict(mandatory=True, ), extent_threshold=dict(usedefault=True, ), height_threshold=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), spm_mat_file=dict(copyfile=True, mandatory=True, ), stat_image=dict(copyfile=False, mandatory=True, ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = ThresholdStatistics.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_ThresholdStatistics_outputs(): output_map = dict(clusterwise_P_FDR=dict(), clusterwise_P_RF=dict(), voxelwise_P_Bonf=dict(), voxelwise_P_FDR=dict(), voxelwise_P_RF=dict(), voxelwise_P_uncor=dict(), ) outputs = ThresholdStatistics.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py000066400000000000000000000043601227300005300271030ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.model import TwoSampleTTestDesign def test_TwoSampleTTestDesign_inputs(): input_map = dict(covariates=dict(field='cov', ), dependent=dict(field='des.t2.dept', ), explicit_mask_file=dict(field='masking.em', ), global_calc_mean=dict(field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], ), global_calc_omit=dict(field='globalc.g_omit', xor=['global_calc_mean', 'global_calc_values'], ), global_calc_values=dict(field='globalc.g_user.global_uval', xor=['global_calc_mean', 'global_calc_omit'], ), global_normalization=dict(field='globalm.glonorm', ), group1_files=dict(field='des.t2.scans1', mandatory=True, ), group2_files=dict(field='des.t2.scans2', mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), paths=dict(), spm_mat_dir=dict(field='dir', ), threshold_mask_absolute=dict(field='masking.tm.tma.athresh', xor=['threshold_mask_none', 'threshold_mask_relative'], ), threshold_mask_none=dict(field='masking.tm.tm_none', xor=['threshold_mask_absolute', 'threshold_mask_relative'], ), threshold_mask_relative=dict(field='masking.tm.tmr.rthresh', xor=['threshold_mask_absolute', 'threshold_mask_none'], ), unequal_variance=dict(field='des.t2.variance', ), use_implicit_threshold=dict(field='masking.im', ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = TwoSampleTTestDesign.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TwoSampleTTestDesign_outputs(): output_map = dict(spm_mat_file=dict(), ) outputs = TwoSampleTTestDesign.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
nipype-0.9.2/nipype/interfaces/spm/tests/test_auto_VBMSegment.py000066400000000000000000000101701227300005300250150ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.preprocess import VBMSegment def test_VBMSegment_inputs(): input_map = dict(bias_corrected_affine=dict(field='estwrite.output.bias.affine', usedefault=True, ), bias_corrected_native=dict(field='estwrite.output.bias.native', usedefault=True, ), bias_corrected_normalized=dict(field='estwrite.output.bias.warped', usedefault=True, ), bias_fwhm=dict(field='estwrite.opts.biasfwhm', usedefault=True, ), bias_regularization=dict(field='estwrite.opts.biasreg', usedefault=True, ), cleanup_partitions=dict(field='estwrite.extopts.cleanup', usedefault=True, ), csf_dartel=dict(field='estwrite.output.CSF.dartel', usedefault=True, ), csf_modulated_normalized=dict(field='estwrite.output.CSF.modulated', usedefault=True, ), csf_native=dict(field='estwrite.output.CSF.native', usedefault=True, ), csf_normalized=dict(field='estwrite.output.CSF.warped', usedefault=True, ), dartel_template=dict(field='estwrite.extopts.dartelwarp.normhigh.darteltpm', ), deformation_field=dict(field='estwrite.output.warps', usedefault=True, ), display_results=dict(field='estwrite.extopts.print', usedefault=True, ), gaussians_per_class=dict(usedefault=True, ), gm_dartel=dict(field='estwrite.output.GM.dartel', usedefault=True, ), gm_modulated_normalized=dict(field='estwrite.output.GM.modulated', usedefault=True, ), gm_native=dict(field='estwrite.output.GM.native', usedefault=True, ), gm_normalized=dict(field='estwrite.output.GM.warped', usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_files=dict(copyfile=False, field='estwrite.data', mandatory=True, ), jacobian_determinant=dict(field='estwrite.jacobian.warped', usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), mrf_weighting=dict(field='estwrite.extopts.mrf', usedefault=True, ), paths=dict(), pve_label_dartel=dict(field='estwrite.output.label.dartel', usedefault=True, ), pve_label_native=dict(field='estwrite.output.label.native', usedefault=True, ), pve_label_normalized=dict(field='estwrite.output.label.warped', usedefault=True, ), sampling_distance=dict(field='estwrite.opts.samp', usedefault=True, ), spatial_normalization=dict(usedefault=True, ), tissues=dict(field='estwrite.tpm', ), use_mcr=dict(), use_sanlm_denoising_filter=dict(field='estwrite.extopts.sanlm', usedefault=True, ), use_v8struct=dict(min_ver='8', usedefault=True, ), warping_regularization=dict(field='estwrite.opts.warpreg', usedefault=True, ), wm_dartel=dict(field='estwrite.output.WM.dartel', usedefault=True, ), wm_modulated_normalized=dict(field='estwrite.output.WM.modulated', usedefault=True, ), wm_native=dict(field='estwrite.output.WM.native', usedefault=True, ), wm_normalized=dict(field='estwrite.output.WM.warped', usedefault=True, ), ) inputs = VBMSegment.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_VBMSegment_outputs(): output_map = dict(bias_corrected_images=dict(), dartel_input_images=dict(), forward_deformation_field=dict(), inverse_deformation_field=dict(), jacobian_determinant_images=dict(), modulated_class_images=dict(), native_class_images=dict(), normalized_bias_corrected_images=dict(), normalized_class_images=dict(), pve_label_native_images=dict(), 
pve_label_normalized_images=dict(), pve_label_registered_images=dict(), transformation_mat=dict(), ) outputs = VBMSegment.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/spm/tests/test_base.py000066400000000000000000000140411227300005300227310ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from tempfile import mkdtemp from shutil import rmtree import nibabel as nb import numpy as np from nipype.testing import (assert_equal, assert_false, assert_true, skipif) import nipype.interfaces.spm.base as spm from nipype.interfaces.spm import no_spm import nipype.interfaces.matlab as mlab from nipype.interfaces.spm.base import SPMCommandInputSpec from nipype.interfaces.base import traits try: matlab_cmd = os.environ['MATLABCMD'] except: matlab_cmd = 'matlab' mlab.MatlabCommand.set_default_matlab_cmd(matlab_cmd) def create_files_in_directory(): outdir = mkdtemp() cwd = os.getcwd() os.chdir(outdir) filelist = ['a.nii', 'b.nii'] for f in filelist: hdr = nb.Nifti1Header() shape = (3, 3, 3, 4) hdr.set_data_shape(shape) img = np.random.random(shape) nb.save(nb.Nifti1Image(img, np.eye(4), hdr), os.path.join(outdir, f)) return filelist, outdir, cwd def clean_directory(outdir, old_wd): if os.path.exists(outdir): rmtree(outdir) os.chdir(old_wd) def test_scan_for_fnames(): filelist, outdir, cwd = create_files_in_directory() names = spm.scans_for_fnames(filelist, keep4d=True) yield assert_equal, names[0], filelist[0] yield assert_equal, names[1], filelist[1] clean_directory(outdir, cwd) save_time = False if not save_time: @skipif(no_spm) def test_spm_path(): spm_path = spm.Info.version()['path'] if spm_path is not None: yield assert_equal, type(spm_path), type('') yield assert_true, 'spm' in spm_path def test_use_mfile(): class TestClass(spm.SPMCommand): input_spec = spm.SPMCommandInputSpec dc = TestClass() # dc = derived_class yield assert_true, dc.inputs.mfile def test_find_mlab_cmd_defaults(): saved_env = dict(os.environ) class TestClass(spm.SPMCommand): pass # test without FORCE_SPMMCR, SPMMCRCMD set for varname in ['FORCE_SPMMCR', 'SPMMCRCMD']: try: del os.environ[varname] except KeyError: pass dc = TestClass() yield assert_equal, dc._use_mcr, None yield assert_equal, dc._matlab_cmd, None # test with only FORCE_SPMMCR set os.environ['FORCE_SPMMCR'] = '1' dc = TestClass() yield assert_equal, dc._use_mcr, True yield assert_equal, dc._matlab_cmd, None # test with both, FORCE_SPMMCR and SPMMCRCMD set os.environ['SPMMCRCMD'] = 'spmcmd' dc = TestClass() yield assert_equal, dc._use_mcr, True yield assert_equal, dc._matlab_cmd, 'spmcmd' # restore environment os.environ.clear(); os.environ.update(saved_env) @skipif(no_spm, "SPM not found") def test_cmd_update(): class TestClass(spm.SPMCommand): input_spec = spm.SPMCommandInputSpec dc = TestClass() # dc = derived_class dc.inputs.matlab_cmd = 'foo' yield assert_equal, dc.mlab._cmd, 'foo' def test_cmd_update2(): class TestClass(spm.SPMCommand): _jobtype = 'jobtype' _jobname = 'jobname' input_spec = spm.SPMCommandInputSpec dc = TestClass() # dc = derived_class yield assert_equal, dc.jobtype, 'jobtype' yield assert_equal, dc.jobname, 'jobname' def test_reformat_dict_for_savemat(): class TestClass(spm.SPMCommand): input_spec = spm.SPMCommandInputSpec dc = TestClass() # dc = derived_class out = 
dc._reformat_dict_for_savemat({'a': {'b': {'c': []}}}) yield assert_equal, out, [{'a': [{'b': [{'c': []}]}]}] def test_generate_job(): class TestClass(spm.SPMCommand): input_spec = spm.SPMCommandInputSpec dc = TestClass() # dc = derived_class out = dc._generate_job() yield assert_equal, out, '' # struct array contents = {'contents': [1, 2, 3, 4]} out = dc._generate_job(contents=contents) yield assert_equal, out, ('.contents(1) = 1;\n.contents(2) = 2;' '\n.contents(3) = 3;\n.contents(4) = 4;\n') # cell array of strings filelist, outdir, cwd = create_files_in_directory() names = spm.scans_for_fnames(filelist, keep4d=True) contents = {'files': names} out = dc._generate_job(prefix='test', contents=contents) yield assert_equal, out, "test.files = {...\n'a.nii';...\n'b.nii';...\n};\n" clean_directory(outdir, cwd) # string assignment contents = 'foo' out = dc._generate_job(prefix='test', contents=contents) yield assert_equal, out, "test = 'foo';\n" # cell array of vectors contents = {'onsets': np.array((1,), dtype=object)} contents['onsets'][0] = [1, 2, 3, 4] out = dc._generate_job(prefix='test', contents=contents) yield assert_equal, out, 'test.onsets = {...\n[1, 2, 3, 4];...\n};\n' def test_bool(): class TestClassInputSpec(SPMCommandInputSpec): test_in = include_intercept = traits.Bool(field='testfield') class TestClass(spm.SPMCommand): input_spec = TestClassInputSpec _jobtype = 'jobtype' _jobname = 'jobname' dc = TestClass() # dc = derived_class dc.inputs.test_in = True out = dc._make_matlab_command(dc._parse_inputs()) yield assert_equal, out.find('jobs{1}.spm.jobtype.jobname.testfield = 1;') > 0, 1 dc.inputs.use_v8struct = False out = dc._make_matlab_command(dc._parse_inputs()) yield assert_equal, out.find('jobs{1}.jobtype{1}.jobname{1}.testfield = 1;') > 0, 1 def test_make_matlab_command(): class TestClass(spm.SPMCommand): _jobtype = 'jobtype' _jobname = 'jobname' input_spec = spm.SPMCommandInputSpec dc = TestClass() # dc = derived_class filelist, outdir, cwd = create_files_in_directory() contents = {'contents': [1, 2, 3, 4]} script = dc._make_matlab_command([contents]) yield assert_true, 'jobs{1}.spm.jobtype.jobname.contents(3) = 3;' in script dc.inputs.use_v8struct = False script = dc._make_matlab_command([contents]) yield assert_true, 'jobs{1}.jobtype{1}.jobname{1}.contents(3) = 3;' in script clean_directory(outdir, cwd) nipype-0.9.2/nipype/interfaces/spm/tests/test_model.py000066400000000000000000000043001227300005300231140ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from tempfile import mkdtemp from shutil import rmtree import numpy as np from nipype.testing import (assert_equal, assert_false, assert_true, assert_raises, skipif) import nibabel as nb import nipype.interfaces.spm.model as spm from nipype.interfaces.spm import no_spm import nipype.interfaces.matlab as mlab try: matlab_cmd = os.environ['MATLABCMD'] except: matlab_cmd = 'matlab' mlab.MatlabCommand.set_default_matlab_cmd(matlab_cmd) def create_files_in_directory(): outdir = mkdtemp() cwd = os.getcwd() os.chdir(outdir) filelist = ['a.nii','b.nii'] for f in filelist: hdr = nb.Nifti1Header() shape = (3,3,3,4) hdr.set_data_shape(shape) img = np.random.random(shape) nb.save(nb.Nifti1Image(img,np.eye(4),hdr), os.path.join(outdir,f)) return filelist, outdir, cwd def clean_directory(outdir, old_wd): if os.path.exists(outdir): rmtree(outdir) os.chdir(old_wd) def test_level1design(): yield assert_equal, spm.Level1Design._jobtype, 
'stats' yield assert_equal, spm.Level1Design._jobname, 'fmri_spec' def test_estimatemodel(): yield assert_equal, spm.EstimateModel._jobtype, 'stats' yield assert_equal, spm.EstimateModel._jobname, 'fmri_est' def test_estimatecontrast(): yield assert_equal, spm.EstimateContrast._jobtype, 'stats' yield assert_equal, spm.EstimateContrast._jobname, 'con' def test_threshold(): yield assert_equal, spm.Threshold._jobtype, 'basetype' yield assert_equal, spm.Threshold._jobname, 'basename' def test_factorialdesign(): yield assert_equal, spm.FactorialDesign._jobtype, 'stats' yield assert_equal, spm.FactorialDesign._jobname, 'factorial_design' def test_onesamplettestdesign(): yield assert_equal, spm.OneSampleTTestDesign._jobtype, 'stats' yield assert_equal, spm.OneSampleTTestDesign._jobname, 'factorial_design' def test_twosamplettestdesign(): yield assert_equal, spm.TwoSampleTTestDesign._jobtype, 'stats' yield assert_equal, spm.TwoSampleTTestDesign._jobname, 'factorial_design' nipype-0.9.2/nipype/interfaces/spm/tests/test_preprocess.py000066400000000000000000000077251227300005300242170ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from tempfile import mkdtemp from shutil import rmtree import numpy as np from nipype.testing import (assert_equal, assert_false, assert_true, assert_raises, skipif) import nibabel as nb import nipype.interfaces.spm as spm from nipype.interfaces.spm import no_spm import nipype.interfaces.matlab as mlab try: matlab_cmd = os.environ['MATLABCMD'] except: matlab_cmd = 'matlab' mlab.MatlabCommand.set_default_matlab_cmd(matlab_cmd) def create_files_in_directory(): outdir = mkdtemp() cwd = os.getcwd() os.chdir(outdir) filelist = ['a.nii','b.nii'] for f in filelist: hdr = nb.Nifti1Header() shape = (3,3,3,4) hdr.set_data_shape(shape) img = np.random.random(shape) nb.save(nb.Nifti1Image(img,np.eye(4),hdr), os.path.join(outdir,f)) return filelist, outdir, cwd def clean_directory(outdir, old_wd): if os.path.exists(outdir): rmtree(outdir) os.chdir(old_wd) def test_slicetiming(): yield assert_equal, spm.SliceTiming._jobtype, 'temporal' yield assert_equal, spm.SliceTiming._jobname, 'st' def test_slicetiming_list_outputs(): filelist, outdir, cwd = create_files_in_directory() st = spm.SliceTiming(in_files=filelist[0]) yield assert_equal, st._list_outputs()['timecorrected_files'][0][0], 'a' clean_directory(outdir, cwd) def test_realign(): yield assert_equal, spm.Realign._jobtype, 'spatial' yield assert_equal, spm.Realign._jobname, 'realign' yield assert_equal, spm.Realign().inputs.jobtype, 'estwrite' def test_realign_list_outputs(): filelist, outdir, cwd = create_files_in_directory() rlgn = spm.Realign(in_files=filelist[0]) yield assert_true, rlgn._list_outputs()['realignment_parameters'][0].startswith('rp_') yield assert_true, rlgn._list_outputs()['realigned_files'][0].startswith('r') yield assert_true, rlgn._list_outputs()['mean_image'].startswith('mean') clean_directory(outdir, cwd) def test_coregister(): yield assert_equal, spm.Coregister._jobtype, 'spatial' yield assert_equal, spm.Coregister._jobname, 'coreg' yield assert_equal, spm.Coregister().inputs.jobtype, 'estwrite' def test_coregister_list_outputs(): filelist, outdir, cwd = create_files_in_directory() coreg = spm.Coregister(source=filelist[0]) yield assert_true, coreg._list_outputs()['coregistered_source'][0].startswith('r') coreg = spm.Coregister(source=filelist[0],apply_to_files=filelist[1]) yield assert_true, 
coreg._list_outputs()['coregistered_files'][0].startswith('r') clean_directory(outdir, cwd) def test_normalize(): yield assert_equal, spm.Normalize._jobtype, 'spatial' yield assert_equal, spm.Normalize._jobname, 'normalise' yield assert_equal, spm.Normalize().inputs.jobtype, 'estwrite' def test_normalize_list_outputs(): filelist, outdir, cwd = create_files_in_directory() norm = spm.Normalize(source=filelist[0]) yield assert_true, norm._list_outputs()['normalized_source'][0].startswith('w') norm = spm.Normalize(source=filelist[0],apply_to_files=filelist[1]) yield assert_true, norm._list_outputs()['normalized_files'][0].startswith('w') clean_directory(outdir, cwd) def test_segment(): yield assert_equal, spm.Segment._jobtype, 'spatial' yield assert_equal, spm.Segment._jobname, 'preproc' def test_newsegment(): yield assert_equal, spm.NewSegment._jobtype, 'tools' yield assert_equal, spm.NewSegment._jobname, 'preproc8' def test_smooth(): yield assert_equal, spm.Smooth._jobtype, 'spatial' yield assert_equal, spm.Smooth._jobname, 'smooth' def test_dartel(): yield assert_equal, spm.DARTEL._jobtype, 'tools' yield assert_equal, spm.DARTEL._jobname, 'dartel' def test_dartelnorm2mni(): yield assert_equal, spm.DARTELNorm2MNI._jobtype, 'tools' yield assert_equal, spm.DARTELNorm2MNI._jobname, 'dartel' nipype-0.9.2/nipype/interfaces/spm/tests/test_utils.py000066400000000000000000000065531227300005300231700ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from nipype.testing import (assert_equal, assert_false,assert_raises, assert_true, skipif, example_data) from nipype.interfaces.spm import no_spm import nipype.interfaces.spm.utils as spmu from nipype.interfaces.base import isdefined from nipype.utils.filemanip import split_filename, fname_presuffix from nipype.interfaces.base import TraitError def test_coreg(): moving = example_data(infile = 'functional.nii') target = example_data(infile = 'T1.nii') mat = example_data(infile = 'trans.mat') coreg = spmu.CalcCoregAffine(matlab_cmd = 'mymatlab') coreg.inputs.target = target assert_equal(coreg.inputs.matlab_cmd, 'mymatlab') coreg.inputs.moving = moving assert_equal( isdefined(coreg.inputs.mat),False) pth, mov, _ = split_filename(moving) _, tgt, _ = split_filename(target) mat = os.path.join(pth, '%s_to_%s.mat'%(mov,tgt)) invmat = fname_presuffix(mat, prefix = 'inverse_') scrpt = coreg._make_matlab_command(None) assert_equal(coreg.inputs.mat, mat) assert_equal( coreg.inputs.invmat, invmat) def test_apply_transform(): moving = example_data(infile = 'functional.nii') mat = example_data(infile = 'trans.mat') applymat = spmu.ApplyTransform(matlab_cmd = 'mymatlab') assert_equal( applymat.inputs.matlab_cmd, 'mymatlab' ) applymat.inputs.in_file = moving applymat.inputs.mat = mat scrpt = applymat._make_matlab_command(None) expected = 'img_space = spm_get_space(infile);' assert_equal( expected in scrpt, True) expected = 'spm_get_space(infile, M * img_space);' assert_equal(expected in scrpt, True) def test_reslice(): moving = example_data(infile = 'functional.nii') space_defining = example_data(infile = 'T1.nii') reslice = spmu.Reslice(matlab_cmd = 'mymatlab_version') assert_equal( reslice.inputs.matlab_cmd, 'mymatlab_version') reslice.inputs.in_file = moving reslice.inputs.space_defining = space_defining assert_equal( reslice.inputs.interp, 0) assert_raises(TraitError,reslice.inputs.trait_set,interp = 'nearest') assert_raises(TraitError, reslice.inputs.trait_set, 
                  interp = 10)
    reslice.inputs.interp = 1
    script = reslice._make_matlab_command(None)
    outfile = fname_presuffix(moving, prefix='r')
    assert_equal(reslice.inputs.out_file, outfile)
    expected = '\nflags.mean=0;\nflags.which=1;\nflags.mask=0;'
    assert_equal(expected in script.replace(' ', ''), True)
    expected_interp = 'flags.interp = 1;\n'
    assert_equal(expected_interp in script, True)
    assert_equal('spm_reslice(invols, flags);' in script, True)


def test_dicom_import():
    dicom = example_data(infile='dicomdir/123456-1-1.dcm')
    di = spmu.DicomImport(matlab_cmd='mymatlab')
    assert_equal(di.inputs.matlab_cmd, 'mymatlab')
    assert_equal(di.inputs.output_dir_struct, 'flat')
    assert_equal(di.inputs.output_dir, './converted_dicom')
    assert_equal(di.inputs.format, 'nii')
    assert_equal(di.inputs.icedims, False)
    assert_raises(TraitError, di.inputs.trait_set, output_dir_struct='wrong')
    assert_raises(TraitError, di.inputs.trait_set, format='FAT')
    assert_raises(TraitError, di.inputs.trait_set, in_files=['does_sfd_not_32fn_exist.dcm'])
    di.inputs.in_files = [dicom]
    assert_equal(di.inputs.in_files, [dicom])
nipype-0.9.2/nipype/interfaces/spm/utils.py000066400000000000000000000355231227300005300207660ustar00rootroot00000000000000
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from nipype.interfaces.spm.base import SPMCommandInputSpec, SPMCommand, Info, scans_for_fnames, scans_for_fname
from nipype.interfaces.matlab import MatlabCommand
from nipype.interfaces.base import (TraitedSpec, BaseInterface,
                                    BaseInterfaceInputSpec, isdefined,
                                    OutputMultiPath, InputMultiPath)
from nipype.interfaces.base import File, traits
from nipype.utils.filemanip import split_filename, fname_presuffix, filename_to_list, list_to_filename
import os
import numpy as np


class Analyze2niiInputSpec(SPMCommandInputSpec):
    analyze_file = File(exists=True, mandatory=True)


class Analyze2niiOutputSpec(SPMCommandInputSpec):
    nifti_file = File(exists=True)


class Analyze2nii(SPMCommand):
    input_spec = Analyze2niiInputSpec
    output_spec = Analyze2niiOutputSpec

    def _make_matlab_command(self, _):
        script = "V = spm_vol('%s');\n" % self.inputs.analyze_file
        _, name, _ = split_filename(self.inputs.analyze_file)
        self.output_name = os.path.join(os.getcwd(), name + ".nii")
        script += "[Y, XYZ] = spm_read_vols(V);\n"
        script += "V.fname = '%s';\n" % self.output_name
        script += "spm_write_vol(V, Y);\n"
        return script

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['nifti_file'] = self.output_name
        return outputs


class CalcCoregAffineInputSpec(SPMCommandInputSpec):
    target = File(exists=True, mandatory=True,
                  desc='target for generating affine transform')
    moving = File(exists=True, mandatory=True, copyfile=False,
                  desc='volume transform can be applied to register with target')
    mat = File(desc='Filename used to store affine matrix')
    invmat = File(desc='Filename used to store inverse affine matrix')


class CalcCoregAffineOutputSpec(TraitedSpec):
    mat = File(exists=True, desc='Matlab file holding transform')
    invmat = File(desc='Matlab file holding inverse transform')


class CalcCoregAffine(SPMCommand):
    """ Uses SPM (spm_coreg) to calculate the transform mapping moving to target.
    Saves the transform in mat (a MATLAB binary file); also saves the inverse
    transform.

    Examples
    --------

    >>> import nipype.interfaces.spm.utils as spmu
    >>> coreg = spmu.CalcCoregAffine(matlab_cmd='matlab-spm8')
    >>> coreg.inputs.target = 'structural.nii'
    >>> coreg.inputs.moving = 'functional.nii'
    >>> coreg.inputs.mat = 'func_to_struct.mat'
    >>> coreg.run() # doctest: +SKIP

    .. note::

     * the output file mat is saved as a MATLAB binary file
     * calculating the transforms does NOT change either input image;
       it does not **move** the moving image, it only calculates the
       transform that can be used to move it
    """

    input_spec = CalcCoregAffineInputSpec
    output_spec = CalcCoregAffineOutputSpec

    def _make_inv_file(self):
        """ makes filename to hold inverse transform if not specified"""
        invmat = fname_presuffix(self.inputs.mat, prefix='inverse_')
        return invmat

    def _make_mat_file(self):
        """ makes a name for the matfile if one doesn't exist"""
        pth, mv, _ = split_filename(self.inputs.moving)
        _, tgt, _ = split_filename(self.inputs.target)
        mat = os.path.join(pth, '%s_to_%s.mat' % (mv, tgt))
        return mat

    def _make_matlab_command(self, _):
        """checks for SPM, generates script"""
        if not isdefined(self.inputs.mat):
            self.inputs.mat = self._make_mat_file()
        if not isdefined(self.inputs.invmat):
            self.inputs.invmat = self._make_inv_file()
        script = """
        target = '%s';
        moving = '%s';
        targetv = spm_vol(target);
        movingv = spm_vol(moving);
        x = spm_coreg(targetv, movingv);
        M = spm_matrix(x);
        save('%s' , 'M' );
        M = inv(M);
        save('%s','M')
        """ % (self.inputs.target,
               self.inputs.moving,
               self.inputs.mat,
               self.inputs.invmat)
        return script

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['mat'] = os.path.abspath(self.inputs.mat)
        outputs['invmat'] = os.path.abspath(self.inputs.invmat)
        return outputs


class ApplyTransformInputSpec(SPMCommandInputSpec):
    in_file = File(exists=True, mandatory=True, copyfile=True,
                   desc='file to apply transform to, (only updates header)')
    mat = File(exists=True, mandatory=True,
               desc='file holding transform to apply')


class ApplyTransformOutputSpec(TraitedSpec):
    out_file = File(exists=True, desc='File with updated header')


class ApplyTransform(SPMCommand):
    """ Uses SPM to apply a transform stored in a .mat file to the given file

    Examples
    --------

    >>> import nipype.interfaces.spm.utils as spmu
    >>> applymat = spmu.ApplyTransform(matlab_cmd='matlab-spm8')
    >>> applymat.inputs.in_file = 'functional.nii'
    >>> applymat.inputs.mat = 'func_to_struct.mat'
    >>> applymat.run() # doctest: +SKIP

    .. warning::

       CHANGES YOUR INPUT FILE (applies the transform by updating the header),
       except when used with nipype caching or within a workflow.
""" input_spec = ApplyTransformInputSpec output_spec = ApplyTransformOutputSpec def _make_matlab_command(self, _): """checks for SPM, generates script""" script = """ infile = '%s'; transform = load('%s'); M = inv(transform.M); img_space = spm_get_space(infile); spm_get_space(infile, M * img_space); """%(self.inputs.in_file, self.inputs.mat) return script def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = os.path.abspath(self.inputs.in_file) return outputs class ResliceInputSpec(SPMCommandInputSpec): in_file = File( exists = True, mandatory=True, desc='file to apply transform to, (only updates header)') space_defining = File ( exists = True, mandatory = True, desc = 'Volume defining space to slice in_file into') interp = traits.Range(low = 0, high = 7, usedefault = True, desc='degree of b-spline used for interpolation'\ '0 is nearest neighbor (default)') out_file = File(desc = 'Optional file to save resliced volume') class ResliceOutputSpec(TraitedSpec): out_file = File( exists = True, desc = 'resliced volume') class Reslice(SPMCommand): """ uses spm_reslice to resample in_file into space of space_defining""" input_spec = ResliceInputSpec output_spec = ResliceOutputSpec def _make_matlab_command(self, _): """ generates script""" if not isdefined(self.inputs.out_file): self.inputs.out_file = fname_presuffix(self.inputs.in_file, prefix = 'r') script = """ flags.mean = 0; flags.which = 1; flags.mask = 0; flags.interp = %d; infiles = strvcat(\'%s\', \'%s\'); invols = spm_vol(infiles); spm_reslice(invols, flags); """%(self.inputs.interp, self.inputs.space_defining, self.inputs.in_file) return script def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = os.path.abspath(self.inputs.out_file) return outputs class ApplyInverseDeformationInput(SPMCommandInputSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, field='fnames', desc='Files on which deformation is applied') target = File( exists=True, field='comp{1}.inv.space', desc='File defining target space') deformation = File( exists=True, field='comp{1}.inv.comp{1}.sn2def.matname', desc='SN SPM deformation file', xor=['deformation_field']) deformation_field = File( exists=True, field='comp{1}.inv.comp{1}.def', desc='SN SPM deformation file', xor=['deformation']) interpolation = traits.Range( low=0, high=7, field='interp', desc='degree of b-spline used for interpolation') bounding_box = traits.List( traits.Float(), field='comp{1}.inv.comp{1}.sn2def.bb', minlen=6, maxlen=6, desc='6-element list (opt)') voxel_sizes = traits.List( traits.Float(), field='comp{1}.inv.comp{1}.sn2def.vox', minlen=3, maxlen=3, desc='3-element list (opt)') class ApplyInverseDeformationOutput(TraitedSpec): out_files = OutputMultiPath(File(exists=True), desc='Transformed files') class ApplyInverseDeformation(SPMCommand): """ Uses spm to apply inverse deformation stored in a .mat file or a deformation field to a given file Examples -------- >>> import nipype.interfaces.spm.utils as spmu >>> inv = spmu.ApplyInverseDeformation() >>> inv.inputs.in_files = 'functional.nii' >>> inv.inputs.deformation = 'struct_to_func.mat' >>> inv.inputs.target = 'structural.nii' >>> inv.run() # doctest: +SKIP """ input_spec = ApplyInverseDeformationInput output_spec = ApplyInverseDeformationOutput _jobtype = 'util' _jobname = 'defs' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt == 'in_files': return scans_for_fnames(filename_to_list(val)) if opt == 'target': return 
scans_for_fname(filename_to_list(val)) if opt == 'deformation': return np.array([list_to_filename(val)], dtype=object) if opt == 'deformation_field': return np.array([list_to_filename(val)], dtype=object) return val def _list_outputs(self): outputs = self._outputs().get() outputs['out_files'] = [] for filename in self.inputs.in_files: _, fname = os.path.split(filename) outputs['out_files'].append(os.path.realpath('w%s' % fname)) return outputs class ResliceToReferenceInput(SPMCommandInputSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, field='fnames', desc='Files on which deformation is applied') target = File( exists=True, field='comp{1}.id.space', desc='File defining target space') interpolation = traits.Range( low=0, high=7, field='interp', desc='degree of b-spline used for interpolation') bounding_box = traits.List( traits.Float(), field='comp{2}.idbbvox.bb', minlen=6, maxlen=6, desc='6-element list (opt)') voxel_sizes = traits.List( traits.Float(), field='comp{2}.idbbvox.vox', minlen=3, maxlen=3, desc='3-element list (opt)') class ResliceToReferenceOutput(TraitedSpec): out_files = OutputMultiPath(File(exists=True), desc='Transformed files') class ResliceToReference(SPMCommand): """ Uses spm to reslice a volume to a target image space or to a provided voxel size and bounding box Examples -------- >>> import nipype.interfaces.spm.utils as spmu >>> r2ref = spmu.ResliceToReference() >>> r2ref.inputs.in_files = 'functional.nii' >>> r2ref.inputs.target = 'structural.nii' >>> r2ref.run() # doctest: +SKIP """ input_spec = ResliceToReferenceInput output_spec = ResliceToReferenceOutput _jobtype = 'util' _jobname = 'defs' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt == 'in_files': return scans_for_fnames(filename_to_list(val)) if opt == 'target': return scans_for_fname(filename_to_list(val)) if opt == 'deformation': return np.array([list_to_filename(val)], dtype=object) if opt == 'deformation_field': return np.array([list_to_filename(val)], dtype=object) return val def _list_outputs(self): outputs = self._outputs().get() outputs['out_files'] = [] for filename in self.inputs.in_files: _, fname = os.path.split(filename) outputs['out_files'].append(os.path.realpath('w%s' % fname)) return outputs class DicomImportInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, field='data', desc='dicom files to be converted') output_dir_struct = traits.Enum( 'flat', 'series', 'patname', 'patid_date', 'patid', 'date_time', field='root', usedefault=True, desc='directory structure for the output.') output_dir = traits.Str('./converted_dicom', field='outdir', usedefault=True, desc='output directory.') format = traits.Enum( 'nii', 'img', field='convopts.format', usedefault=True, desc='output format.') icedims = traits.Bool(False, field='convopts.icedims', usedefault=True, desc='If image sorting fails, one can try using the additional\ SIEMENS ICEDims information to create unique filenames.\ Use this only if there would be multiple volumes with\ exactly the same file names.') class DicomImportOutputSpec(TraitedSpec): out_files = OutputMultiPath(File(exists=True), desc='converted files') class DicomImport(SPMCommand): """ Uses spm to convert DICOM files to nii or img+hdr. 
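    Output placement is controlled by ``output_dir`` (default
    ``./converted_dicom``) and ``output_dir_struct``, which selects the
    directory layout ('flat', 'series', 'patname', 'patid_date', 'patid' or
    'date_time'); ``format`` switches between 'nii' and 'img'. A short
    sketch with illustrative values::

        di = DicomImport()
        di.inputs.in_files = ['123456-1-1.dcm']  # illustrative DICOM file
        di.inputs.output_dir = 'converted'
        di.inputs.output_dir_struct = 'series'
        di.inputs.format = 'img'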
Examples -------- >>> import nipype.interfaces.spm.utils as spmu >>> di = spmu.DicomImport() >>> di.inputs.in_files = ['functional_1.dcm', 'functional_2.dcm'] >>> di.run() # doctest: +SKIP """ input_spec = DicomImportInputSpec output_spec = DicomImportOutputSpec _jobtype = 'util' _jobname = 'dicom' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt == 'in_files': return np.array(val, dtype=object) if opt == 'output_dir': return np.array([val], dtype=object) if opt == 'output_dir': return os.path.abspath(val) if opt == 'icedims': if val: return 1 return 0 return super(DicomImport, self)._format_arg(opt, spec, val) def _run_interface(self, runtime): od = os.path.abspath(self.inputs.output_dir) if not os.path.isdir(od): os.mkdir(od) return super(DicomImport, self)._run_interface(runtime) def _list_outputs(self): from glob import glob outputs = self._outputs().get() od = os.path.abspath(self.inputs.output_dir) outputs['out_files'] = glob(os.path.join(od, '*')) return outputs nipype-0.9.2/nipype/interfaces/tests/000077500000000000000000000000001227300005300176075ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/tests/realign_json.json000066400000000000000000000016431227300005300231600ustar00rootroot00000000000000{ "cwd": "/home/cburns/data/nipype-tutorial/workingdir/_subject_id_s1/Realign.spm", "flags": null, "fwhm": null, "infile": [ [ "/home/cburns/data/nipype-tutorial/data/s1/f3.nii", "a3c80eb0260e7501b1458c462f51c77f" ], [ "/home/cburns/data/nipype-tutorial/data/s1/f5.nii", "9d6931fbd1b295fef475a2fe1eba5b5d" ], [ "/home/cburns/data/nipype-tutorial/data/s1/f7.nii", "bddcecd01af1cd16bcda369e685c8c89" ], [ "/home/cburns/data/nipype-tutorial/data/s1/f10.nii", "d75253b6ec33489adb72daa7b5b3bf31" ] ], "interp": null, "quality": null, "register_to_mean": true, "separation": null, "weight_img": null, "wrap": null, "write": true, "write_interp": null, "write_mask": null, "write_which": null, "write_wrap": null }nipype-0.9.2/nipype/interfaces/tests/test_auto_AssertEqual.py000066400000000000000000000010541227300005300245010ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.utility import AssertEqual def test_AssertEqual_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), volume1=dict(mandatory=True, ), volume2=dict(mandatory=True, ), ) inputs = AssertEqual.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_BaseInterface.py000066400000000000000000000007371227300005300247520ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.base import BaseInterface def test_BaseInterface_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), ) inputs = BaseInterface.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_C3dAffineTool.py000066400000000000000000000023631227300005300246340ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.c3 import C3dAffineTool def test_C3dAffineTool_inputs(): input_map = 
dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), fsl2ras=dict(argstr='-fsl2ras', position=4, ), ignore_exception=dict(nohash=True, usedefault=True, ), itk_transform=dict(argstr='-oitk %s', hash_files=False, position=5, ), reference_file=dict(argstr='-ref %s', position=1, ), source_file=dict(argstr='-src %s', position=2, ), terminal_output=dict(mandatory=True, nohash=True, ), transform_file=dict(argstr='%s', position=3, ), ) inputs = C3dAffineTool.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_C3dAffineTool_outputs(): output_map = dict(itk_transform=dict(), ) outputs = C3dAffineTool.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_CommandLine.py000066400000000000000000000011661227300005300244420ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.base import CommandLine def test_CommandLine_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = CommandLine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_CopyMeta.py000066400000000000000000000014721227300005300237750ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dcmstack import CopyMeta def test_CopyMeta_inputs(): input_map = dict(dest_file=dict(mandatory=True, ), exclude_classes=dict(), include_classes=dict(), src_file=dict(mandatory=True, ), ) inputs = CopyMeta.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CopyMeta_outputs(): output_map = dict(dest_file=dict(), ) outputs = CopyMeta.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_DataFinder.py000066400000000000000000000016561227300005300242610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.io import DataFinder def test_DataFinder_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), ignore_regexes=dict(), match_regex=dict(usedefault=True, ), max_depth=dict(), min_depth=dict(), root_paths=dict(mandatory=True, ), unpack_single=dict(usedefault=True, ), ) inputs = DataFinder.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DataFinder_outputs(): output_map = dict() outputs = DataFinder.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value 
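# The assertions above only check trait metadata. A minimal manual usage
# sketch of DataFinder (search root and pattern are illustrative, mirroring
# nipype/interfaces/tests/test_io.py); guarded so it never runs under the
# test runner.
if __name__ == '__main__':
    from nipype.interfaces.io import DataFinder
    df = DataFinder()
    df.inputs.root_paths = '.'  # illustrative search root
    df.inputs.match_regex = '.+/(?P<basename>.+)\.txt'  # named groups become extra outputs
    df.inputs.ignore_regexes = ['ignore']  # skip any match containing 'ignore'
    res = df.run()
    print res.outputs.out_paths  # Python 2 print statement, matching this code base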
nipype-0.9.2/nipype/interfaces/tests/test_auto_DataGrabber.py000066400000000000000000000016411227300005300244100ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.io import DataGrabber def test_DataGrabber_inputs(): input_map = dict(base_directory=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), raise_on_empty=dict(usedefault=True, ), sort_filelist=dict(mandatory=True, ), template=dict(mandatory=True, ), template_args=dict(), ) inputs = DataGrabber.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DataGrabber_outputs(): output_map = dict() outputs = DataGrabber.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_DataSink.py000066400000000000000000000017721227300005300237550ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.io import DataSink def test_DataSink_inputs(): input_map = dict(_outputs=dict(usedefault=True, ), base_directory=dict(), container=dict(), ignore_exception=dict(nohash=True, usedefault=True, ), parameterization=dict(usedefault=True, ), regexp_substitutions=dict(), remove_dest_dir=dict(usedefault=True, ), strip_dir=dict(), substitutions=dict(), ) inputs = DataSink.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DataSink_outputs(): output_map = dict(out_file=dict(), ) outputs = DataSink.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_Dcm2nii.py000066400000000000000000000033111227300005300235330ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dcm2nii import Dcm2nii def test_Dcm2nii_inputs(): input_map = dict(anonymize=dict(argstr='-a', position=2, ), args=dict(argstr='%s', position=9, ), config_file=dict(argstr='-b %s', genfile=True, position=7, ), convert_all_pars=dict(argstr='-v', position=8, ), environ=dict(nohash=True, usedefault=True, ), gzip_output=dict(argstr='-g', position=0, usedefault=True, ), id_in_filename=dict(argstr='-i', position=3, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), nii_output=dict(argstr='-n', position=1, usedefault=True, ), output_dir=dict(argstr='-o %s', genfile=True, position=6, ), reorient=dict(argstr='-r', position=4, ), reorient_and_crop=dict(argstr='-x', position=5, ), source_names=dict(argstr='%s', mandatory=True, position=10, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = Dcm2nii.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Dcm2nii_outputs(): output_map = dict(bvals=dict(), bvecs=dict(), converted_files=dict(), reoriented_and_cropped_files=dict(), reoriented_files=dict(), ) outputs = Dcm2nii.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, 
getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_DcmStack.py000066400000000000000000000015511227300005300237430ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dcmstack import DcmStack def test_DcmStack_inputs(): input_map = dict(dicom_files=dict(mandatory=True, ), embed_meta=dict(), exclude_regexes=dict(), include_regexes=dict(), out_ext=dict(usedefault=True, ), out_format=dict(), ) inputs = DcmStack.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DcmStack_outputs(): output_map = dict(out_file=dict(), ) outputs = DcmStack.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_FreeSurferSource.py000066400000000000000000000044151227300005300255050ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.io import FreeSurferSource def test_FreeSurferSource_inputs(): input_map = dict(hemi=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), subject_id=dict(mandatory=True, ), subjects_dir=dict(mandatory=True, ), ) inputs = FreeSurferSource.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_FreeSurferSource_outputs(): output_map = dict(BA_stats=dict(altkey='BA', loc='stats', ), T1=dict(loc='mri', ), annot=dict(altkey='*annot', loc='label', ), aparc_a2009s_stats=dict(altkey='aparc.a2009s', loc='stats', ), aparc_aseg=dict(altkey='aparc*aseg', loc='mri', ), aparc_stats=dict(altkey='aparc', loc='stats', ), aseg=dict(loc='mri', ), aseg_stats=dict(altkey='aseg', loc='stats', ), brain=dict(loc='mri', ), brainmask=dict(loc='mri', ), curv=dict(loc='surf', ), curv_stats=dict(altkey='curv', loc='stats', ), entorhinal_exvivo_stats=dict(altkey='entorhinal_exvivo', loc='stats', ), filled=dict(loc='mri', ), inflated=dict(loc='surf', ), label=dict(altkey='*label', loc='label', ), norm=dict(loc='mri', ), nu=dict(loc='mri', ), orig=dict(loc='mri', ), pial=dict(loc='surf', ), rawavg=dict(loc='mri', ), ribbon=dict(altkey='*ribbon', loc='mri', ), smoothwm=dict(loc='surf', ), sphere=dict(loc='surf', ), sphere_reg=dict(altkey='sphere.reg', loc='surf', ), sulc=dict(loc='surf', ), thickness=dict(loc='surf', ), volume=dict(loc='surf', ), white=dict(loc='surf', ), wm=dict(loc='mri', ), wmparc=dict(loc='mri', ), wmparc_stats=dict(altkey='wmparc', loc='stats', ), ) outputs = FreeSurferSource.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_Function.py000066400000000000000000000014101227300005300240310ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.utility import Function def test_Function_inputs(): input_map = dict(function_str=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), ) inputs = Function.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield 
assert_equal, getattr(inputs.traits()[key], metakey), value def test_Function_outputs(): output_map = dict() outputs = Function.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_GroupAndStack.py000066400000000000000000000016021227300005300247540ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dcmstack import GroupAndStack def test_GroupAndStack_inputs(): input_map = dict(dicom_files=dict(mandatory=True, ), embed_meta=dict(), exclude_regexes=dict(), include_regexes=dict(), out_ext=dict(usedefault=True, ), out_format=dict(), ) inputs = GroupAndStack.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_GroupAndStack_outputs(): output_map = dict(out_list=dict(), ) outputs = GroupAndStack.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_IOBase.py000066400000000000000000000007101227300005300233500ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.io import IOBase def test_IOBase_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), ) inputs = IOBase.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_IdentityInterface.py000066400000000000000000000013051227300005300256610ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.utility import IdentityInterface def test_IdentityInterface_inputs(): input_map = dict() inputs = IdentityInterface.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_IdentityInterface_outputs(): output_map = dict() outputs = IdentityInterface.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_LookupMeta.py000066400000000000000000000013651227300005300243350ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dcmstack import LookupMeta def test_LookupMeta_inputs(): input_map = dict(in_file=dict(mandatory=True, ), meta_keys=dict(mandatory=True, ), ) inputs = LookupMeta.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_LookupMeta_outputs(): output_map = dict() outputs = LookupMeta.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_MatlabCommand.py000066400000000000000000000024231227300005300247500ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - 
DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.matlab import MatlabCommand def test_MatlabCommand_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), logfile=dict(argstr='-logfile %s', ), mfile=dict(usedefault=True, ), nodesktop=dict(argstr='-nodesktop', nohash=True, usedefault=True, ), nosplash=dict(argstr='-nosplash', nohash=True, usedefault=True, ), paths=dict(), postscript=dict(usedefault=True, ), prescript=dict(usedefault=True, ), script=dict(argstr='-r "%s;exit"', mandatory=True, position=-1, ), script_file=dict(usedefault=True, ), single_comp_thread=dict(argstr='-singleCompThread', nohash=True, ), terminal_output=dict(mandatory=True, nohash=True, ), uses_mcr=dict(nohash=True, xor=['nodesktop', 'nosplash', 'single_comp_thread'], ), ) inputs = MatlabCommand.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_Merge.py000066400000000000000000000014561227300005300233150ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.utility import Merge def test_Merge_inputs(): input_map = dict(axis=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), no_flatten=dict(usedefault=True, ), ) inputs = Merge.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Merge_outputs(): output_map = dict(out=dict(), ) outputs = Merge.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_MergeNifti.py000066400000000000000000000015161227300005300243040ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dcmstack import MergeNifti def test_MergeNifti_inputs(): input_map = dict(in_files=dict(mandatory=True, ), merge_dim=dict(), out_ext=dict(usedefault=True, ), out_format=dict(), sort_order=dict(), ) inputs = MergeNifti.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MergeNifti_outputs(): output_map = dict(out_file=dict(), ) outputs = MergeNifti.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_MeshFix.py000066400000000000000000000057451227300005300236260ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.meshfix import MeshFix def test_MeshFix_inputs(): input_map = dict(args=dict(argstr='%s', ), cut_inner=dict(argstr='--cut-inner %d', ), cut_outer=dict(argstr='--cut-outer %d', ), decouple_inin=dict(argstr='--decouple-inin %d', ), decouple_outin=dict(argstr='--decouple-outin %d', ), decouple_outout=dict(argstr='--decouple-outout %d', ), dilation=dict(argstr='--dilate %d', ), dont_clean=dict(argstr='--no-clean', ), environ=dict(nohash=True, usedefault=True, ), epsilon_angle=dict(argstr='-a %f', ), 
finetuning_distance=dict(argstr='%f', requires=['finetuning_substeps'], ), finetuning_inwards=dict(argstr='--fineTuneIn ', requires=['finetuning_distance', 'finetuning_substeps'], ), finetuning_outwards=dict(argstr='--fineTuneIn ', requires=['finetuning_distance', 'finetuning_substeps'], xor=['finetuning_inwards'], ), finetuning_substeps=dict(argstr='%d', requires=['finetuning_distance'], ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file1=dict(argstr='%s', mandatory=True, position=1, ), in_file2=dict(argstr='%s', position=2, ), join_closest_components=dict(argstr='-jc', xor=['join_closest_components'], ), join_overlapping_largest_components=dict(argstr='-j', xor=['join_closest_components'], ), laplacian_smoothing_steps=dict(argstr='--smooth %d', ), number_of_biggest_shells=dict(argstr='--shells %d', ), out_filename=dict(argstr='-o %s', genfile=True, ), output_type=dict(usedefault=True, ), quiet_mode=dict(argstr='-q', ), remove_handles=dict(argstr='--remove-handles', ), save_as_freesurfer_mesh=dict(argstr='--fsmesh', xor=['save_as_vrml', 'save_as_stl'], ), save_as_stl=dict(argstr='--stl', xor=['save_as_vmrl', 'save_as_freesurfer_mesh'], ), save_as_vmrl=dict(argstr='--wrl', xor=['save_as_stl', 'save_as_freesurfer_mesh'], ), set_intersections_to_one=dict(argstr='--intersect', ), terminal_output=dict(mandatory=True, nohash=True, ), uniform_remeshing_steps=dict(argstr='-u %d', requires=['uniform_remeshing_vertices'], ), uniform_remeshing_vertices=dict(argstr='--vertices %d', requires=['uniform_remeshing_steps'], ), x_shift=dict(argstr='--smooth %d', ), ) inputs = MeshFix.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MeshFix_outputs(): output_map = dict(mesh_file=dict(), ) outputs = MeshFix.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_MpiCommandLine.py000066400000000000000000000012741227300005300251100ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.base import MpiCommandLine def test_MpiCommandLine_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), n_procs=dict(), terminal_output=dict(mandatory=True, nohash=True, ), use_mpi=dict(usedefault=True, ), ) inputs = MpiCommandLine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_MySQLSink.py000066400000000000000000000014021227300005300240370ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.io import MySQLSink def test_MySQLSink_inputs(): input_map = dict(config=dict(mandatory=True, xor=['host'], ), database_name=dict(mandatory=True, ), host=dict(mandatory=True, requires=['username', 'password'], usedefault=True, xor=['config'], ), ignore_exception=dict(nohash=True, usedefault=True, ), password=dict(), table_name=dict(mandatory=True, ), username=dict(), ) inputs = MySQLSink.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, 
getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py000066400000000000000000000007621227300005300257700ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dcmstack import NiftiGeneratorBase def test_NiftiGeneratorBase_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), ) inputs = NiftiGeneratorBase.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_Rename.py000066400000000000000000000015251227300005300234620ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.utility import Rename def test_Rename_inputs(): input_map = dict(format_string=dict(mandatory=True, ), in_file=dict(mandatory=True, ), keep_ext=dict(), parse_string=dict(), use_fullpath=dict(usedefault=True, ), ) inputs = Rename.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Rename_outputs(): output_map = dict(out_file=dict(), ) outputs = Rename.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py000066400000000000000000000012131227300005300256050ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.base import SEMLikeCommandLine def test_SEMLikeCommandLine_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SEMLikeCommandLine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_SQLiteSink.py000066400000000000000000000010551227300005300242370ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.io import SQLiteSink def test_SQLiteSink_inputs(): input_map = dict(database_file=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), table_name=dict(mandatory=True, ), ) inputs = SQLiteSink.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_Select.py000066400000000000000000000014561227300005300234750ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.utility import Select def test_Select_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), index=dict(mandatory=True, ), inlist=dict(mandatory=True, ), ) inputs = Select.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Select_outputs(): output_map = dict(out=dict(), ) 
outputs = Select.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_SelectFiles.py000066400000000000000000000016141227300005300244540ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.io import SelectFiles def test_SelectFiles_inputs(): input_map = dict(base_directory=dict(), force_lists=dict(usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), raise_on_empty=dict(usedefault=True, ), sort_filelist=dict(usedefault=True, ), ) inputs = SelectFiles.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SelectFiles_outputs(): output_map = dict() outputs = SelectFiles.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_SlicerCommandLine.py000066400000000000000000000016771227300005300256130ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dynamic_slicer import SlicerCommandLine def test_SlicerCommandLine_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), module=dict(), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = SlicerCommandLine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SlicerCommandLine_outputs(): output_map = dict() outputs = SlicerCommandLine.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_Split.py000066400000000000000000000014321227300005300233430ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.utility import Split def test_Split_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), inlist=dict(mandatory=True, ), splits=dict(mandatory=True, ), ) inputs = Split.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Split_outputs(): output_map = dict() outputs = Split.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_SplitNifti.py000066400000000000000000000014661227300005300243440ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.dcmstack import SplitNifti def test_SplitNifti_inputs(): input_map = dict(in_file=dict(mandatory=True, ), out_ext=dict(usedefault=True, ), out_format=dict(), split_dim=dict(), ) inputs = SplitNifti.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value 
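# For context, a minimal usage sketch of the interface exercised by these
# auto-generated checks (file name and split dimension are illustrative):
#
#     from nipype.interfaces.dcmstack import SplitNifti
#     split = SplitNifti()
#     split.inputs.in_file = 'vols_4d.nii'  # illustrative 4D NIfTI
#     split.inputs.split_dim = 3  # optional dimension to split along
#     # split.run() would populate outputs.out_list with one file per index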
def test_SplitNifti_outputs(): output_map = dict(out_list=dict(), ) outputs = SplitNifti.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_StdOutCommandLine.py000066400000000000000000000013231227300005300256000ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.base import StdOutCommandLine def test_StdOutCommandLine_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), terminal_output=dict(mandatory=True, nohash=True, ), ) inputs = StdOutCommandLine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_XNATSink.py000066400000000000000000000017341227300005300236540ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.io import XNATSink def test_XNATSink_inputs(): input_map = dict(_outputs=dict(usedefault=True, ), assessor_id=dict(xor=['reconstruction_id'], ), cache_dir=dict(), config=dict(mandatory=True, xor=['server'], ), experiment_id=dict(mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), project_id=dict(mandatory=True, ), pwd=dict(), reconstruction_id=dict(xor=['assessor_id'], ), server=dict(mandatory=True, requires=['user', 'pwd'], xor=['config'], ), share=dict(usedefault=True, ), subject_id=dict(mandatory=True, ), user=dict(), ) inputs = XNATSink.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_auto_XNATSource.py000066400000000000000000000020171227300005300242030ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.io import XNATSource def test_XNATSource_inputs(): input_map = dict(cache_dir=dict(), config=dict(mandatory=True, xor=['server'], ), ignore_exception=dict(nohash=True, usedefault=True, ), pwd=dict(), query_template=dict(mandatory=True, ), query_template_args=dict(usedefault=True, ), server=dict(mandatory=True, requires=['user', 'pwd'], xor=['config'], ), user=dict(), ) inputs = XNATSource.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_XNATSource_outputs(): output_map = dict() outputs = XNATSource.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/tests/test_base.py000066400000000000000000000470741227300005300221460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import tempfile import shutil from nipype.testing import (assert_equal, assert_not_equal, assert_raises, assert_true, assert_false, with_setup, package_check, skipif) import nipype.interfaces.base as nib from nipype.utils.filemanip import 
split_filename from nipype.interfaces.base import Undefined, config from traits.testing.nose_tools import skip #test Bunch def test_bunch(): b = nib.Bunch() yield assert_equal, b.__dict__,{} b = nib.Bunch(a=1,b=[2,3]) yield assert_equal, b.__dict__,{'a': 1, 'b': [2,3]} def test_bunch_attribute(): b = nib.Bunch(a=1,b=[2,3],c=None) yield assert_equal, b.a ,1 yield assert_equal, b.b, [2,3] yield assert_equal, b.c, None def test_bunch_repr(): b = nib.Bunch(b=2,c=3,a=dict(n=1,m=2)) yield assert_equal, repr(b), "Bunch(a={'m': 2, 'n': 1}, b=2, c=3)" def test_bunch_methods(): b = nib.Bunch(a=2) b.update(a=3) newb = b.dictcopy() yield assert_equal, b.a, 3 yield assert_equal, b.get('a'), 3 yield assert_equal, b.get('badkey', 'otherthing'), 'otherthing' yield assert_not_equal, b, newb yield assert_equal, type(dict()), type(newb) yield assert_equal, newb['a'], 3 def test_bunch_hash(): # NOTE: Since the path to the json file is included in the Bunch, # the hash will be unique to each machine. pth = os.path.split(os.path.abspath(__file__))[0] json_pth = os.path.join(pth, 'realign_json.json') b = nib.Bunch(infile = json_pth, otherthing = 'blue', yat = True) newbdict, bhash = b._get_bunch_hash() yield assert_equal, bhash, 'ddcc7b4ec5675df8cf317a48bd1857fa' # Make sure the hash stored in the json file for `infile` is correct. jshash = nib.md5() fp = file(json_pth) jshash.update(fp.read()) fp.close() yield assert_equal, newbdict['infile'][0][1], jshash.hexdigest() yield assert_equal, newbdict['yat'], True # create a temp file #global tmp_infile, tmp_dir #tmp_infile = None #tmp_dir = None def setup_file(): #global tmp_infile, tmp_dir tmp_dir = tempfile.mkdtemp() tmp_infile = os.path.join(tmp_dir, 'foo.txt') open(tmp_infile, 'w').writelines('123456789') return tmp_infile def teardown_file(tmp_dir): shutil.rmtree(tmp_dir) def test_TraitedSpec(): yield assert_true, nib.TraitedSpec().get_hashval() yield assert_equal, nib.TraitedSpec().__repr__(), '\n\n' class spec(nib.TraitedSpec): foo = nib.traits.Int goo = nib.traits.Float(usedefault=True) yield assert_equal, spec().foo, Undefined yield assert_equal, spec().goo, 0.0 specfunc = lambda x : spec(hoo=x) yield assert_raises, nib.traits.TraitError, specfunc, 1 infields = spec(foo=1) hashval = ({'foo': 1, 'goo': '0.0000000000'}, 'cb03be1c3182ff941eecea6440c910f0') yield assert_equal, infields.get_hashval(), hashval #yield assert_equal, infields.hashval[1], hashval[1] yield assert_equal, infields.__repr__(), '\nfoo = 1\ngoo = 0.0\n' @skip def test_TraitedSpec_dynamic(): from cPickle import dumps, loads a = nib.BaseTraitedSpec() a.add_trait('foo', nib.traits.Int) a.foo = 1 assign_a = lambda : setattr(a, 'foo', 'a') yield assert_raises, Exception, assign_a pkld_a = dumps(a) unpkld_a = loads(pkld_a) assign_a_again = lambda : setattr(unpkld_a, 'foo', 'a') yield assert_raises, Exception, assign_a_again def test_TraitedSpec_logic(): class spec3(nib.TraitedSpec): _xor_inputs = ('foo', 'bar') foo = nib.traits.Int(xor = _xor_inputs, desc = 'foo or bar, not both') bar = nib.traits.Int(xor = _xor_inputs, desc = 'bar or foo, not both') kung = nib.traits.Float(requires = ('foo',), position = 0, desc = 'kung foo') class out3(nib.TraitedSpec): output = nib.traits.Int class MyInterface(nib.BaseInterface): input_spec = spec3 output_spec = out3 myif = MyInterface() yield assert_raises, TypeError, setattr(myif.inputs, 'kung', 10.0) myif.inputs.foo = 1 yield assert_equal, myif.inputs.foo, 1 set_bar = lambda : setattr(myif.inputs, 'bar', 1) yield assert_raises, IOError, set_bar yield 
assert_equal, myif.inputs.foo, 1 myif.inputs.kung = 2 yield assert_equal, myif.inputs.kung, 2.0 def test_deprecation(): class DeprecationSpec1(nib.TraitedSpec): foo = nib.traits.Int(deprecated='0.1') spec_instance = DeprecationSpec1() set_foo = lambda : setattr(spec_instance, 'foo', 1) yield assert_raises, nib.TraitError, set_foo class DeprecationSpec1numeric(nib.TraitedSpec): foo = nib.traits.Int(deprecated='0.1') spec_instance = DeprecationSpec1numeric() set_foo = lambda : setattr(spec_instance, 'foo', 1) yield assert_raises, nib.TraitError, set_foo class DeprecationSpec2(nib.TraitedSpec): foo = nib.traits.Int(deprecated='100', new_name='bar') spec_instance = DeprecationSpec2() set_foo = lambda : setattr(spec_instance, 'foo', 1) yield assert_raises, nib.TraitError, set_foo class DeprecationSpec3(nib.TraitedSpec): foo = nib.traits.Int(deprecated='1000', new_name='bar') bar = nib.traits.Int() spec_instance = DeprecationSpec3() not_raised = True try: spec_instance.foo = 1 except nib.TraitError: not_raised = False yield assert_true, not_raised class DeprecationSpec3(nib.TraitedSpec): foo = nib.traits.Int(deprecated='1000', new_name='bar') bar = nib.traits.Int() spec_instance = DeprecationSpec3() not_raised = True try: spec_instance.foo = 1 except nib.TraitError: not_raised = False yield assert_true, not_raised yield assert_equal, spec_instance.foo, Undefined yield assert_equal, spec_instance.bar, 1 def test_namesource(): tmp_infile = setup_file() tmpd, nme, ext = split_filename(tmp_infile) pwd = os.getcwd() os.chdir(tmpd) class spec2(nib.CommandLineInputSpec): moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s", position=2) doo = nib.File(exists=True, argstr="%s", position=1) class TestName(nib.CommandLine): _cmd = "mycommand" input_spec = spec2 testobj = TestName() testobj.inputs.doo = tmp_infile yield assert_true, '%s_generated' % nme in testobj.cmdline testobj.inputs.moo = "my_%s_template" yield assert_true, 'my_%s_template' % nme in testobj.cmdline os.chdir(pwd) teardown_file(tmpd) def checknose(): """check version of nose for known incompatability""" mod = __import__('nose') if mod.__versioninfo__[1] <= 11: return 0 else: return 1 @skipif(checknose) def test_TraitedSpec_withFile(): tmp_infile = setup_file() tmpd, nme = os.path.split(tmp_infile) yield assert_true, os.path.exists(tmp_infile) class spec2(nib.TraitedSpec): moo = nib.File(exists=True) doo = nib.traits.List(nib.File(exists=True)) infields = spec2(moo=tmp_infile, doo=[tmp_infile]) hashval = infields.get_hashval(hash_method='content') yield assert_equal, hashval[1], '8c227fb727c32e00cd816c31d8fea9b9' teardown_file(tmpd) @skipif(checknose) def test_TraitedSpec_withNoFileHashing(): tmp_infile = setup_file() tmpd, nme = os.path.split(tmp_infile) pwd = os.getcwd() os.chdir(tmpd) yield assert_true, os.path.exists(tmp_infile) class spec2(nib.TraitedSpec): moo = nib.File(exists=True, hash_files=False) doo = nib.traits.List(nib.File(exists=True)) infields = spec2(moo=nme, doo=[tmp_infile]) hashval = infields.get_hashval(hash_method='content') yield assert_equal, hashval[1], '642c326a05add933e9cdc333ce2d0ac2' class spec3(nib.TraitedSpec): moo = nib.File(exists=True, name_source="doo") doo = nib.traits.List(nib.File(exists=True)) infields = spec3(moo=nme, doo=[tmp_infile]) hashval1 = infields.get_hashval(hash_method='content') class spec4(nib.TraitedSpec): moo = nib.File(exists=True) doo = nib.traits.List(nib.File(exists=True)) infields = spec4(moo=nme, doo=[tmp_infile]) hashval2 = 
infields.get_hashval(hash_method='content') yield assert_not_equal, hashval1[1], hashval2[1] os.chdir(pwd) teardown_file(tmpd) def test_Interface(): yield assert_equal, nib.Interface.input_spec, None yield assert_equal, nib.Interface.output_spec, None yield assert_raises, NotImplementedError, nib.Interface yield assert_raises, NotImplementedError, nib.Interface.help yield assert_raises, NotImplementedError, nib.Interface._inputs_help yield assert_raises, NotImplementedError, nib.Interface._outputs_help yield assert_raises, NotImplementedError, nib.Interface._outputs class DerivedInterface(nib.Interface): def __init__(self): pass nif = DerivedInterface() yield assert_raises, NotImplementedError, nif.run yield assert_raises, NotImplementedError, nif.aggregate_outputs yield assert_raises, NotImplementedError, nif._list_outputs yield assert_raises, NotImplementedError, nif._get_filecopy_info def test_BaseInterface(): yield assert_equal, nib.BaseInterface.help(), None yield assert_equal, nib.BaseInterface._get_filecopy_info(), [] class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int') goo = nib.traits.Int(desc='a random int', mandatory=True) moo = nib.traits.Int(desc='a random int', mandatory=False) hoo = nib.traits.Int(desc='a random int', usedefault=True) zoo = nib.File(desc='a file', copyfile=False) woo = nib.File(desc='a file', copyfile=True) class OutputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int') class DerivedInterface(nib.BaseInterface): input_spec = InputSpec yield assert_equal, DerivedInterface.help(), None yield assert_true, 'moo' in ''.join(DerivedInterface._inputs_help()) yield assert_equal, DerivedInterface()._outputs(), None yield assert_equal, DerivedInterface._get_filecopy_info()[0]['key'], 'woo' yield assert_true, DerivedInterface._get_filecopy_info()[0]['copy'] yield assert_equal, DerivedInterface._get_filecopy_info()[1]['key'], 'zoo' yield assert_false, DerivedInterface._get_filecopy_info()[1]['copy'] yield assert_equal, DerivedInterface().inputs.foo, Undefined yield assert_raises, ValueError, DerivedInterface()._check_mandatory_inputs yield assert_equal, DerivedInterface(goo=1)._check_mandatory_inputs(), None yield assert_raises, ValueError, DerivedInterface().run yield assert_raises, NotImplementedError, DerivedInterface(goo=1).run class DerivedInterface2(DerivedInterface): output_spec = OutputSpec def _run_interface(self, runtime): return runtime yield assert_equal, DerivedInterface2.help(), None yield assert_equal, DerivedInterface2()._outputs().foo, Undefined yield assert_raises, NotImplementedError, DerivedInterface2(goo=1).run nib.BaseInterface.input_spec = None yield assert_raises, Exception, nib.BaseInterface def test_input_version(): class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int', min_ver='0.9') class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec obj = DerivedInterface1() not_raised = True try: obj._check_version_requirements(obj.inputs) except: not_raised = False yield assert_true, not_raised config.set('execution', 'stop_on_unknown_version', True) try: obj._check_version_requirements(obj.inputs) except: not_raised = False yield assert_false, not_raised config.set_default_config() class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int', min_ver='0.9') class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec _version = '0.8' obj = DerivedInterface1() obj.inputs.foo = 1 yield assert_raises, Exception, obj._check_version_requirements class 
InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int', min_ver='0.9') class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec _version = '0.10' obj = DerivedInterface1() not_raised = True try: obj._check_version_requirements(obj.inputs) except: not_raised = False yield assert_true, not_raised class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int', min_ver='0.9') class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec _version = '0.9' obj = DerivedInterface1() obj.inputs.foo = 1 not_raised = True try: obj._check_version_requirements(obj.inputs) except: not_raised = False yield assert_true, not_raised class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int', max_ver='0.7') class DerivedInterface2(nib.BaseInterface): input_spec = InputSpec _version = '0.8' obj = DerivedInterface2() obj.inputs.foo = 1 yield assert_raises, Exception, obj._check_version_requirements class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int', max_ver='0.9') class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec _version = '0.9' obj = DerivedInterface1() obj.inputs.foo = 1 not_raised = True try: obj._check_version_requirements(obj.inputs) except: not_raised = False yield assert_true, not_raised def test_output_version(): class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int') class OutputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int', min_ver='0.9') class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec _version = '0.10' obj = DerivedInterface1() yield assert_equal, obj._check_version_requirements(obj._outputs()), [] class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int') class OutputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int', min_ver='0.11') class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec _version = '0.10' obj = DerivedInterface1() yield assert_equal, obj._check_version_requirements(obj._outputs()), ['foo'] class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int') class OutputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc='a random int', min_ver='0.11') class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec _version = '0.10' def _run_interface(self, runtime): return runtime def _list_outputs(self): return {'foo': 1} obj = DerivedInterface1() yield assert_raises, KeyError, obj.run def test_Commandline(): yield assert_raises, Exception, nib.CommandLine ci = nib.CommandLine(command='which') yield assert_equal, ci.cmd, 'which' yield assert_equal, ci.inputs.args, Undefined ci2 = nib.CommandLine(command='which', args='ls') yield assert_equal, ci2.cmdline, 'which ls' ci3 = nib.CommandLine(command='echo') ci3.inputs.environ = {'MYENV' : 'foo'} res = ci3.run() yield assert_equal, res.runtime.environ['MYENV'], 'foo' yield assert_equal, res.outputs, None class CommandLineInputSpec1(nib.CommandLineInputSpec): foo = nib.traits.Str(argstr='%s', desc='a str') goo = nib.traits.Bool(argstr='-g', desc='a bool', position=0) hoo = nib.traits.List(argstr='-l %s', desc='a list') moo = nib.traits.List(argstr='-i %d...', desc='a repeated list', position=-1) noo = nib.traits.Int(argstr='-x %d', desc='an int') roo = nib.traits.Str(desc='not on command line') soo = nib.traits.Bool(argstr="-soo") nib.CommandLine.input_spec = CommandLineInputSpec1 ci4 = nib.CommandLine(command='cmd') ci4.inputs.foo = 'foo' ci4.inputs.goo = True 
ci4.inputs.hoo = ['a', 'b'] ci4.inputs.moo = [1, 2, 3] ci4.inputs.noo = 0 ci4.inputs.roo = 'hello' ci4.inputs.soo = False cmd = ci4._parse_inputs() yield assert_equal, cmd[0], '-g' yield assert_equal, cmd[-1], '-i 1 -i 2 -i 3' yield assert_true, 'hello' not in ' '.join(cmd) yield assert_true, '-soo' not in ' '.join(cmd) ci4.inputs.soo = True cmd = ci4._parse_inputs() yield assert_true, '-soo' in ' '.join(cmd) class CommandLineInputSpec2(nib.CommandLineInputSpec): foo = nib.File(argstr='%s', desc='a str', genfile=True) nib.CommandLine.input_spec = CommandLineInputSpec2 ci5 = nib.CommandLine(command='cmd') yield assert_raises, NotImplementedError, ci5._parse_inputs class DerivedClass(nib.CommandLine): input_spec = CommandLineInputSpec2 def _gen_filename(self, name): return 'filename' ci6 = DerivedClass(command='cmd') yield assert_equal, ci6._parse_inputs()[0], 'filename' nib.CommandLine.input_spec = nib.CommandLineInputSpec def test_Commandline_environ(): from nipype import config config.set_default_config() ci3 = nib.CommandLine(command='echo') res = ci3.run() yield assert_equal, res.runtime.environ['DISPLAY'], ':1' config.set('execution', 'display_variable', ':3') res = ci3.run() yield assert_false, 'DISPLAY' in ci3.inputs.environ yield assert_equal, res.runtime.environ['DISPLAY'], ':3' ci3.inputs.environ = {'DISPLAY' : ':2'} res = ci3.run() yield assert_equal, res.runtime.environ['DISPLAY'], ':2' def test_CommandLine_output(): tmp_infile = setup_file() tmpd, name = os.path.split(tmp_infile) pwd = os.getcwd() os.chdir(tmpd) yield assert_true, os.path.exists(tmp_infile) ci = nib.CommandLine(command='ls -l') ci.inputs.terminal_output = 'allatonce' res = ci.run() yield assert_equal, res.runtime.merged, '' yield assert_true, name in res.runtime.stdout ci = nib.CommandLine(command='ls -l') ci.inputs.terminal_output = 'file' res = ci.run() yield assert_true, 'stdout.nipype' in res.runtime.stdout ci = nib.CommandLine(command='ls -l') ci.inputs.terminal_output = 'none' res = ci.run() yield assert_equal, res.runtime.stdout, '' ci = nib.CommandLine(command='ls -l') res = ci.run() yield assert_true, 'stdout.nipype' in res.runtime.stdout os.chdir(pwd) teardown_file(tmpd) def test_global_CommandLine_output(): tmp_infile = setup_file() tmpd, name = os.path.split(tmp_infile) pwd = os.getcwd() os.chdir(tmpd) ci = nib.CommandLine(command='ls -l') res = ci.run() yield assert_true, name in res.runtime.stdout yield assert_true, os.path.exists(tmp_infile) nib.CommandLine.set_default_terminal_output('allatonce') ci = nib.CommandLine(command='ls -l') res = ci.run() yield assert_equal, res.runtime.merged, '' yield assert_true, name in res.runtime.stdout nib.CommandLine.set_default_terminal_output('file') ci = nib.CommandLine(command='ls -l') res = ci.run() yield assert_true, 'stdout.nipype' in res.runtime.stdout nib.CommandLine.set_default_terminal_output('none') ci = nib.CommandLine(command='ls -l') res = ci.run() yield assert_equal, res.runtime.stdout, '' os.chdir(pwd) teardown_file(tmpd)nipype-0.9.2/nipype/interfaces/tests/test_io.py000066400000000000000000000215101227300005300216260ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import glob import shutil import os.path as op from tempfile import mkstemp, mkdtemp from nose.tools import assert_raises import nipype from nipype.testing import assert_equal, assert_true, assert_false import nipype.interfaces.io as nio from nipype.interfaces.base import Undefined 
def test_datagrabber(): dg = nio.DataGrabber() yield assert_equal, dg.inputs.template, Undefined yield assert_equal, dg.inputs.base_directory, Undefined yield assert_equal, dg.inputs.template_args, {'outfiles': []} def test_selectfiles(): base_dir = op.dirname(nipype.__file__) templates = {"model": "interfaces/{package}/model.py", "preprocess": "interfaces/{package}/pre*.py"} dg = nio.SelectFiles(templates, base_directory=base_dir) yield assert_equal, dg._infields, ["package"] yield assert_equal, sorted(dg._outfields), ["model", "preprocess"] dg.inputs.package = "fsl" res = dg.run() wanted = op.join(op.dirname(nipype.__file__), "interfaces/fsl/model.py") yield assert_equal, res.outputs.model, wanted dg = nio.SelectFiles(templates, base_directory=base_dir, force_lists=True) outfields = sorted(dg._outputs().get()) yield assert_equal, outfields, ["model", "preprocess"] dg.inputs.package = "spm" res = dg.run() wanted = op.join(op.dirname(nipype.__file__), "interfaces/spm/preprocess.py") yield assert_equal, res.outputs.preprocess, [wanted] dg.inputs.package = "fsl" dg.inputs.force_lists = ["model"] res = dg.run() preproc = op.join(op.dirname(nipype.__file__), "interfaces/fsl/preprocess.py") model = [op.join(op.dirname(nipype.__file__), "interfaces/fsl/model.py")] yield assert_equal, res.outputs.preprocess, preproc yield assert_equal, res.outputs.model, model templates = {"converter": "interfaces/dcm{to!s}nii.py"} dg = nio.SelectFiles(templates, base_directory=base_dir) dg.inputs.to = 2 res = dg.run() wanted = op.join(base_dir, "interfaces/dcm2nii.py") yield assert_equal, res.outputs.converter, wanted def test_selectfiles_valueerror(): """Test ValueError when force_lists has field that isn't in template.""" base_dir = op.dirname(nipype.__file__) templates = {"model": "interfaces/{package}/model.py", "preprocess": "interfaces/{package}/pre*.py"} force_lists = ["model", "preprocess", "registration"] sf = nio.SelectFiles(templates, base_directory=base_dir, force_lists=force_lists) yield assert_raises, ValueError, sf.run def test_datagrabber_order(): tempdir = mkdtemp() file1 = mkstemp(prefix='sub002_L1_R1.q', dir=tempdir) file2 = mkstemp(prefix='sub002_L1_R2.q', dir=tempdir) file3 = mkstemp(prefix='sub002_L2_R1.q', dir=tempdir) file4 = mkstemp(prefix='sub002_L2_R2.q', dir=tempdir) file5 = mkstemp(prefix='sub002_L3_R10.q', dir=tempdir) file6 = mkstemp(prefix='sub002_L3_R2.q', dir=tempdir) dg = nio.DataGrabber(infields=['sid']) dg.inputs.base_directory = tempdir dg.inputs.template = '%s_L%d_R*.q*' dg.inputs.template_args = {'outfiles': [['sid', 1], ['sid', 2], ['sid', 3]]} dg.inputs.sid = 'sub002' dg.inputs.sort_filelist = True res = dg.run() outfiles = res.outputs.outfiles yield assert_true, 'sub002_L1_R1' in outfiles[0][0] yield assert_true, 'sub002_L1_R2' in outfiles[0][1] yield assert_true, 'sub002_L2_R1' in outfiles[1][0] yield assert_true, 'sub002_L2_R2' in outfiles[1][1] yield assert_true, 'sub002_L3_R2' in outfiles[2][0] yield assert_true, 'sub002_L3_R10' in outfiles[2][1] shutil.rmtree(tempdir) def test_datasink(): ds = nio.DataSink() yield assert_true, ds.inputs.parameterization yield assert_equal, ds.inputs.base_directory, Undefined yield assert_equal, ds.inputs.strip_dir, Undefined yield assert_equal, ds.inputs._outputs, {} ds = nio.DataSink(base_directory='foo') yield assert_equal, ds.inputs.base_directory, 'foo' ds = nio.DataSink(infields=['test']) yield assert_true, 'test' in ds.inputs.copyable_trait_names() def test_datasink_substitutions(): indir = 
mkdtemp(prefix='-Tmp-nipype_ds_subs_in') outdir = mkdtemp(prefix='-Tmp-nipype_ds_subs_out') files = [] for n in ['ababab.n', 'xabababyz.n']: f = os.path.join(indir, n) files.append(f) open(f, 'w') ds = nio.DataSink( parametrization=False, base_directory=outdir, substitutions=[('ababab', 'ABABAB')], # end anchoring ($) is used to ensure the operation applies to the filename # rather than to matches in the names of the temporary directories # Patterns should be more comprehensible in real-world usage # since real paths would be more sensible regexp_substitutions=[(r'xABABAB(\w*)\.n$', r'a-\1-b.n'), ('(.*%s)[-a]([^%s]*)$' % ((os.path.sep,) * 2), r'\1!\2')]) setattr(ds.inputs, '@outdir', files) ds.run() yield assert_equal, \ sorted([os.path.basename(x) for x in glob.glob(os.path.join(outdir, '*'))]), \ ['!-yz-b.n', 'ABABAB.n'] # so the regexp substitution was applied second and both patterns were used shutil.rmtree(indir) shutil.rmtree(outdir) def _temp_analyze_files(): """Generate temporary analyze file pair.""" fd, orig_img = mkstemp(suffix='.img', dir=mkdtemp()) orig_hdr = orig_img[:-4] + '.hdr' fp = file(orig_hdr, 'w+') fp.close() return orig_img, orig_hdr def test_datasink_copydir(): orig_img, orig_hdr = _temp_analyze_files() outdir = mkdtemp() pth, fname = os.path.split(orig_img) ds = nio.DataSink(base_directory=outdir, parameterization=False) setattr(ds.inputs, '@outdir', pth) ds.run() sep = os.path.sep file_exists = lambda: os.path.exists(os.path.join(outdir, pth.split(sep)[-1], fname)) yield assert_true, file_exists() shutil.rmtree(pth) orig_img, orig_hdr = _temp_analyze_files() pth, fname = os.path.split(orig_img) ds.inputs.remove_dest_dir = True setattr(ds.inputs, 'outdir', pth) ds.run() yield assert_false, file_exists() shutil.rmtree(outdir) shutil.rmtree(pth) def test_datafinder_copydir(): outdir = mkdtemp() open(os.path.join(outdir, "findme.txt"), 'a').close() open(os.path.join(outdir, "dontfindme"), 'a').close() open(os.path.join(outdir, "dontfindmealsotxt"), 'a').close() open(os.path.join(outdir, "findmetoo.txt"), 'a').close() open(os.path.join(outdir, "ignoreme.txt"), 'a').close() open(os.path.join(outdir, "alsoignore.txt"), 'a').close() from nipype.interfaces.io import DataFinder df = DataFinder() df.inputs.root_paths = outdir df.inputs.match_regex = '.+/(?P<basename>.+)\.txt' df.inputs.ignore_regexes = ['ignore'] result = df.run() expected = ["findme.txt", "findmetoo.txt"] for path, expected_fname in zip(result.outputs.out_paths, expected): _, fname = os.path.split(path) yield assert_equal, fname, expected_fname yield assert_equal, result.outputs.basename, ["findme", "findmetoo"] shutil.rmtree(outdir) def test_datafinder_depth(): outdir = mkdtemp() os.makedirs(os.path.join(outdir, '0', '1', '2', '3')) from nipype.interfaces.io import DataFinder df = DataFinder() df.inputs.root_paths = os.path.join(outdir, '0') for min_depth in range(4): for max_depth in range(min_depth, 4): df.inputs.min_depth = min_depth df.inputs.max_depth = max_depth result = df.run() expected = [str(x) for x in range(min_depth, max_depth + 1)] for path, exp_fname in zip(result.outputs.out_paths, expected): _, fname = os.path.split(path) yield assert_equal, fname, exp_fname shutil.rmtree(outdir) def test_datafinder_unpack(): outdir = mkdtemp() single_res = os.path.join(outdir, "findme.txt") open(single_res, 'a').close() open(os.path.join(outdir, "dontfindme"), 'a').close() from nipype.interfaces.io import DataFinder df = DataFinder() df.inputs.root_paths = outdir df.inputs.match_regex = '.+/(?P<basename>.+)\.txt' df.inputs.unpack_single = True result = df.run() print
result.outputs.out_paths yield assert_equal, result.outputs.out_paths, single_res def test_freesurfersource(): fss = nio.FreeSurferSource() yield assert_equal, fss.inputs.hemi, 'both' yield assert_equal, fss.inputs.subject_id, Undefined yield assert_equal, fss.inputs.subjects_dir, Undefined nipype-0.9.2/nipype/interfaces/tests/test_matlab.py000066400000000000000000000060071227300005300224630ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from tempfile import mkdtemp from shutil import rmtree from nipype.testing import (assert_equal, assert_true, assert_false, assert_raises, skipif) import nipype.interfaces.matlab as mlab matlab_cmd = mlab.get_matlab_command() no_matlab = matlab_cmd is None if not no_matlab: mlab.MatlabCommand.set_default_matlab_cmd(matlab_cmd) @skipif(no_matlab) def test_cmdline(): basedir = mkdtemp() mi = mlab.MatlabCommand(script='whos', script_file='testscript', mfile=False) yield assert_equal, mi.cmdline, \ matlab_cmd + (' -nodesktop -nosplash -singleCompThread -r "fprintf(1,' '\'Executing code at %s:\\n\',datestr(now));ver,try,' 'whos,catch ME,fprintf(2,\'MATLAB code threw an ' 'exception:\\n\');fprintf(2,\'%s\\n\',ME.message);if ' 'length(ME.stack) ~= 0, fprintf(2,\'File:%s\\nName:%s\\n' 'Line:%d\\n\',ME.stack.file,ME.stack.name,' 'ME.stack.line);, end;end;;exit"') yield assert_equal, mi.inputs.script, 'whos' yield assert_equal, mi.inputs.script_file, 'testscript' path_exists = os.path.exists(os.path.join(basedir, 'testscript.m')) yield assert_false, path_exists rmtree(basedir) @skipif(no_matlab) def test_mlab_inputspec(): spec = mlab.MatlabInputSpec() for k in ['paths', 'script', 'nosplash', 'mfile', 'logfile', 'script_file', 'nodesktop']: yield assert_true, k in spec.copyable_trait_names() yield assert_true, spec.nodesktop yield assert_true, spec.nosplash yield assert_true, spec.mfile yield assert_equal, spec.script_file, 'pyscript.m' @skipif(no_matlab) def test_mlab_init(): yield assert_equal, mlab.MatlabCommand._cmd, 'matlab' yield assert_equal, mlab.MatlabCommand.input_spec, mlab.MatlabInputSpec yield assert_equal, mlab.MatlabCommand().cmd, matlab_cmd mc = mlab.MatlabCommand(matlab_cmd='foo_m') yield assert_equal, mc.cmd, 'foo_m' @skipif(no_matlab) def test_run_interface(): mc = mlab.MatlabCommand(matlab_cmd='foo_m') yield assert_raises, ValueError, mc.run # script is mandatory mc.inputs.script = 'a=1;' yield assert_raises, IOError, mc.run # foo_m is not an executable cwd = os.getcwd() basedir = mkdtemp() os.chdir(basedir) # bypasses ubuntu dash issue mc = mlab.MatlabCommand(script='foo;', paths=[basedir], mfile=True) yield assert_raises, RuntimeError, mc.run # bypasses ubuntu dash issue res = mlab.MatlabCommand(script='a=1;', paths=[basedir], mfile=True).run() yield assert_equal, res.runtime.returncode, 0 os.chdir(cwd) rmtree(basedir) @skipif(no_matlab) def test_set_matlabcmd(): mi = mlab.MatlabCommand() mi.set_default_matlab_cmd('foo') yield assert_equal, mi._default_matlab_cmd, 'foo' mi.set_default_matlab_cmd(matlab_cmd) nipype-0.9.2/nipype/interfaces/tests/test_utility.py000066400000000000000000000064111227300005300227250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import shutil from tempfile import mkdtemp import numpy as np from nipype.testing import assert_equal, assert_true, assert_raises from nipype.interfaces import utility import 
nipype.pipeline.engine as pe def test_rename(): tempdir = os.path.realpath(mkdtemp()) origdir = os.getcwd() os.chdir(tempdir) # Test very simple rename _ = open("file.txt", "w").close() rn = utility.Rename(in_file="file.txt", format_string="test_file1.txt") res = rn.run() outfile = os.path.join(tempdir, "test_file1.txt") yield assert_equal, res.outputs.out_file, outfile yield assert_true, os.path.exists(outfile) # Now a string-formatting version rn = utility.Rename(in_file="file.txt", format_string="%(field1)s_file%(field2)d", keep_ext=True) # Test .input field creation yield assert_true, hasattr(rn.inputs, "field1") yield assert_true, hasattr(rn.inputs, "field2") # Set the inputs rn.inputs.field1 = "test" rn.inputs.field2 = 2 res = rn.run() outfile = os.path.join(tempdir, "test_file2.txt") yield assert_equal, res.outputs.out_file, outfile yield assert_true, os.path.exists(outfile) # Clean up os.chdir(origdir) shutil.rmtree(tempdir) def test_function(): tempdir = os.path.realpath(mkdtemp()) origdir = os.getcwd() os.chdir(tempdir) def gen_random_array(size): import numpy as np return np.random.rand(size, size) f1 = pe.MapNode(utility.Function(input_names=['size'], output_names=['random_array'], function=gen_random_array), name='random_array', iterfield=['size']) f1.inputs.size = [2, 3, 5] wf = pe.Workflow(name="test_workflow") def increment_array(in_array): return in_array + 1 f2 = pe.MapNode(utility.Function(input_names=['in_array'], output_names=['out_array'], function=increment_array), name='increment_array', iterfield=['in_array']) wf.connect(f1, 'random_array', f2, 'in_array') wf.run() # Clean up os.chdir(origdir) shutil.rmtree(tempdir) def make_random_array(size): return np.random.randn(size, size) def should_fail(): tempdir = os.path.realpath(mkdtemp()) origdir = os.getcwd() os.chdir(tempdir) node = pe.Node(utility.Function(input_names=["size"], output_names=["random_array"], function=make_random_array), name="should_fail") try: node.inputs.size = 10 node.run() finally: os.chdir(origdir) shutil.rmtree(tempdir) assert_raises(NameError, should_fail) def test_function_with_imports(): tempdir = os.path.realpath(mkdtemp()) origdir = os.getcwd() os.chdir(tempdir) node = pe.Node(utility.Function(input_names=["size"], output_names=["random_array"], function=make_random_array, imports=["import numpy as np"]), name="should_not_fail") print node.inputs.function_str try: node.inputs.size = 10 node.run() finally: os.chdir(origdir) shutil.rmtree(tempdir) nipype-0.9.2/nipype/interfaces/traits_extension.py000066400000000000000000000206751227300005300224330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """This module contains Trait classes that we've pulled from the traits source and fixed due to various bugs. File and Directory are redefined as the release version had dependencies on TraitsUI, which we do not want Nipype to depend on. At least not yet. Undefined class was missing the __len__ operator, causing edit_traits and configure_traits to fail on List objects. Even though we don't require TraitsUI, this bug was the only thing preventing us from popping up GUIs which users like. These bugs have been in Traits v3.3.0 and v3.2.1. We have reported all of these bugs and they've been fixed in enthought svn repository (usually by Robert Kern). 
""" import os # perform all external trait imports here import traits if traits.__version__ < '3.7.0': raise ImportError('Traits version 3.7.0 or higher must be installed') import traits.api as traits from traits.trait_handlers import TraitDictObject, TraitListObject from traits.trait_errors import TraitError from traits.trait_base import _Undefined class BaseFile ( traits.BaseStr ): """ Defines a trait whose value must be the name of a file. """ # A description of the type of value this trait accepts: info_text = 'a file name' def __init__ ( self, value = '', filter = None, auto_set = False, entries = 0, exists = False, **metadata ): """ Creates a File trait. Parameters ---------- value : string The default value for the trait filter : string A wildcard string to filter filenames in the file dialog box used by the attribute trait editor. auto_set : boolean Indicates whether the file editor updates the trait value after every key stroke. exists : boolean Indicates whether the trait value must be an existing file or not. Default Value ------------- *value* or '' """ self.filter = filter self.auto_set = auto_set self.entries = entries self.exists = exists if exists: self.info_text = 'an existing file name' super( BaseFile, self ).__init__( value, **metadata ) def validate ( self, object, name, value ): """ Validates that a specified value is valid for this trait. Note: The 'fast validator' version performs this check in C. """ validated_value = super( BaseFile, self ).validate( object, name, value ) if not self.exists: return validated_value elif os.path.isfile( value ): return validated_value self.error( object, name, value ) class File ( BaseFile ): """ Defines a trait whose value must be the name of a file using a C-level fast validator. """ def __init__ ( self, value = '', filter = None, auto_set = False, entries = 0, exists = False, **metadata ): """ Creates a File trait. Parameters ---------- value : string The default value for the trait filter : string A wildcard string to filter filenames in the file dialog box used by the attribute trait editor. auto_set : boolean Indicates whether the file editor updates the trait value after every key stroke. exists : boolean Indicates whether the trait value must be an existing file or not. Default Value ------------- *value* or '' """ if not exists: # Define the C-level fast validator to use: fast_validate = ( 11, basestring ) super( File, self ).__init__( value, filter, auto_set, entries, exists, **metadata ) #------------------------------------------------------------------------------- # 'BaseDirectory' and 'Directory' traits: #------------------------------------------------------------------------------- class BaseDirectory ( traits.BaseStr ): """ Defines a trait whose value must be the name of a directory. """ # A description of the type of value this trait accepts: info_text = 'a directory name' def __init__ ( self, value = '', auto_set = False, entries = 0, exists = False, **metadata ): """ Creates a BaseDirectory trait. Parameters ---------- value : string The default value for the trait auto_set : boolean Indicates whether the directory editor updates the trait value after every key stroke. exists : boolean Indicates whether the trait value must be an existing directory or not. 
Default Value ------------- *value* or '' """ self.entries = entries self.auto_set = auto_set self.exists = exists if exists: self.info_text = 'an existing directory name' super( BaseDirectory, self ).__init__( value, **metadata ) def validate ( self, object, name, value ): """ Validates that a specified value is valid for this trait. Note: The 'fast validator' version performs this check in C. """ validated_value = super( BaseDirectory, self ).validate( object, name, value ) if not self.exists: return validated_value if os.path.isdir( value ): return validated_value self.error( object, name, value ) class Directory ( BaseDirectory ): """ Defines a trait whose value must be the name of a directory using a C-level fast validator. """ def __init__ ( self, value = '', auto_set = False, entries = 0, exists = False, **metadata ): """ Creates a Directory trait. Parameters ---------- value : string The default value for the trait auto_set : boolean Indicates whether the directory editor updates the trait value after every key stroke. exists : boolean Indicates whether the trait value must be an existing directory or not. Default Value ------------- *value* or '' """ # Define the C-level fast validator to use if the directory existence # test is not required: if not exists: self.fast_validate = ( 11, basestring ) super( Directory, self ).__init__( value, auto_set, entries, exists, **metadata ) """ The functions that pop up the Traits GUIs, edit_traits and configure_traits, were failing because all of our inputs default to Undefined, and deep down in traits/ui/wx/list_editor.py it checks for the len() of the elements of the list. The _Undefined class in traits does not define the __len__ method and would error. I tried defining our own Undefined and even subclassing Undefined, but both of those failed with a TraitError in our initializer when we assign the Undefined to the inputs because of an incompatible type: TraitError: The 'vertical_gradient' trait of a BetInputSpec instance must be a float, but a value of <undefined> was specified. So... in order to keep the same type but add the missing method, I monkey patched.
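As a hedged illustration (added here, not part of the original note): once the patch below has been applied at import time, the Undefined singleton supports len(), e.g.

    >>> from nipype.interfaces.traits_extension import Undefined
    >>> len(Undefined)
    0

which is exactly what the list editor expects.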
""" def length(self): return 0 ########################################################################## # Apply monkeypatch here _Undefined.__len__ = length ########################################################################## Undefined = _Undefined() def isdefined(object): return not isinstance(object, _Undefined) def has_metadata(trait, metadata, value=None, recursive=True): ''' Checks if a given trait has a metadata (and optionally if it is set to particular value) ''' count = 0 if hasattr(trait, "_metadata") and metadata in trait._metadata.keys() and (trait._metadata[metadata] == value or value==None): count += 1 if recursive: if hasattr(trait, 'inner_traits'): for inner_trait in trait.inner_traits(): count += has_metadata(inner_trait.trait_type, metadata, recursive) if hasattr(trait, 'handlers') and trait.handlers != None: for handler in trait.handlers: count += has_metadata(handler, metadata, recursive) return count > 0 nipype-0.9.2/nipype/interfaces/utility.py000066400000000000000000000406161227300005300205310ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import re from cPickle import dumps, loads import numpy as np import nibabel as nb from nipype.utils.filemanip import (filename_to_list, copyfile, split_filename) from nipype.interfaces.base import (traits, TraitedSpec, DynamicTraitedSpec, File, Undefined, isdefined, OutputMultiPath, InputMultiPath, BaseInterface, BaseInterfaceInputSpec) from nipype.interfaces.io import IOBase, add_traits from nipype.testing import assert_equal from nipype.utils.misc import getsource, create_function_from_source class IdentityInterface(IOBase): """Basic interface class generates identity mappings Examples -------- >>> from nipype.interfaces.utility import IdentityInterface >>> ii = IdentityInterface(fields=['a', 'b'], mandatory_inputs=False) >>> ii.inputs.a >>> ii.inputs.a = 'foo' >>> out = ii._outputs() >>> out.a >>> out = ii.run() >>> out.outputs.a 'foo' >>> ii2 = IdentityInterface(fields=['a', 'b'], mandatory_inputs=True) >>> ii2.inputs.a = 'foo' >>> out = ii2.run() # doctest: +SKIP ValueError: IdentityInterface requires a value for input 'b' because it was listed in 'fields' Interface IdentityInterface failed to run. """ input_spec = DynamicTraitedSpec output_spec = DynamicTraitedSpec def __init__(self, fields=None, mandatory_inputs=True, **inputs): super(IdentityInterface, self).__init__(**inputs) if fields is None or not fields: raise ValueError('Identity Interface fields must be a non-empty list') # Each input must be in the fields. for in_field in inputs: if in_field not in fields: raise ValueError('Identity Interface input is not in the fields: %s' % in_field) self._fields = fields self._mandatory_inputs = mandatory_inputs add_traits(self.inputs, fields) # Adding any traits wipes out all input values set in superclass initialization, # even it the trait is not in the add_traits argument. The work-around is to reset # the values after adding the traits. 
self.inputs.set(**inputs) def _add_output_traits(self, base): undefined_traits = {} for key in self._fields: base.add_trait(key, traits.Any) undefined_traits[key] = Undefined base.trait_set(trait_change_notify=False, **undefined_traits) return base def _list_outputs(self): #manual mandatory inputs check if self._fields and self._mandatory_inputs: for key in self._fields: value = getattr(self.inputs, key) if not isdefined(value): msg = "%s requires a value for input '%s' because it was listed in 'fields'. \ You can turn off mandatory inputs checking by passing mandatory_inputs = False to the constructor." % \ (self.__class__.__name__, key) raise ValueError(msg) outputs = self._outputs().get() for key in self._fields: val = getattr(self.inputs, key) if isdefined(val): outputs[key] = val return outputs class MergeInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): axis = traits.Enum('vstack', 'hstack', usedefault=True, desc='direction in which to merge, hstack requires same number of elements in each input') no_flatten = traits.Bool(False, usedefault=True, desc='append to outlist instead of extending in vstack mode') class MergeOutputSpec(TraitedSpec): out = traits.List(desc='Merged output') class Merge(IOBase): """Basic interface class to merge inputs into a single list Examples -------- >>> from nipype.interfaces.utility import Merge >>> mi = Merge(3) >>> mi.inputs.in1 = 1 >>> mi.inputs.in2 = [2, 5] >>> mi.inputs.in3 = 3 >>> out = mi.run() >>> out.outputs.out [1, 2, 5, 3] """ input_spec = MergeInputSpec output_spec = MergeOutputSpec def __init__(self, numinputs=0, **inputs): super(Merge, self).__init__(**inputs) self._numinputs = numinputs add_traits(self.inputs, ['in%d' % (i + 1) for i in range(numinputs)]) def _list_outputs(self): outputs = self._outputs().get() out = [] if self.inputs.axis == 'vstack': for idx in range(self._numinputs): value = getattr(self.inputs, 'in%d' % (idx + 1)) if isdefined(value): if isinstance(value, list) and not self.inputs.no_flatten: out.extend(value) else: out.append(value) else: for i in range(len(filename_to_list(self.inputs.in1))): out.insert(i, []) for j in range(self._numinputs): out[i].append(filename_to_list(getattr(self.inputs, 'in%d' % (j + 1)))[i]) if out: outputs['out'] = out return outputs class RenameInputSpec(DynamicTraitedSpec): in_file = File(exists=True, mandatory=True, desc="file to rename") keep_ext = traits.Bool(desc=("Keep in_file extension, replace " "non-extension component of name")) format_string = traits.String(mandatory=True, desc=("Python formatting string for output " "template")) parse_string = traits.String(desc=("Python regexp parse string to define " "replacement inputs")) use_fullpath = traits.Bool(False, usedefault=True, desc="Use full path as input to regex parser") class RenameOutputSpec(TraitedSpec): out_file = traits.File(exists=True, desc="softlink to original file with new name") class Rename(IOBase): """Change the name of a file based on a mapped format string. To use additional inputs that will be defined at run-time, the class constructor must be called with the format template, and the fields identified will become inputs to the interface. Additionally, you may set the parse_string input, which will be run over the input filename with a regular expressions search, and will fill in additional input fields from matched groups. Fields set with inputs have precedence over fields filled in with the regexp match. 
Examples -------- >>> from nipype.interfaces.utility import Rename >>> rename1 = Rename() >>> rename1.inputs.in_file = "zstat1.nii.gz" >>> rename1.inputs.format_string = "Faces-Scenes.nii.gz" >>> res = rename1.run() # doctest: +SKIP >>> print res.outputs.out_file # doctest: +SKIP 'Faces-Scenes.nii.gz' # doctest: +SKIP >>> rename2 = Rename(format_string="%(subject_id)s_func_run%(run)02d") >>> rename2.inputs.in_file = "functional.nii" >>> rename2.inputs.keep_ext = True >>> rename2.inputs.subject_id = "subj_201" >>> rename2.inputs.run = 2 >>> res = rename2.run() # doctest: +SKIP >>> print res.outputs.out_file # doctest: +SKIP 'subj_201_func_run02.nii' # doctest: +SKIP >>> rename3 = Rename(format_string="%(subject_id)s_%(seq)s_run%(run)02d.nii") >>> rename3.inputs.in_file = "func_epi_1_1.nii" >>> rename3.inputs.parse_string = "func_(?P<seq>\w*)_.*" >>> rename3.inputs.subject_id = "subj_201" >>> rename3.inputs.run = 2 >>> res = rename3.run() # doctest: +SKIP >>> print res.outputs.out_file # doctest: +SKIP 'subj_201_epi_run02.nii' # doctest: +SKIP """ input_spec = RenameInputSpec output_spec = RenameOutputSpec def __init__(self, format_string=None, **inputs): super(Rename, self).__init__(**inputs) if format_string is not None: self.inputs.format_string = format_string self.fmt_fields = re.findall(r"%\((.+?)\)", format_string) add_traits(self.inputs, self.fmt_fields) else: self.fmt_fields = [] def _rename(self): fmt_dict = dict() if isdefined(self.inputs.parse_string): if isdefined(self.inputs.use_fullpath) and self.inputs.use_fullpath: m = re.search(self.inputs.parse_string, self.inputs.in_file) else: m = re.search(self.inputs.parse_string, os.path.split(self.inputs.in_file)[1]) if m: fmt_dict.update(m.groupdict()) for field in self.fmt_fields: val = getattr(self.inputs, field) if isdefined(val): fmt_dict[field] = getattr(self.inputs, field) if self.inputs.keep_ext: fmt_string = "".join([self.inputs.format_string, split_filename(self.inputs.in_file)[2]]) else: fmt_string = self.inputs.format_string return fmt_string % fmt_dict def _run_interface(self, runtime): runtime.returncode = 0 _ = copyfile(self.inputs.in_file, os.path.join(os.getcwd(), self._rename())) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs["out_file"] = os.path.join(os.getcwd(), self._rename()) return outputs class SplitInputSpec(BaseInterfaceInputSpec): inlist = traits.List(traits.Any, mandatory=True, desc='list of values to split') splits = traits.List(traits.Int, mandatory=True, desc='Number of outputs in each split - should add to number of inputs') class Split(IOBase): """Basic interface class to split lists into multiple outputs Examples -------- >>> from nipype.interfaces.utility import Split >>> sp = Split() >>> _ = sp.inputs.set(inlist=[1, 2, 3], splits=[2, 1]) >>> out = sp.run() >>> out.outputs.out1 [1, 2] """ input_spec = SplitInputSpec output_spec = DynamicTraitedSpec def _add_output_traits(self, base): undefined_traits = {} for i in range(len(self.inputs.splits)): key = 'out%d' % (i + 1) base.add_trait(key, traits.Any) undefined_traits[key] = Undefined base.trait_set(trait_change_notify=False, **undefined_traits) return base def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.splits): if sum(self.inputs.splits) != len(self.inputs.inlist): raise RuntimeError('sum of splits != num of list elements') splits = [0] splits.extend(self.inputs.splits) splits = np.cumsum(splits) for i in range(len(splits) - 1): outputs['out%d' % (i + 1)] =
np.array(self.inputs.inlist)[splits[i]:splits[i + 1]].tolist() return outputs class SelectInputSpec(BaseInterfaceInputSpec): inlist = InputMultiPath(traits.Any, mandatory=True, desc='list of values to choose from') index = InputMultiPath(traits.Int, mandatory=True, desc='0-based indices of values to choose') class SelectOutputSpec(TraitedSpec): out = OutputMultiPath(traits.Any, desc='list of selected values') class Select(IOBase): """Basic interface class to select specific elements from a list Examples -------- >>> from nipype.interfaces.utility import Select >>> sl = Select() >>> _ = sl.inputs.set(inlist=[1, 2, 3, 4, 5], index=[3]) >>> out = sl.run() >>> out.outputs.out 4 >>> _ = sl.inputs.set(inlist=[1, 2, 3, 4, 5], index=[3, 4]) >>> out = sl.run() >>> out.outputs.out [4, 5] """ input_spec = SelectInputSpec output_spec = SelectOutputSpec def _list_outputs(self): outputs = self._outputs().get() out = np.array(self.inputs.inlist)[np.array(self.inputs.index)].tolist() outputs['out'] = out return outputs class FunctionInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): function_str = traits.Str(mandatory=True, desc='code for function') class Function(IOBase): """Runs arbitrary function as an interface Examples -------- >>> func = 'def func(arg1, arg2=5): return arg1 + arg2' >>> fi = Function(input_names=['arg1', 'arg2'], output_names=['out']) >>> fi.inputs.function_str = func >>> res = fi.run(arg1=1) >>> res.outputs.out 6 """ input_spec = FunctionInputSpec output_spec = DynamicTraitedSpec def __init__(self, input_names, output_names, function=None, imports=None, **inputs): """ Parameters ---------- input_names: single str or list names corresponding to function inputs output_names: single str or list names corresponding to function outputs. has to match the number of outputs function : callable callable python object. 
must be able to execute in an isolated namespace (possibly in concert with the ``imports`` parameter) imports : list of strings list of import statements that allow the function to execute in an otherwise empty namespace """ super(Function, self).__init__(**inputs) if function: if hasattr(function, '__call__'): try: self.inputs.function_str = getsource(function) except IOError: raise Exception('Interface Function does not accept ' \ 'function objects defined interactively ' \ 'in a python session') elif isinstance(function, str): self.inputs.function_str = dumps(function) else: raise Exception('Unknown type of function') self.inputs.on_trait_change(self._set_function_string, 'function_str') self._input_names = filename_to_list(input_names) self._output_names = filename_to_list(output_names) add_traits(self.inputs, [name for name in self._input_names]) self.imports = imports self._out = {} for name in self._output_names: self._out[name] = None def _set_function_string(self, obj, name, old, new): if name == 'function_str': if hasattr(new, '__call__'): function_source = getsource(new) elif isinstance(new, str): function_source = dumps(new) self.inputs.trait_set(trait_change_notify=False, **{'%s' % name: function_source}) def _add_output_traits(self, base): undefined_traits = {} for key in self._output_names: base.add_trait(key, traits.Any) undefined_traits[key] = Undefined base.trait_set(trait_change_notify=False, **undefined_traits) return base def _run_interface(self, runtime): function_handle = create_function_from_source(self.inputs.function_str, self.imports) args = {} for name in self._input_names: value = getattr(self.inputs, name) if isdefined(value): args[name] = value out = function_handle(**args) if len(self._output_names) == 1: self._out[self._output_names[0]] = out else: if isinstance(out, tuple) and (len(out) != len(self._output_names)): raise RuntimeError('Mismatch in number of expected outputs') else: for idx, name in enumerate(self._output_names): self._out[name] = out[idx] return runtime def _list_outputs(self): outputs = self._outputs().get() for key in self._output_names: outputs[key] = self._out[key] return outputs class AssertEqualInputSpec(BaseInterfaceInputSpec): volume1 = File(exists=True, mandatory=True) volume2 = File(exists=True, mandatory=True) class AssertEqual(BaseInterface): input_spec = AssertEqualInputSpec def _run_interface(self, runtime): data1 = nb.load(self.inputs.volume1).get_data() data2 = nb.load(self.inputs.volume2).get_data() assert_equal(data1, data2) return runtime nipype-0.9.2/nipype/interfaces/vista/000077500000000000000000000000001227300005300175735ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/vista/__init__.py000066400000000000000000000002331227300005300217020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .vista import (Vnifti2Image, VtoMat)nipype-0.9.2/nipype/interfaces/vista/tests/000077500000000000000000000000001227300005300207355ustar00rootroot00000000000000nipype-0.9.2/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py000066400000000000000000000023451227300005300256660ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.vista.vista import Vnifti2Image def test_Vnifti2Image_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), out_file=dict(hash_files=False, name_template='%s.v', name_source=['in_file'], 
keep_extension=False, position=-1, argstr='-out %s', ), args=dict(argstr='%s', ), terminal_output=dict(nohash=True, mandatory=True, ), environ=dict(nohash=True, usedefault=True, ), in_file=dict(position=1, mandatory=True, argstr='-in %s', ), attributes=dict(position=2, mandatory=False, argstr='-attr %s', ), ) inputs = Vnifti2Image.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Vnifti2Image_outputs(): output_map = dict(out_file=dict(), ) outputs = Vnifti2Image.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/vista/tests/test_auto_VtoMat.py000066400000000000000000000021661227300005300246150ustar00rootroot00000000000000# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.vista.vista import VtoMat def test_VtoMat_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), out_file=dict(hash_files=False, name_template='%s.mat', name_source=['in_file'], keep_extension=False, position=-1, argstr='-out %s', ), args=dict(argstr='%s', ), terminal_output=dict(nohash=True, mandatory=True, ), environ=dict(nohash=True, usedefault=True, ), in_file=dict(position=1, mandatory=True, argstr='-in %s', ), ) inputs = VtoMat.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_VtoMat_outputs(): output_map = dict(out_file=dict(), ) outputs = VtoMat.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value nipype-0.9.2/nipype/interfaces/vista/vista.py000066400000000000000000000045651227300005300213050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from nipype.interfaces.base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File from nipype.utils.filemanip import split_filename import os, os.path as op from nipype.interfaces.traits_extension import isdefined class Vnifti2ImageInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='-in %s', mandatory=True, position=1, desc='in file') attributes = File(exists=True, argstr='-attr %s', mandatory=False, position=2, desc='attribute file') out_file = File(name_template="%s.v", keep_extension=False, argstr='-out %s', hash_files=False, position= -1, desc='output data file', name_source=["in_file"]) class Vnifti2ImageOutputSpec(TraitedSpec): out_file = File(exists=True, desc='Output vista file') class Vnifti2Image(CommandLine): """ Convert a nifti file into a vista file. 
Example ------- >>> vimage = Vnifti2Image() >>> vimage.inputs.in_file = 'image.nii' >>> vimage.cmdline 'vnifti2image -in image.nii -out image.v' >>> vimage.run() # doctest: +SKIP """ _cmd = 'vnifti2image' input_spec=Vnifti2ImageInputSpec output_spec=Vnifti2ImageOutputSpec class VtoMatInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='-in %s', mandatory=True, position=1, desc='in file') out_file = File(name_template="%s.mat", keep_extension=False, argstr='-out %s', hash_files=False, position= -1, desc='output mat file', name_source=["in_file"]) class VtoMatOutputSpec(TraitedSpec): out_file = File(exists=True, desc='Output mat file') class VtoMat(CommandLine): """ Convert a vista file into a mat file. Example ------- >>> vimage = VtoMat() >>> vimage.inputs.in_file = 'image.v' >>> vimage.cmdline 'vtomat -in image.v -out image.mat' >>> vimage.run() # doctest: +SKIP """ _cmd = 'vtomat' input_spec=VtoMatInputSpec output_spec=VtoMatOutputSpec nipype-0.9.2/nipype/pipeline/000077500000000000000000000000001227300005300161275ustar00rootroot00000000000000nipype-0.9.2/nipype/pipeline/__init__.py000066400000000000000000000004261227300005300202420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package contains modules for generating pipelines using interfaces """ __docformat__ = 'restructuredtext' from engine import Node, MapNode, JoinNode, Workflow nipype-0.9.2/nipype/pipeline/engine.py000066400000000000000000002761631227300005300177630ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Defines functionality for pipelined execution of interfaces The `Pipeline` class provides core functionality for batch processing. Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ from datetime import datetime try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict from copy import deepcopy import cPickle from glob import glob import gzip import inspect import os import os.path as op import re import shutil from shutil import rmtree from socket import gethostname from string import Template import sys from tempfile import mkdtemp from warnings import warn from hashlib import sha1 import numpy as np from ..utils.misc import package_check, str2bool package_check('networkx', '1.3') import networkx as nx from ..
import config, logging logger = logging.getLogger('workflow') from ..interfaces.base import (traits, InputMultiPath, CommandLine, Undefined, TraitedSpec, DynamicTraitedSpec, Bunch, InterfaceResult, md5, Interface, TraitDictObject, TraitListObject, isdefined) from ..utils.misc import getsource, create_function_from_source from ..utils.filemanip import (save_json, FileNotFoundError, filename_to_list, list_to_filename, copyfiles, fnames_presuffix, loadpkl, split_filename, load_json, savepkl, write_rst_header, write_rst_dict, write_rst_list) from .utils import (generate_expanded_graph, modify_paths, export_graph, make_output_dir, write_workflow_prov, clean_working_directory, format_dot, topological_sort, get_print_name, merge_dict, evaluate_connect_function) def _write_inputs(node): lines = [] nodename = node.fullname.replace('.', '_') for key, _ in node.inputs.items(): val = getattr(node.inputs, key) if isdefined(val): if type(val) == str: try: func = create_function_from_source(val) except RuntimeError, e: lines.append("%s.inputs.%s = '%s'" % (nodename, key, val)) else: funcname = [name for name in func.func_globals if name != '__builtins__'][0] lines.append(cPickle.loads(val)) if funcname == nodename: lines[-1] = lines[-1].replace(' %s(' % funcname, ' %s_1(' % funcname) funcname = '%s_1' % funcname lines.append('from nipype.utils.misc import getsource') lines.append("%s.inputs.%s = getsource(%s)" % (nodename, key, funcname)) else: lines.append('%s.inputs.%s = %s' % (nodename, key, val)) return lines def format_node(node, format='python', include_config=False): """Format a node in a given output syntax.""" lines = [] name = node.fullname.replace('.', '_') if format == 'python': klass = node._interface importline = 'from %s import %s' % (klass.__module__, klass.__class__.__name__) comment = '# Node: %s' % node.fullname spec = inspect.getargspec(node._interface.__init__) args = spec.args[1:] if args: filled_args = [] for arg in args: if hasattr(node._interface, '_%s' % arg): filled_args.append('%s=%s' % (arg, getattr(node._interface, '_%s' % arg))) args = ', '.join(filled_args) else: args = '' klass_name = klass.__class__.__name__ if isinstance(node, MapNode): nodedef = '%s = MapNode(%s(%s), iterfield=%s, name="%s")' \ % (name, klass_name, args, node.iterfield, name) else: nodedef = '%s = Node(%s(%s), name="%s")' \ % (name, klass_name, args, name) lines = [importline, comment, nodedef] if include_config: lines = [importline, "from collections import OrderedDict", comment, nodedef] lines.append('%s.config = %s' % (name, node.config)) if node.iterables is not None: lines.append('%s.iterables = %s' % (name, node.iterables)) lines.extend(_write_inputs(node)) return lines class WorkflowBase(object): """Defines common attributes and functions for workflows and nodes.""" def __init__(self, name=None, base_dir=None): """ Initialize base parameters of a workflow or node Parameters ---------- name : string (mandatory) Name of this node. Name must be alphanumeric and not contain any special characters (e.g., '.', '@'). 
base_dir : string base output directory (will be hashed before creations) default=None, which results in the use of mkdtemp """ self.base_dir = base_dir self.config = None self._verify_name(name) self.name = name # for compatibility with node expansion using iterables self._id = self.name self._hierarchy = None @property def inputs(self): raise NotImplementedError @property def outputs(self): raise NotImplementedError @property def fullname(self): fullname = self.name if self._hierarchy: fullname = self._hierarchy + '.' + self.name return fullname def clone(self, name): """Clone a workflowbase object Parameters ---------- name : string (mandatory) A clone of node or workflow must have a new name """ if (name is None) or (name == self.name): raise Exception('Cloning requires a new name') self._verify_name(name) clone = deepcopy(self) clone.name = name clone._id = name clone._hierarchy = None return clone def _check_outputs(self, parameter): return hasattr(self.outputs, parameter) def _check_inputs(self, parameter): return hasattr(self.inputs, parameter) def _verify_name(self, name): valid_name = bool(re.match('^[\w-]+$', name)) if not valid_name: raise Exception('the name must not contain any special characters') def __repr__(self): if self._hierarchy: return '.'.join((self._hierarchy, self._id)) else: return self._id def save(self, filename=None): if filename is None: filename = 'temp.npz' np.savez(filename, object=self) def load(self, filename): return np.load(filename) class Workflow(WorkflowBase): """Controls the setup and execution of a pipeline of processes.""" def __init__(self, name, base_dir=None): """Create a workflow object. Parameters ---------- name : alphanumeric string unique identifier for the workflow base_dir : string, optional path to workflow storage """ super(Workflow, self).__init__(name, base_dir) self._graph = nx.DiGraph() self.config = deepcopy(config._sections) # PUBLIC API def clone(self, name): """Clone a workflow .. note:: Will reset attributes used for executing workflow. See _init_runtime_fields. Parameters ---------- name: alphanumeric name unique name for the workflow """ clone = super(Workflow, self).clone(name) clone._reset_hierarchy() return clone # Graph creation functions def connect(self, *args, **kwargs): """Connect nodes in the pipeline. This routine also checks if inputs and outputs are actually provided by the nodes that are being connected. Creates edges in the directed graph using the nodes and edges specified in the `connection_list`. Uses the NetworkX method DiGraph.add_edges_from. Parameters ---------- args : list or a set of four positional arguments Four positional arguments of the form:: connect(source, sourceoutput, dest, destinput) source : nodewrapper node sourceoutput : string (must be in source.outputs) dest : nodewrapper node destinput : string (must be in dest.inputs) A list of 3-tuples of the following form:: [(source, target, [('sourceoutput/attribute', 'targetinput'), ...]), ...] Or:: [(source, target, [(('sourceoutput1', func, arg2, ...), 'targetinput'), ...]), ...] 
sourceoutput1 will always be the first argument to func, and func will be evaluated and the results sent to targetinput. Currently func needs to define all its needed imports within the function, as we use the inspect module to get at the source code and execute it remotely """ if len(args) == 1: connection_list = args[0] elif len(args) == 4: connection_list = [(args[0], args[2], [(args[1], args[3])])] else: raise Exception('unknown set of parameters to connect function') if not kwargs: disconnect = False else: disconnect = kwargs['disconnect'] newnodes = [] for srcnode, destnode, _ in connection_list: if self in [srcnode, destnode]: msg = ('Workflow connect cannot contain itself as node:' ' src[%s] dest[%s] workflow[%s]') % (srcnode, destnode, self.name) raise IOError(msg) if (srcnode not in newnodes) and not self._has_node(srcnode): newnodes.append(srcnode) if (destnode not in newnodes) and not self._has_node(destnode): newnodes.append(destnode) if newnodes: self._check_nodes(newnodes) for node in newnodes: if node._hierarchy is None: node._hierarchy = self.name not_found = [] connected_ports = {} for srcnode, destnode, connects in connection_list: if destnode not in connected_ports: connected_ports[destnode] = [] # check to see which ports of destnode are already # connected. if not disconnect and (destnode in self._graph.nodes()): for edge in self._graph.in_edges_iter(destnode): data = self._graph.get_edge_data(*edge) for sourceinfo, destname in data['connect']: if destname not in connected_ports[destnode]: connected_ports[destnode] += [destname] for source, dest in connects: # Currently datasource/sink/grabber.io modules # determine their inputs/outputs depending on # connection settings. Skip these modules in the check if dest in connected_ports[destnode]: raise Exception(""" Trying to connect %s:%s to %s:%s but input '%s' of node '%s' is already connected.
""" % (srcnode, source, destnode, dest, dest, destnode)) if not (hasattr(destnode, '_interface') and '.io' in str(destnode._interface.__class__)): if not destnode._check_inputs(dest): not_found.append(['in', destnode.name, dest]) if not (hasattr(srcnode, '_interface') and '.io' in str(srcnode._interface.__class__)): if isinstance(source, tuple): # handles the case that source is specified # with a function sourcename = source[0] elif isinstance(source, str): sourcename = source else: raise Exception(('Unknown source specification in ' 'connection from output of %s') % srcnode.name) if sourcename and not srcnode._check_outputs(sourcename): not_found.append(['out', srcnode.name, sourcename]) connected_ports[destnode] += [dest] infostr = [] for info in not_found: infostr += ["Module %s has no %sput called %s\n" % (info[1], info[0], info[2])] if not_found: raise Exception('\n'.join(['Some connections were not found'] + infostr)) # turn functions into strings for srcnode, destnode, connects in connection_list: for idx, (src, dest) in enumerate(connects): if isinstance(src, tuple) and not isinstance(src[1], str): function_source = getsource(src[1]) connects[idx] = ((src[0], function_source, src[2:]), dest) # add connections for srcnode, destnode, connects in connection_list: edge_data = self._graph.get_edge_data(srcnode, destnode, None) if edge_data: logger.debug('(%s, %s): Edge data exists: %s' % (srcnode, destnode, str(edge_data))) for data in connects: if data not in edge_data['connect']: edge_data['connect'].append(data) if disconnect: logger.debug('Removing connection: %s' % str(data)) edge_data['connect'].remove(data) if edge_data['connect']: self._graph.add_edges_from([(srcnode, destnode, edge_data)]) else: #pass logger.debug('Removing connection: %s->%s' % (srcnode, destnode)) self._graph.remove_edges_from([(srcnode, destnode)]) elif not disconnect: logger.debug('(%s, %s): No edge data' % (srcnode, destnode)) self._graph.add_edges_from([(srcnode, destnode, {'connect': connects})]) edge_data = self._graph.get_edge_data(srcnode, destnode) logger.debug('(%s, %s): new edge data: %s' % (srcnode, destnode, str(edge_data))) def disconnect(self, *args): """Disconnect two nodes See the docstring for connect for format. 
""" # yoh: explicit **dict was introduced for compatibility with Python 2.5 return self.connect(*args, **dict(disconnect=True)) def add_nodes(self, nodes): """ Add nodes to a workflow Parameters ---------- nodes : list A list of WorkflowBase-based objects """ newnodes = [] all_nodes = self._get_all_nodes() for node in nodes: if self._has_node(node): raise IOError('Node %s already exists in the workflow' % node) if isinstance(node, Workflow): for subnode in node._get_all_nodes(): if subnode in all_nodes: raise IOError(('Subnode %s of node %s already exists ' 'in the workflow') % (subnode, node)) newnodes.append(node) if not newnodes: logger.debug('no new nodes to add') return for node in newnodes: if not issubclass(node.__class__, WorkflowBase): raise Exception('Node %s must be a subclass of WorkflowBase' % str(node)) self._check_nodes(newnodes) for node in newnodes: if node._hierarchy is None: node._hierarchy = self.name self._graph.add_nodes_from(newnodes) def remove_nodes(self, nodes): """ Remove nodes from a workflow Parameters ---------- nodes : list A list of WorkflowBase-based objects """ self._graph.remove_nodes_from(nodes) # Input-Output access @property def inputs(self): return self._get_inputs() @property def outputs(self): return self._get_outputs() def get_node(self, name): """Return an internal node by name """ nodenames = name.split('.') nodename = nodenames[0] outnode = [node for node in self._graph.nodes() if str(node).endswith('.' + nodename)] if outnode: outnode = outnode[0] if nodenames[1:] and issubclass(outnode.__class__, Workflow): outnode = outnode.get_node('.'.join(nodenames[1:])) else: outnode = None return outnode def list_node_names(self): """List names of all nodes in a workflow """ outlist = [] for node in nx.topological_sort(self._graph): if isinstance(node, Workflow): outlist.extend(['.'.join((node.name, nodename)) for nodename in node.list_node_names()]) else: outlist.append(node.name) return sorted(outlist) def write_graph(self, dotfilename='graph.dot', graph2use='hierarchical', format="png", simple_form=True): """Generates a graphviz dot file and a png file Parameters ---------- graph2use: 'orig', 'hierarchical' (default), 'flat', 'exec' orig - creates a top level graph without expanding internal workflow nodes; flat - expands workflow nodes recursively; exec - expands workflows to depict iterables format: 'png', 'svg' simple_form: boolean (default: True) Determines if the node name used in the graph should be of the form 'nodename (package)' when True or 'nodename.Class.package' when False. """ graphtypes = ['orig', 'flat', 'hierarchical', 'exec'] if graph2use not in graphtypes: raise ValueError('Unknown graph2use keyword. 
Must be one of: ' + str(graphtypes)) base_dir, dotfilename = os.path.split(dotfilename) if base_dir == '': if self.base_dir: base_dir = self.base_dir if self.name: base_dir = os.path.join(base_dir, self.name) else: base_dir = os.getcwd() base_dir = make_output_dir(base_dir) if graph2use == 'hierarchical': dotfilename = os.path.join(base_dir, dotfilename) self.write_hierarchical_dotfile(dotfilename=dotfilename, colored=False, simple_form=simple_form) format_dot(dotfilename, format=format) else: graph = self._graph if graph2use in ['flat', 'exec']: graph = self._create_flat_graph() if graph2use == 'exec': graph = generate_expanded_graph(deepcopy(graph)) export_graph(graph, base_dir, dotfilename=dotfilename, format=format, simple_form=simple_form) def write_hierarchical_dotfile(self, dotfilename=None, colored=True, simple_form=True): dotlist = ['digraph %s{' % self.name] if colored: dotlist.append(' ' + 'colorscheme=pastel28;') dotlist.append(self._get_dot(prefix=' ', colored=colored, simple_form=simple_form)) dotlist.append('}') dotstr = '\n'.join(dotlist) if dotfilename: fp = open(dotfilename, 'wt') fp.writelines(dotstr) fp.close() else: logger.info(dotstr) def export(self, filename=None, prefix="output", format="python", include_config=False): """Export object into a different format Parameters ---------- filename: string file to save the code to; overrides prefix prefix: string prefix to use for output file format: string one of "python" include_config: boolean whether to include node and workflow config values """ formats = ["python"] if format not in formats: raise ValueError('format must be one of: %s' % '|'.join(formats)) flatgraph = self._create_flat_graph() nodes = nx.topological_sort(flatgraph) lines = ['# Workflow'] importlines = ['from nipype.pipeline.engine import Workflow, ' 'Node, MapNode'] functions = {} if format == "python": connect_template = '%s.connect(%%s, %%s, %%s, "%%s")' % self.name connect_template2 = '%s.connect(%%s, "%%s", %%s, "%%s")' \ % self.name wfdef = '%s = Workflow("%s")' % (self.name, self.name) lines.append(wfdef) if include_config: lines.append('%s.config = %s' % (self.name, self.config)) for idx, node in enumerate(nodes): nodename = node.fullname.replace('.', '_') # write nodes nodelines = format_node(node, format='python', include_config=include_config) for line in nodelines: if line.startswith('from'): if line not in importlines: importlines.append(line) else: lines.append(line) # write connections for u, _, d in flatgraph.in_edges_iter(nbunch=node, data=True): for cd in d['connect']: if isinstance(cd[0], tuple): args = list(cd[0]) if args[1] in functions: funcname = functions[args[1]] else: func = create_function_from_source(args[1]) funcname = [name for name in func.func_globals if name != '__builtins__'][0] functions[args[1]] = funcname args[1] = funcname args = tuple([arg for arg in args if arg]) line_args = (u.fullname.replace('.', '_'), args, nodename, cd[1]) line = connect_template % line_args line = line.replace("'%s'" % funcname, funcname) lines.append(line) else: line_args = (u.fullname.replace('.', '_'), cd[0], nodename, cd[1]) lines.append(connect_template2 % line_args) functionlines = ['# Functions'] for function in functions: functionlines.append(cPickle.loads(function).rstrip()) all_lines = importlines + functionlines + lines if not filename: filename = '%s%s.py' % (prefix, self.name) with open(filename, 'wt') as fp: fp.writelines('\n'.join(all_lines)) return all_lines def run(self, plugin=None, plugin_args=None, updatehash=False): """ 
Execute the workflow Parameters ---------- plugin: plugin name or object Plugin to use for execution. You can create your own plugins for execution. plugin_args : dictionary containing arguments to be sent to plugin constructor. see individual plugin doc strings for details. """ if plugin is None: plugin = config.get('execution', 'plugin') if type(plugin) is not str: runner = plugin else: name = 'nipype.pipeline.plugins' try: __import__(name) except ImportError: msg = 'Could not import plugin module: %s' % name logger.error(msg) raise ImportError(msg) else: plugin_mod = getattr(sys.modules[name], '%sPlugin' % plugin) runner = plugin_mod(plugin_args=plugin_args) flatgraph = self._create_flat_graph() self.config = merge_dict(deepcopy(config._sections), self.config) if 'crashdump_dir' in self.config: warn(("Deprecated: workflow.config['crashdump_dir']\n" "Please use config['execution']['crashdump_dir']")) crash_dir = self.config['crashdump_dir'] self.config['execution']['crashdump_dir'] = crash_dir del self.config['crashdump_dir'] logger.info(str(sorted(self.config))) self._set_needed_outputs(flatgraph) execgraph = generate_expanded_graph(deepcopy(flatgraph)) for index, node in enumerate(execgraph.nodes()): node.config = merge_dict(deepcopy(self.config), node.config) node.base_dir = self.base_dir node.index = index if isinstance(node, MapNode): node.use_plugin = (plugin, plugin_args) self._configure_exec_nodes(execgraph) if str2bool(self.config['execution']['create_report']): self._write_report_info(self.base_dir, self.name, execgraph) runner.run(execgraph, updatehash=updatehash, config=self.config) datestr = datetime.utcnow().strftime('%Y%m%dT%H%M%S') if str2bool(self.config['execution']['write_provenance']): prov_base = os.path.join(self.base_dir, 'workflow_provenance_%s' % datestr) logger.info('Provenance file prefix: %s' % prov_base) write_workflow_prov(execgraph, prov_base, format='all') return execgraph # PRIVATE API AND FUNCTIONS def _write_report_info(self, workingdir, name, graph): if workingdir is None: workingdir = os.getcwd() report_dir = os.path.join(workingdir, name) if not os.path.exists(report_dir): os.makedirs(report_dir) shutil.copyfile(os.path.join(os.path.dirname(__file__), 'report_template.html'), os.path.join(report_dir, 'index.html')) shutil.copyfile(os.path.join(os.path.dirname(__file__), '..', 'external', 'd3.v3.min.js'), os.path.join(report_dir, 'd3.v3.min.js')) nodes, groups = topological_sort(graph, depth_first=True) graph_file = os.path.join(report_dir, 'graph1.json') json_dict = {'nodes': [], 'links': [], 'groups': [], 'maxN': 0} for i, node in enumerate(nodes): report_file = "%s/_report/report.rst" % \ node.output_dir().replace(report_dir, '') result_file = "%s/result_%s.pklz" % \ (node.output_dir().replace(report_dir, ''), node.name) json_dict['nodes'].append(dict(name='%d_%s' % (i, node.name), report=report_file, result=result_file, group=groups[i])) maxN = 0 for gid in np.unique(groups): procs = [i for i, val in enumerate(groups) if val == gid] N = len(procs) if N > maxN: maxN = N json_dict['groups'].append(dict(procs=procs, total=N, name='Group_%05d' % gid)) json_dict['maxN'] = maxN for u, v in graph.in_edges_iter(): json_dict['links'].append(dict(source=nodes.index(u), target=nodes.index(v), value=1)) save_json(graph_file, json_dict) graph_file = os.path.join(report_dir, 'graph.json') template = '%%0%dd_' % np.ceil(np.log10(len(nodes))).astype(int) def getname(u, i): name_parts = u.fullname.split('.') #return '.'.join(name_parts[:-1] + [template % i + 
name_parts[-1]]) return template % i + name_parts[-1] json_dict = [] for i, node in enumerate(nodes): imports = [] for u, v in graph.in_edges_iter(nbunch=node): imports.append(getname(u, nodes.index(u))) json_dict.append(dict(name=getname(node, i), size=1, group=groups[i], imports=imports)) save_json(graph_file, json_dict) def _set_needed_outputs(self, graph): """Initialize node with list of which outputs are needed.""" rm_outputs = self.config['execution']['remove_unnecessary_outputs'] if not str2bool(rm_outputs): return for node in graph.nodes(): node.needed_outputs = [] for edge in graph.out_edges_iter(node): data = graph.get_edge_data(*edge) for sourceinfo, _ in sorted(data['connect']): if isinstance(sourceinfo, tuple): input_name = sourceinfo[0] else: input_name = sourceinfo if input_name not in node.needed_outputs: node.needed_outputs += [input_name] if node.needed_outputs: node.needed_outputs = sorted(node.needed_outputs) def _configure_exec_nodes(self, graph): """Ensure that each node knows where to get inputs from """ for node in graph.nodes(): node.input_source = {} for edge in graph.in_edges_iter(node): data = graph.get_edge_data(*edge) for sourceinfo, field in sorted(data['connect']): node.input_source[field] = \ (os.path.join(edge[0].output_dir(), 'result_%s.pklz' % edge[0].name), sourceinfo) def _check_nodes(self, nodes): """Checks if any of the nodes are already in the graph """ node_names = [node.name for node in self._graph.nodes()] node_lineage = [node._hierarchy for node in self._graph.nodes()] for node in nodes: if node.name in node_names: idx = node_names.index(node.name) if node_lineage[idx] in [node._hierarchy, self.name]: raise IOError('Duplicate node name %s found.' % node.name) else: node_names.append(node.name) def _has_attr(self, parameter, subtype='in'): """Checks if a parameter is available as an input or output """ if subtype == 'in': subobject = self.inputs else: subobject = self.outputs attrlist = parameter.split('.') cur_out = subobject for attr in attrlist: if not hasattr(cur_out, attr): return False cur_out = getattr(cur_out, attr) return True def _get_parameter_node(self, parameter, subtype='in'): """Returns the underlying node corresponding to an input or output parameter """ if subtype == 'in': subobject = self.inputs else: subobject = self.outputs attrlist = parameter.split('.') cur_out = subobject for attr in attrlist[:-1]: cur_out = getattr(cur_out, attr) return cur_out.traits()[attrlist[-1]].node def _check_outputs(self, parameter): return self._has_attr(parameter, subtype='out') def _check_inputs(self, parameter): return self._has_attr(parameter, subtype='in') def _get_inputs(self): """Returns the inputs of a workflow This function does not return any input ports that are already connected """ inputdict = TraitedSpec() for node in self._graph.nodes(): inputdict.add_trait(node.name, traits.Instance(TraitedSpec)) if isinstance(node, Workflow): setattr(inputdict, node.name, node.inputs) else: taken_inputs = [] for _, _, d in self._graph.in_edges_iter(nbunch=node, data=True): for cd in d['connect']: taken_inputs.append(cd[1]) unconnectedinputs = TraitedSpec() for key, trait in node.inputs.items(): if key not in taken_inputs: unconnectedinputs.add_trait(key, traits.Trait(trait, node=node)) value = getattr(node.inputs, key) setattr(unconnectedinputs, key, value) setattr(inputdict, node.name, unconnectedinputs) getattr(inputdict, node.name).on_trait_change(self._set_input) return inputdict def _get_outputs(self): """Returns all possible output ports 
that are not already connected """ outputdict = TraitedSpec() for node in self._graph.nodes(): outputdict.add_trait(node.name, traits.Instance(TraitedSpec)) if isinstance(node, Workflow): setattr(outputdict, node.name, node.outputs) elif node.outputs: outputs = TraitedSpec() for key, _ in node.outputs.items(): outputs.add_trait(key, traits.Any(node=node)) setattr(outputs, key, None) setattr(outputdict, node.name, outputs) return outputdict def _set_input(self, object, name, newvalue): """Trait callback function to update a node input """ object.traits()[name].node.set_input(name, newvalue) def _set_node_input(self, node, param, source, sourceinfo): """Set inputs of a node given the edge connection""" if isinstance(sourceinfo, str): val = source.get_output(sourceinfo) elif isinstance(sourceinfo, tuple): if callable(sourceinfo[1]): val = sourceinfo[1](source.get_output(sourceinfo[0]), *sourceinfo[2:]) newval = val if isinstance(val, TraitDictObject): newval = dict(val) if isinstance(val, TraitListObject): newval = val[:] logger.debug('setting node input: %s->%s', param, str(newval)) node.set_input(param, deepcopy(newval)) def _get_all_nodes(self): allnodes = [] for node in self._graph.nodes(): if isinstance(node, Workflow): allnodes.extend(node._get_all_nodes()) else: allnodes.append(node) return allnodes def _has_node(self, wanted_node): for node in self._graph.nodes(): if wanted_node == node: return True if isinstance(node, Workflow): if node._has_node(wanted_node): return True return False def _create_flat_graph(self): """Make a simple DAG where no node is a workflow.""" logger.debug('Creating flat graph for workflow: %s', self.name) workflowcopy = deepcopy(self) workflowcopy._generate_flatgraph() return workflowcopy._graph def _reset_hierarchy(self): """Reset the hierarchy on a graph """ for node in self._graph.nodes(): if isinstance(node, Workflow): node._reset_hierarchy() for innernode in node._graph.nodes(): innernode._hierarchy = '.'.join((self.name, innernode._hierarchy)) else: node._hierarchy = self.name def _generate_flatgraph(self): """Generate a graph containing only Nodes or MapNodes """ logger.debug('expanding workflow: %s', self) nodes2remove = [] if not nx.is_directed_acyclic_graph(self._graph): raise Exception(('Workflow: %s is not a directed acyclic graph ' '(DAG)') % self.name) nodes = nx.topological_sort(self._graph) for node in nodes: logger.debug('processing node: %s' % node) if isinstance(node, Workflow): nodes2remove.append(node) # use in_edges instead of in_edges_iter to allow # disconnections to take place properly. otherwise, the # edge dict is modified. 
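                # Re-route every connection that terminates on this
                # sub-workflow: resolve the real destination node inside the
                # sub-workflow via _get_parameter_node, disconnect the edge
                # that points at the workflow container, and connect the
                # source directly to the internal node. The out_edges loop
                # below performs the symmetric rewrite for connections that
                # originate from the sub-workflow's outputs.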
for u, _, d in self._graph.in_edges(nbunch=node, data=True): logger.debug('in: connections-> %s' % str(d['connect'])) for cd in deepcopy(d['connect']): logger.debug("in: %s" % str(cd)) dstnode = node._get_parameter_node(cd[1], subtype='in') srcnode = u srcout = cd[0] dstin = cd[1].split('.')[-1] logger.debug('in edges: %s %s %s %s' % (srcnode, srcout, dstnode, dstin)) self.disconnect(u, cd[0], node, cd[1]) self.connect(srcnode, srcout, dstnode, dstin) # do not use out_edges_iter for reasons stated in in_edges for _, v, d in self._graph.out_edges(nbunch=node, data=True): logger.debug('out: connections-> %s' % str(d['connect'])) for cd in deepcopy(d['connect']): logger.debug("out: %s" % str(cd)) dstnode = v if isinstance(cd[0], tuple): parameter = cd[0][0] else: parameter = cd[0] srcnode = node._get_parameter_node(parameter, subtype='out') if isinstance(cd[0], tuple): srcout = list(cd[0]) srcout[0] = parameter.split('.')[-1] srcout = tuple(srcout) else: srcout = parameter.split('.')[-1] dstin = cd[1] logger.debug('out edges: %s %s %s %s' % (srcnode, srcout, dstnode, dstin)) self.disconnect(node, cd[0], v, cd[1]) self.connect(srcnode, srcout, dstnode, dstin) # expand the workflow node #logger.debug('expanding workflow: %s', node) node._generate_flatgraph() for innernode in node._graph.nodes(): innernode._hierarchy = '.'.join((self.name, innernode._hierarchy)) self._graph.add_nodes_from(node._graph.nodes()) self._graph.add_edges_from(node._graph.edges(data=True)) if nodes2remove: self._graph.remove_nodes_from(nodes2remove) logger.debug('finished expanding workflow: %s', self) def _get_dot(self, prefix=None, hierarchy=None, colored=True, simple_form=True): """Create a dot file with connection info """ if prefix is None: prefix = ' ' if hierarchy is None: hierarchy = [] level = (len(prefix) / 2) + 1 dotlist = ['%slabel="%s";' % (prefix, self.name)] if colored: dotlist.append('%scolor=%d;' % (prefix, level)) for node in nx.topological_sort(self._graph): fullname = '.'.join(hierarchy + [node.fullname]) nodename = fullname.replace('.', '_') if not isinstance(node, Workflow): node_class_name = get_print_name(node, simple_form=simple_form) if not simple_form: node_class_name = '.'.join(node_class_name.split('.')[1:]) if hasattr(node, 'iterables') and node.iterables: dotlist.append(('%s[label="%s", style=filled, colorscheme' '=greys7 color=2];') % (nodename, node_class_name)) else: dotlist.append('%s[label="%s"];' % (nodename, node_class_name)) for node in nx.topological_sort(self._graph): if isinstance(node, Workflow): fullname = '.'.join(hierarchy + [node.fullname]) nodename = fullname.replace('.', '_') dotlist.append('subgraph cluster_%s {' % nodename) if colored: dotlist.append(prefix + prefix + 'style=filled;') dotlist.append(node._get_dot(prefix=prefix + prefix, hierarchy=hierarchy + [self.name], colored=colored, simple_form=simple_form)) dotlist.append('}') else: for subnode in self._graph.successors_iter(node): if node._hierarchy != subnode._hierarchy: continue if not isinstance(subnode, Workflow): nodefullname = '.'.join(hierarchy + [node.fullname]) subnodefullname = '.'.join(hierarchy + [subnode.fullname]) nodename = nodefullname.replace('.', '_') subnodename = subnodefullname.replace('.', '_') for _ in self._graph.get_edge_data(node, subnode)['connect']: dotlist.append('%s -> %s;' % (nodename, subnodename)) logger.debug('connection: ' + dotlist[-1]) # add between workflow connections for u, v, d in self._graph.edges_iter(data=True): uname = '.'.join(hierarchy + [u.fullname]) vname = 
'.'.join(hierarchy + [v.fullname]) for src, dest in d['connect']: uname1 = uname vname1 = vname if isinstance(src, tuple): srcname = src[0] else: srcname = src if '.' in srcname: uname1 += '.' + '.'.join(srcname.split('.')[:-1]) if '.' in dest and '@' not in dest: if not isinstance(v, Workflow): if 'datasink' not in \ str(v._interface.__class__).lower(): vname1 += '.' + '.'.join(dest.split('.')[:-1]) else: vname1 += '.' + '.'.join(dest.split('.')[:-1]) if uname1.split('.')[:-1] != vname1.split('.')[:-1]: dotlist.append('%s -> %s;' % (uname1.replace('.', '_'), vname1.replace('.', '_'))) logger.debug('cross connection: ' + dotlist[-1]) return ('\n' + prefix).join(dotlist) class Node(WorkflowBase): """Wraps interface objects for use in pipeline A Node creates a sandbox-like directory for executing the underlying interface. It will copy or link inputs into this directory to ensure that input data are not overwritten. A hash of the input state is used to determine if the Node inputs have changed and whether the node needs to be re-executed. Examples -------- >>> from nipype import Node, spm >>> realign = Node(spm.Realign(), 'realign') >>> realign.inputs.in_files = 'functional.nii' >>> realign.inputs.register_to_mean = True >>> realign.run() # doctest: +SKIP """ def __init__(self, interface, name, iterables=None, itersource=None, synchronize=False, overwrite=None, needed_outputs=None, run_without_submitting=False, **kwargs): """ Parameters ---------- interface : interface object node specific interface (fsl.Bet(), spm.Coregister()) name : alphanumeric string node specific name iterables : generator Input field and list to iterate using the pipeline engine for example to iterate over different frac values in fsl.Bet() for a single field the input can be a tuple, otherwise a list of tuples node.iterables = ('frac',[0.5,0.6,0.7]) node.iterables = [('fwhm',[2,4]),('fieldx',[0.5,0.6,0.7])] If this node has an itersource, then the iterables values is a dictionary which maps an iterable source field value to the target iterables field values, e.g.: inputspec.iterables = ('images',['img1.nii', 'img2.nii']]) node.itersource = ('inputspec', ['frac']) node.iterables = ('frac', {'img1.nii': [0.5, 0.6], img2.nii': [0.6, 0.7]}) If this node's synchronize flag is set, then an alternate form of the iterables is a [fields, values] list, where fields is the list of iterated fields and values is the list of value tuples for the given fields, e.g.: node.synchronize = True node.iterables = [('frac', 'threshold'), [(0.5, True), (0.6, False)]] itersource: tuple The (name, fields) iterables source which specifies the name of the predecessor iterable node and the input fields to use from that source node. The output field values comprise the key to the iterables parameter value mapping dictionary. synchronize: boolean Flag indicating whether iterables are synchronized. If the iterables are synchronized, then this iterable node is expanded once per iteration over all of the iterables values. Otherwise, this iterable node is expanded once per each permutation of the iterables values. overwrite : Boolean Whether to overwrite contents of output directory if it already exists. If directory exists and hash matches it assumes that process has been executed needed_outputs : list of output_names Force the node to keep only specific outputs. By default all outputs are kept. Setting this attribute will delete any output files and directories from the node's working directory that are not part of the `needed_outputs`. 
run_without_submitting : boolean Run the node without submitting to a job engine or to a multiprocessing pool """ base_dir = None if 'base_dir' in kwargs: base_dir = kwargs['base_dir'] super(Node, self).__init__(name, base_dir) if interface is None: raise IOError('Interface must be provided') if not isinstance(interface, Interface): raise IOError('interface must be an instance of an Interface') self._interface = interface self.name = name self._result = None self.iterables = iterables self.synchronize = synchronize self.itersource = itersource self.overwrite = overwrite self.parameterization = None self.run_without_submitting = run_without_submitting self.input_source = {} self.needed_outputs = [] self.plugin_args = {} if needed_outputs: self.needed_outputs = sorted(needed_outputs) self._got_inputs = False @property def interface(self): """Return the underlying interface object""" return self._interface @property def result(self): if self._result: return self._result else: cwd = self.output_dir() result, _, _ = self._load_resultfile(cwd) return result @property def inputs(self): """Return the inputs of the underlying interface""" return self._interface.inputs @property def outputs(self): """Return the output fields of the underlying interface""" return self._interface._outputs() def output_dir(self): """Return the location of the output directory for the node""" if self.base_dir is None: self.base_dir = mkdtemp() outputdir = self.base_dir if self._hierarchy: outputdir = os.path.join(outputdir, *self._hierarchy.split('.')) if self.parameterization: if not str2bool(self.config['execution']['parameterize_dirs']): param_dirs = [self._parameterization_dir(p) for p in self.parameterization] outputdir = os.path.join(outputdir, *param_dirs) else: outputdir = os.path.join(outputdir, *self.parameterization) return os.path.abspath(os.path.join(outputdir, self.name)) def set_input(self, parameter, val): """ Set interface input value""" logger.debug('setting nodelevel(%s) input %s = %s' % (str(self), parameter, str(val))) setattr(self.inputs, parameter, deepcopy(val)) def get_output(self, parameter): """Retrieve a particular output of the node""" val = None if self._result: val = getattr(self._result.outputs, parameter) else: cwd = self.output_dir() result, _, _ = self._load_resultfile(cwd) if result and result.outputs: val = getattr(result.outputs, parameter) return val def help(self): """ Print interface help""" self._interface.help() def hash_exists(self, updatehash=False): # Get a dictionary with hashed filenames and a hashvalue # of the dictionary itself. hashed_inputs, hashvalue = self._get_hashval() outdir = self.output_dir() if os.path.exists(outdir): logger.debug(os.listdir(outdir)) hashfiles = glob(os.path.join(outdir, '_0x*.json')) logger.debug(hashfiles) if len(hashfiles) > 1: logger.info(hashfiles) logger.info('Removing multiple hashfiles and forcing node to rerun') for hashfile in hashfiles: os.unlink(hashfile) hashfile = os.path.join(outdir, '_0x%s.json' % hashvalue) logger.debug(hashfile) if updatehash and os.path.exists(outdir): logger.debug("Updating hash: %s" % hashvalue) for file in glob(os.path.join(outdir, '_0x*.json')): os.remove(file) self._save_hashfile(hashfile, hashed_inputs) return os.path.exists(hashfile), hashvalue, hashfile, hashed_inputs def run(self, updatehash=False): """Execute the node in its directory. 
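        Execution is cached: a hash of the node's current inputs is compared
        against any existing ``_0x<hash>.json`` file in the output directory.
        If the hashes match and neither ``overwrite`` nor the interface's
        ``always_run`` flag forces execution, the previously stored result is
        loaded instead of re-running the interface.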
Parameters ---------- updatehash: boolean Update the hash stored in the output directory """ # check to see if output directory and hash exist if self.config is None: self.config = deepcopy(config._sections) else: self.config = merge_dict(deepcopy(config._sections), self.config) if not self._got_inputs: self._get_inputs() self._got_inputs = True outdir = self.output_dir() logger.info("Executing node %s in dir: %s" % (self._id, outdir)) if os.path.exists(outdir): logger.debug(os.listdir(outdir)) hash_info = self.hash_exists(updatehash=updatehash) hash_exists, hashvalue, hashfile, hashed_inputs = hash_info logger.debug(('updatehash, overwrite, always_run, hash_exists', updatehash, self.overwrite, self._interface.always_run, hash_exists)) if (not updatehash and (((self.overwrite is None and self._interface.always_run) or self.overwrite) or not hash_exists)): logger.debug("Node hash: %s" % hashvalue) # by rerunning we mean only nodes that did finish to run previously json_pat = op.join(outdir, '_0x*.json') json_unfinished_pat = op.join(outdir, '_0x*_unfinished.json') need_rerun = (os.path.exists(outdir) and not isinstance(self, MapNode) and len(glob(json_pat)) != 0 and len(glob(json_unfinished_pat)) == 0) if need_rerun: logger.debug("Rerunning node") logger.debug(("updatehash = %s, " "self.overwrite = %s, " "self._interface.always_run = %s, " "os.path.exists(%s) = %s, " "hash_method = %s") % (str(updatehash), str(self.overwrite), str(self._interface.always_run), hashfile, str(os.path.exists(hashfile)), self.config['execution']['hash_method'].lower())) log_debug = config.get('logging', 'workflow_level') == 'DEBUG' if log_debug and not op.exists(hashfile): exp_hash_paths = glob(json_pat) if len(exp_hash_paths) == 1: split_out = split_filename(exp_hash_paths[0]) exp_hash_file_base = split_out[1] exp_hash = exp_hash_file_base[len('_0x'):] logger.debug("Previous node hash = %s" % exp_hash) try: prev_inputs = load_json(exp_hash_paths[0]) except: pass else: logging.logdebug_dict_differences(prev_inputs, hashed_inputs) cannot_rerun = (str2bool( self.config['execution']['stop_on_first_rerun']) and not (self.overwrite is None and self._interface.always_run)) if cannot_rerun: raise Exception(("Cannot rerun when 'stop_on_first_rerun' " "is set to True")) hashfile_unfinished = os.path.join(outdir, '_0x%s_unfinished.json' % hashvalue) if op.exists(hashfile): os.remove(hashfile) rm_outdir = (op.exists(outdir) and not (op.exists(hashfile_unfinished) and self._interface.can_resume) and not isinstance(self, MapNode)) if rm_outdir: logger.debug("Removing old %s and its contents" % outdir) rmtree(outdir) else: logger.debug(("%s found and can_resume is True or Node is a " "MapNode - resuming execution") % hashfile_unfinished) if isinstance(self, MapNode): # remove old json files for filename in glob(os.path.join(outdir, '_0x*.json')): os.unlink(filename) outdir = make_output_dir(outdir) self._save_hashfile(hashfile_unfinished, hashed_inputs) self.write_report(report_type='preexec', cwd=outdir) savepkl(os.path.join(outdir, '_node.pklz'), self) savepkl(os.path.join(outdir, '_inputs.pklz'), self.inputs.get_traitsfree()) try: self._run_interface() except: os.remove(hashfile_unfinished) raise shutil.move(hashfile_unfinished, hashfile) self.write_report(report_type='postexec', cwd=outdir) else: if not os.path.exists(os.path.join(outdir, '_inputs.pklz')): logger.debug('%s: creating inputs file' % self.name) savepkl(os.path.join(outdir, '_inputs.pklz'), self.inputs.get_traitsfree()) if not 
os.path.exists(os.path.join(outdir, '_node.pklz')): logger.debug('%s: creating node file' % self.name) savepkl(os.path.join(outdir, '_node.pklz'), self) logger.debug("Hashfile exists. Skipping execution") self._run_interface(execute=False, updatehash=updatehash) logger.debug('Finished running %s in dir: %s\n' % (self._id, outdir)) return self._result # Private functions def _parameterization_dir(self, param): """ Returns the directory name for the given parameterization string as follows: - If the parameterization is longer than 32 characters, then return the SHA-1 hex digest. - Otherwise, return the parameterization unchanged. """ if len(param) > 32: return sha1(param).hexdigest() else: return param def _get_hashval(self): """Return a hash of the input state""" if not self._got_inputs: self._get_inputs() self._got_inputs = True hashed_inputs, hashvalue = self.inputs.get_hashval( hash_method=self.config['execution']['hash_method']) rm_extra = self.config['execution']['remove_unnecessary_outputs'] if str2bool(rm_extra) and self.needed_outputs: hashobject = md5() hashobject.update(hashvalue) sorted_outputs = sorted(self.needed_outputs) hashobject.update(str(sorted_outputs)) hashvalue = hashobject.hexdigest() hashed_inputs['needed_outputs'] = sorted_outputs return hashed_inputs, hashvalue def _save_hashfile(self, hashfile, hashed_inputs): try: save_json(hashfile, hashed_inputs) except (IOError, TypeError): err_type = sys.exc_info()[0] if err_type is TypeError: # XXX - SG current workaround is to just # create the hashed file and not put anything # in it fd = open(hashfile, 'wt') fd.writelines(str(hashed_inputs)) fd.close() logger.debug(('Unable to write a particular type to the json ' 'file')) else: logger.critical('Unable to open the file in write mode: %s' % hashfile) def _get_inputs(self): """Retrieve inputs from pointers to results file This mechanism can be easily extended/replaced to retrieve data from other data sources (e.g., XNAT, HTTP, etc.,.) 
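        Each ``input_source`` entry maps an input field of this node to a
        tuple of (path to the predecessor's ``result_<name>.pklz`` file,
        source output descriptor), where the descriptor is either an output
        field name or an (output name, function source, args) tuple for
        connections routed through a connection function. A hypothetical
        entry (the path and field names below are purely illustrative) would
        look like::

            node.input_source['in_file'] = (
                '/working/preproc/realign/result_realign.pklz',
                'out_file')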
""" logger.debug('Setting node inputs') for key, info in self.input_source.items(): logger.debug('input: %s' % key) results_file = info[0] logger.debug('results file: %s' % results_file) results = loadpkl(results_file) output_value = Undefined if isinstance(info[1], tuple): output_name = info[1][0] value = getattr(results.outputs, output_name) if isdefined(value): output_value = evaluate_connect_function(info[1][1], info[1][2], value) else: output_name = info[1] try: output_value = results.outputs.get()[output_name] except TypeError: output_value = results.outputs.dictcopy()[output_name] logger.debug('output: %s' % output_name) try: self.set_input(key, deepcopy(output_value)) except traits.TraitError, e: msg = ['Error setting node input:', 'Node: %s' % self.name, 'input: %s' % key, 'results_file: %s' % results_file, 'value: %s' % str(output_value)] e.args = (e.args[0] + "\n" + '\n'.join(msg),) raise def _run_interface(self, execute=True, updatehash=False): if updatehash: return old_cwd = os.getcwd() os.chdir(self.output_dir()) self._result = self._run_command(execute) os.chdir(old_cwd) def _save_results(self, result, cwd): resultsfile = os.path.join(cwd, 'result_%s.pklz' % self.name) if result.outputs: try: outputs = result.outputs.get() except TypeError: outputs = result.outputs.dictcopy() # outputs was a bunch result.outputs.set(**modify_paths(outputs, relative=True, basedir=cwd)) savepkl(resultsfile, result) logger.debug('saved results in %s' % resultsfile) if result.outputs: result.outputs.set(**outputs) def _load_resultfile(self, cwd): """Load results if it exists in cwd Parameter --------- cwd : working directory of node Returns ------- result : InterfaceResult structure aggregate : boolean indicating whether node should aggregate_outputs attribute error : boolean indicating whether there was some mismatch in versions of traits used to store result and hence node needs to rerun """ aggregate = True resultsoutputfile = os.path.join(cwd, 'result_%s.pklz' % self.name) result = None attribute_error = False if os.path.exists(resultsoutputfile): pkl_file = gzip.open(resultsoutputfile, 'rb') try: result = cPickle.load(pkl_file) except (traits.TraitError, AttributeError, ImportError), err: if isinstance(err, (AttributeError, ImportError)): attribute_error = True logger.debug(('attribute error: %s probably using ' 'different trait pickled file') % str(err)) else: logger.debug(('some file does not exist. 
hence trait ' 'cannot be set')) else: if result.outputs: try: outputs = result.outputs.get() except TypeError: outputs = result.outputs.dictcopy() # outputs == Bunch try: result.outputs.set(**modify_paths(outputs, relative=False, basedir=cwd)) except FileNotFoundError: logger.debug(('conversion to full path results in ' 'non existent file')) aggregate = False pkl_file.close() logger.debug('Aggregate: %s', aggregate) return result, aggregate, attribute_error def _load_results(self, cwd): result, aggregate, attribute_error = self._load_resultfile(cwd) # try aggregating first if aggregate: logger.debug('aggregating results') if attribute_error: old_inputs = loadpkl(os.path.join(cwd, '_inputs.pklz')) self.inputs.set(**old_inputs) if not isinstance(self, MapNode): self._copyfiles_to_wd(cwd, True, linksonly=True) aggouts = self._interface.aggregate_outputs( needed_outputs=self.needed_outputs) runtime = Bunch(cwd=cwd, returncode=0, environ=deepcopy(os.environ.data), hostname=gethostname()) result = InterfaceResult( interface=self._interface.__class__, runtime=runtime, inputs=self._interface.inputs.get_traitsfree(), outputs=aggouts) self._save_results(result, cwd) else: logger.debug('aggregating mapnode results') self._run_interface() result = self._result return result def _run_command(self, execute, copyfiles=True): cwd = os.getcwd() if execute and copyfiles: self._originputs = deepcopy(self._interface.inputs) if execute: runtime = Bunch(returncode=1, environ=deepcopy(os.environ.data), hostname=gethostname()) result = InterfaceResult( interface=self._interface.__class__, runtime=runtime, inputs=self._interface.inputs.get_traitsfree()) self._result = result logger.debug('Executing node') if copyfiles: self._copyfiles_to_wd(cwd, execute) if issubclass(self._interface.__class__, CommandLine): try: cmd = self._interface.cmdline except Exception, msg: self._result.runtime.stderr = msg raise cmdfile = os.path.join(cwd, 'command.txt') fd = open(cmdfile, 'wt') fd.writelines(cmd + "\n") fd.close() logger.info('Running: %s' % cmd) try: result = self._interface.run() except Exception, msg: self._result.runtime.stderr = msg raise dirs2keep = None if isinstance(self, MapNode): dirs2keep = [os.path.join(cwd, 'mapflow')] result.outputs = clean_working_directory(result.outputs, cwd, self._interface.inputs, self.needed_outputs, self.config, dirs2keep=dirs2keep) self._save_results(result, cwd) else: logger.info("Collecting precomputed outputs") try: result = self._load_results(cwd) except (FileNotFoundError, AttributeError): # if aggregation does not work, rerun the node logger.info(("Some of the outputs were not found: " "rerunning node.")) result = self._run_command(execute=True, copyfiles=False) return result def _strip_temp(self, files, wd): out = [] for f in files: if isinstance(f, list): out.append(self._strip_temp(f, wd)) else: out.append(f.replace(os.path.join(wd, '_tempinput'), wd)) return out def _copyfiles_to_wd(self, outdir, execute, linksonly=False): """ copy files over and change the inputs""" if hasattr(self._interface, '_get_filecopy_info'): logger.debug('copying files to wd [execute=%s, linksonly=%s]' % (str(execute), str(linksonly))) if execute and linksonly: olddir = outdir outdir = os.path.join(outdir, '_tempinput') os.makedirs(outdir) for info in self._interface._get_filecopy_info(): files = self.inputs.get().get(info['key']) if not isdefined(files): continue if files: infiles = filename_to_list(files) if execute: if linksonly: if not info['copy']: newfiles = copyfiles(infiles, [outdir], 
copy=info['copy'], create_new=True) else: newfiles = fnames_presuffix(infiles, newpath=outdir) newfiles = self._strip_temp( newfiles, op.abspath(olddir).split(os.path.sep)[-1]) else: newfiles = copyfiles(infiles, [outdir], copy=info['copy'], create_new=True) else: newfiles = fnames_presuffix(infiles, newpath=outdir) if not isinstance(files, list): newfiles = list_to_filename(newfiles) setattr(self.inputs, info['key'], newfiles) if execute and linksonly: rmtree(outdir) def update(self, **opts): self.inputs.update(**opts) def write_report(self, report_type=None, cwd=None): if not str2bool(self.config['execution']['create_report']): return report_dir = os.path.join(cwd, '_report') report_file = os.path.join(report_dir, 'report.rst') if not os.path.exists(report_dir): os.makedirs(report_dir) if report_type == 'preexec': logger.debug('writing pre-exec report to %s' % report_file) fp = open(report_file, 'wt') fp.writelines(write_rst_header('Node: %s' % get_print_name(self), level=0)) fp.writelines(write_rst_list(['Hierarchy : %s' % self.fullname, 'Exec ID : %s' % self._id])) fp.writelines(write_rst_header('Original Inputs', level=1)) fp.writelines(write_rst_dict(self.inputs.get())) if report_type == 'postexec': logger.debug('writing post-exec report to %s' % report_file) fp = open(report_file, 'at') fp.writelines(write_rst_header('Execution Inputs', level=1)) fp.writelines(write_rst_dict(self.inputs.get())) exit_now = (not hasattr(self.result, 'outputs') or self.result.outputs is None) if exit_now: return fp.writelines(write_rst_header('Execution Outputs', level=1)) if isinstance(self.result.outputs, Bunch): fp.writelines(write_rst_dict(self.result.outputs.dictcopy())) elif self.result.outputs: fp.writelines(write_rst_dict(self.result.outputs.get())) if isinstance(self, MapNode): fp.close() return fp.writelines(write_rst_header('Runtime info', level=1)) if hasattr(self.result.runtime, 'cmdline'): fp.writelines(write_rst_dict( {'hostname': self.result.runtime.hostname, 'duration': self.result.runtime.duration, 'command': self.result.runtime.cmdline})) else: fp.writelines(write_rst_dict( {'hostname': self.result.runtime.hostname, 'duration': self.result.runtime.duration})) if hasattr(self.result.runtime, 'merged'): fp.writelines(write_rst_header('Terminal output', level=2)) fp.writelines(write_rst_list(self.result.runtime.merged)) if hasattr(self.result.runtime, 'environ'): fp.writelines(write_rst_header('Environment', level=2)) fp.writelines(write_rst_dict(self.result.runtime.environ)) fp.close() class JoinNode(Node): """Wraps interface objects that join inputs into a list. Examples -------- >>> import nipype.pipeline.engine as pe >>> from nipype import Node, JoinNode, Workflow >>> from nipype.interfaces.utility import IdentityInterface >>> from nipype.interfaces import (ants, dcm2nii, fsl) >>> wf = Workflow(name='preprocess') >>> inputspec = Node(IdentityInterface(fields=['image']), ... name='inputspec') >>> inputspec.iterables = [('image', ... ['img1.nii', 'img2.nii', 'img3.nii'])] >>> img2flt = Node(fsl.ImageMaths(out_data_type='float'), ... name='img2flt') >>> wf.connect(inputspec, 'image', img2flt, 'in_file') >>> average = JoinNode(ants.AverageImages(), joinsource='inputspec', ... 
joinfield='images', name='average') >>> wf.connect(img2flt, 'out_file', average, 'images') >>> realign = Node(fsl.FLIRT(), name='realign') >>> wf.connect(img2flt, 'out_file', realign, 'in_file') >>> wf.connect(average, 'output_average_image', realign, 'reference') >>> strip = Node(fsl.BET(), name='strip') >>> wf.connect(realign, 'out_file', strip, 'in_file') """ def __init__(self, interface, name, joinsource, joinfield=None, unique=False, **kwargs): """ Parameters ---------- interface : interface object node specific interface (fsl.Bet(), spm.Coregister()) name : alphanumeric string node specific name joinsource : node name name of the join predecessor iterable node joinfield : string or list of strings name(s) of list input fields that will be aggregated. The default is all of the join node input fields. unique : flag indicating whether to ignore duplicate input values See Node docstring for additional keyword arguments. """ super(JoinNode, self).__init__(interface, name, **kwargs) self.joinsource = joinsource """the join predecessor iterable node""" if not joinfield: # default is the interface fields joinfield = self._interface.inputs.copyable_trait_names() elif isinstance(joinfield, str): joinfield = [joinfield] self.joinfield = joinfield """the fields to join""" self._inputs = self._override_join_traits(self._interface.inputs, self.joinfield) """the override inputs""" self._unique = unique """flag indicating whether to ignore duplicate input values""" self._next_slot_index = 0 """the joinfield index assigned to an iterated input""" @property def joinsource(self): return self._joinsource @joinsource.setter def joinsource(self, value): """Set the joinsource property. If the given value is a Node, then the joinsource is set to the node name. """ if isinstance(value, Node): value = value.name self._joinsource = value @property def inputs(self): """The JoinNode inputs include the join field overrides.""" return self._inputs def _add_join_item_fields(self): """Add new join item fields assigned to the next iterated input This method is intended solely for workflow graph expansion. Examples -------- >>> from nipype.interfaces.utility import IdentityInterface >>> import nipype.pipeline.engine as pe >>> from nipype import Node, JoinNode, Workflow >>> inputspec = Node(IdentityInterface(fields=['image']), ... name='inputspec'), >>> join = JoinNode(IdentityInterface(fields=['images', 'mask']), ... joinsource='inputspec', joinfield='images', name='join') >>> join._add_join_item_fields() {'images': 'imagesJ1'} Return the {base field: slot field} dictionary """ # create the new join item fields idx = self._next_slot_index newfields = dict([(field, self._add_join_item_field(field, idx)) for field in self.joinfield]) # increment the join slot index logger.debug("Added the %s join item fields %s." % (self, newfields)) self._next_slot_index += 1 return newfields def _add_join_item_field(self, field, index): """Add new join item fields qualified by the given index Return the new field name """ # the new field name name = self._join_item_field_name(field, index) # make a copy of the join trait trait = self._inputs.trait(field, False, True) # add the join item trait to the override traits self._inputs.add_trait(name, trait) return name def _join_item_field_name(self, field, index): """Return the field suffixed by the index + 1""" return "%sJ%d" % (field, index + 1) def _override_join_traits(self, basetraits, fields): """Convert the given join fields to accept an input that is a list item rather than a list. 
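        If a join field is declared as a list trait with a single inner
        trait, the override exposes that inner trait so that each iteration
        supplies one item; the per-iteration values are collated back into a
        list (or set) by ``_collate_input_value``.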
Non-join fields delegate to the interface traits. Return the override DynamicTraitedSpec """ dyntraits = DynamicTraitedSpec() if fields is None: fields = basetraits.copyable_trait_names() else: # validate the fields for field in fields: if not basetraits.trait(field): raise ValueError("The JoinNode %s does not have a field" " named %s" % (self.name, field)) for name, trait in basetraits.items(): # if a join field has a single inner trait, then the item # trait is that inner trait. Otherwise, the item trait is # a new Any trait. if name in fields and len(trait.inner_traits) == 1: item_trait = trait.inner_traits[0] dyntraits.add_trait(name, item_trait) logger.debug("Converted the join node %s field %s" " trait type from %s to %s" % (self, name, trait.trait_type.info(), item_trait.info())) else: dyntraits.add_trait(name, traits.Any) setattr(dyntraits, name, Undefined) return dyntraits def _run_command(self, execute, copyfiles=True): """Collates the join inputs prior to delegating to the superclass.""" self._collate_join_field_inputs() return super(JoinNode, self)._run_command(execute, copyfiles) def _collate_join_field_inputs(self): """ Collects each override join item field into the interface join field input.""" for field in self.inputs.copyable_trait_names(): if field in self.joinfield: # collate the join field val = self._collate_input_value(field) try: setattr(self._interface.inputs, field, val) except Exception as e: raise ValueError(">>JN %s %s %s %s %s: %s" % (self, field, val, self.inputs.copyable_trait_names(), self.joinfield, e)) elif hasattr(self._interface.inputs, field): # copy the non-join field val = getattr(self._inputs, field) if isdefined(val): setattr(self._interface.inputs, field, val) logger.debug("Collated %d inputs into the %s node join fields" % (self._next_slot_index, self)) def _collate_input_value(self, field): """ Collects the join item field values into a list or set value for the given field, as follows: - If the field trait is a Set, then the values are collected into a set. - Otherwise, the values are collected into a list which preserves the iterables order. If the ``unique`` flag is set, then duplicate values are removed but the iterables order is preserved. """ val = [self._slot_value(field, idx) for idx in range(self._next_slot_index)] basetrait = self._interface.inputs.trait(field) if isinstance(basetrait.trait_type, traits.Set): return set(val) elif self._unique: return list(OrderedDict.fromkeys(val)) else: return val def _slot_value(self, field, index): slot_field = self._join_item_field_name(field, index) try: return getattr(self._inputs, slot_field) except AttributeError as e: raise AttributeError("The join node %s does not have a slot field %s" " to hold the %s value at index %d: %s" % (self, slot_field, field, index, e)) class MapNode(Node): """Wraps interface objects that need to be iterated on a list of inputs. Examples -------- >>> from nipype import MapNode, fsl >>> realign = MapNode(fsl.MCFLIRT(), 'in_file', 'realign') >>> realign.inputs.in_file = ['functional.nii', ... 'functional2.nii', ... 'functional3.nii'] >>> realign.run() # doctest: +SKIP """ def __init__(self, interface, iterfield, name, **kwargs): """ Parameters ---------- interface : interface object node specific interface (fsl.Bet(), spm.Coregister()) iterfield : string or list of strings name(s) of input fields that will receive a list of whatever kind of input they take. the node will be run separately for each value in these lists. 
for more than one input, the values are paired (i.e. it does not compute a combinatorial product). name : alphanumeric string node specific name See Node docstring for additional keyword arguments. """ super(MapNode, self).__init__(interface, name, **kwargs) if isinstance(iterfield, str): iterfield = [iterfield] self.iterfield = iterfield self._inputs = self._create_dynamic_traits(self._interface.inputs, fields=self.iterfield) self._inputs.on_trait_change(self._set_mapnode_input) self._got_inputs = False def _create_dynamic_traits(self, basetraits, fields=None, nitems=None): """Convert specific fields of a trait to accept multiple inputs """ output = DynamicTraitedSpec() if fields is None: fields = basetraits.copyable_trait_names() for name, spec in basetraits.items(): if name in fields and ((nitems is None) or (nitems > 1)): logger.debug('adding multipath trait: %s' % name) output.add_trait(name, InputMultiPath(spec.trait_type)) else: output.add_trait(name, traits.Trait(spec)) setattr(output, name, Undefined) value = getattr(basetraits, name) if isdefined(value): setattr(output, name, value) value = getattr(output, name) return output def set_input(self, parameter, val): """ Set interface input value or nodewrapper attribute Priority goes to interface. """ logger.debug('setting nodelevel(%s) input %s = %s' % (str(self), parameter, str(val))) self._set_mapnode_input(self.inputs, parameter, deepcopy(val)) def _set_mapnode_input(self, object, name, newvalue): logger.debug('setting mapnode(%s) input: %s -> %s' % (str(self), name, str(newvalue))) if name in self.iterfield: setattr(self._inputs, name, newvalue) else: setattr(self._interface.inputs, name, newvalue) def _get_hashval(self): """ Compute hash including iterfield lists.""" if not self._got_inputs: self._get_inputs() self._got_inputs = True self._check_iterfield() hashinputs = deepcopy(self._interface.inputs) for name in self.iterfield: hashinputs.remove_trait(name) hashinputs.add_trait( name, InputMultiPath( self._interface.inputs.traits()[name].trait_type)) logger.debug('setting hashinput %s-> %s' % (name, getattr(self._inputs, name))) setattr(hashinputs, name, getattr(self._inputs, name)) hashed_inputs, hashvalue = hashinputs.get_hashval( hash_method=self.config['execution']['hash_method']) rm_extra = self.config['execution']['remove_unnecessary_outputs'] if str2bool(rm_extra) and self.needed_outputs: hashobject = md5() hashobject.update(hashvalue) sorted_outputs = sorted(self.needed_outputs) hashobject.update(str(sorted_outputs)) hashvalue = hashobject.hexdigest() hashed_inputs['needed_outputs'] = sorted_outputs return hashed_inputs, hashvalue @property def inputs(self): return self._inputs @property def outputs(self): if self._interface._outputs(): return Bunch(self._interface._outputs().get()) else: return None def _make_nodes(self, cwd=None): if cwd is None: cwd = self.output_dir() nitems = len(filename_to_list(getattr(self.inputs, self.iterfield[0]))) for i in range(nitems): nodename = '_' + self.name + str(i) node = Node(deepcopy(self._interface), name=nodename) node.overwrite = self.overwrite node.run_without_submitting = self.run_without_submitting node.plugin_args = self.plugin_args node._interface.inputs.set( **deepcopy(self._interface.inputs.get())) for field in self.iterfield: fieldvals = filename_to_list(getattr(self.inputs, field)) logger.debug('setting input %d %s %s' % (i, field, fieldvals[i])) setattr(node.inputs, field, fieldvals[i]) node.config = self.config node.base_dir = os.path.join(cwd, 'mapflow') yield i, 
node def _node_runner(self, nodes, updatehash=False): for i, node in nodes: err = None try: node.run(updatehash=updatehash) except Exception, err: if str2bool(self.config['execution']['stop_on_first_crash']): self._result = node.result raise yield i, node, err def _collate_results(self, nodes): self._result = InterfaceResult(interface=[], runtime=[], provenance=[], inputs=[], outputs=self.outputs) returncode = [] for i, node, err in nodes: self._result.runtime.insert(i, None) if node.result: if hasattr(node.result, 'runtime'): self._result.interface.insert(i, node.result.interface) self._result.inputs.insert(i, node.result.inputs) self._result.runtime[i] = node.result.runtime if hasattr(node.result, 'provenance'): self._result.provenance.insert(i, node.result.provenance) returncode.insert(i, err) if self.outputs: for key, _ in self.outputs.items(): rm_extra = (self.config['execution'] ['remove_unnecessary_outputs']) if str2bool(rm_extra) and self.needed_outputs: if key not in self.needed_outputs: continue values = getattr(self._result.outputs, key) if not isdefined(values): values = [] if node.result.outputs: values.insert(i, node.result.outputs.get()[key]) else: values.insert(i, None) defined_vals = [isdefined(val) for val in values] if any(defined_vals) and self._result.outputs: setattr(self._result.outputs, key, values) if returncode and any([code is not None for code in returncode]): msg = [] for i, code in enumerate(returncode): if code is not None: msg += ['Subnode %d failed' % i] msg += ['Error:', str(code)] raise Exception('Subnodes of node: %s failed:\n%s' % (self.name, '\n'.join(msg))) def write_report(self, report_type=None, cwd=None): if not str2bool(self.config['execution']['create_report']): return if report_type == 'preexec': super(MapNode, self).write_report(report_type=report_type, cwd=cwd) if report_type == 'postexec': super(MapNode, self).write_report(report_type=report_type, cwd=cwd) report_dir = os.path.join(cwd, '_report') report_file = os.path.join(report_dir, 'report.rst') fp = open(report_file, 'at') fp.writelines(write_rst_header('Subnode reports', level=1)) nitems = len(filename_to_list( getattr(self.inputs, self.iterfield[0]))) subnode_report_files = [] for i in range(nitems): nodename = '_' + self.name + str(i) subnode_report_files.insert(i, 'subnode %d' % i + ' : ' + os.path.join(cwd, 'mapflow', nodename, '_report', 'report.rst')) fp.writelines(write_rst_list(subnode_report_files)) fp.close() def get_subnodes(self): if not self._got_inputs: self._get_inputs() self._got_inputs = True self._check_iterfield() self.write_report(report_type='preexec', cwd=self.output_dir()) return [node for _, node in self._make_nodes()] def num_subnodes(self): if not self._got_inputs: self._get_inputs() self._got_inputs = True self._check_iterfield() return len(filename_to_list(getattr(self.inputs, self.iterfield[0]))) def _get_inputs(self): old_inputs = self._inputs.get() self._inputs = self._create_dynamic_traits(self._interface.inputs, fields=self.iterfield) self._inputs.set(**old_inputs) super(MapNode, self)._get_inputs() def _check_iterfield(self): """Checks iterfield * iterfield must be in inputs * number of elements must match across iterfield """ for iterfield in self.iterfield: if not isdefined(getattr(self.inputs, iterfield)): raise ValueError(("Input %s was not set but it is listed " "in iterfields.") % iterfield) if len(self.iterfield) > 1: first_len = len(filename_to_list(getattr(self.inputs, self.iterfield[0]))) for iterfield in self.iterfield[1:]: if first_len != 
len(filename_to_list(getattr(self.inputs, iterfield))): raise ValueError(("All iterfields of a MapNode have to " "have the same length. %s") % str(self.inputs)) def _run_interface(self, execute=True, updatehash=False): """Run the mapnode interface This is primarily intended for serial execution of mapnode. A parallel execution requires creation of new nodes that can be spawned """ old_cwd = os.getcwd() cwd = self.output_dir() os.chdir(cwd) self._check_iterfield() if execute: nitems = len(filename_to_list(getattr(self.inputs, self.iterfield[0]))) nodenames = ['_' + self.name + str(i) for i in range(nitems)] # map-reduce formulation self._collate_results(self._node_runner(self._make_nodes(cwd), updatehash=updatehash)) self._save_results(self._result, cwd) # remove any node directories no longer required dirs2remove = [] for path in glob(os.path.join(cwd, 'mapflow', '*')): if os.path.isdir(path): if path.split(os.path.sep)[-1] not in nodenames: dirs2remove.append(path) for path in dirs2remove: shutil.rmtree(path) else: self._result = self._load_results(cwd) os.chdir(old_cwd) nipype-0.9.2/nipype/pipeline/plugins/000077500000000000000000000000001227300005300176105ustar00rootroot00000000000000nipype-0.9.2/nipype/pipeline/plugins/API.rst000066400000000000000000000002051227300005300207500ustar00rootroot00000000000000Execution plugin API ==================== Current status: class plugin_runner(PluginBase): def run(graph, config, updatehash) nipype-0.9.2/nipype/pipeline/plugins/__init__.py000066400000000000000000000011111227300005300217130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .debug import DebugPlugin from .linear import LinearPlugin from .ipythonx import IPythonXPlugin from .pbs import PBSPlugin from .sge import SGEPlugin from .condor import CondorPlugin from .dagman import CondorDAGManPlugin from .multiproc import MultiProcPlugin from .ipython import IPythonPlugin from .somaflow import SomaFlowPlugin from .pbsgraph import PBSGraphPlugin from .sgegraph import SGEGraphPlugin from .lsf import LSFPlugin from .slurm import SLURMPlugin nipype-0.9.2/nipype/pipeline/plugins/base.py000066400000000000000000000632051227300005300211020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Common graph operations for execution """ from copy import deepcopy from glob import glob import os import pwd import shutil from socket import gethostname import sys from time import strftime, sleep, time from traceback import format_exception, format_exc from warnings import warn import numpy as np import scipy.sparse as ssp from ..utils import (nx, dfs_preorder, topological_sort) from ..engine import (MapNode, str2bool) from nipype.utils.filemanip import savepkl, loadpkl from nipype.interfaces.utility import Function from ... import logging logger = logging.getLogger('workflow') iflogger = logging.getLogger('interface') def report_crash(node, traceback=None, hostname=None): """Writes crash related information to a file """ name = node._id if node.result and hasattr(node.result, 'runtime') and \ node.result.runtime: if isinstance(node.result.runtime, list): host = node.result.runtime[0].hostname else: host = node.result.runtime.hostname else: if hostname: host = hostname else: host = gethostname() message = ['Node %s failed to run on host %s.' 
% (name, host)] logger.error(message) if not traceback: exc_type, exc_value, exc_traceback = sys.exc_info() traceback = format_exception(exc_type, exc_value, exc_traceback) timeofcrash = strftime('%Y%m%d-%H%M%S') login_name = pwd.getpwuid(os.geteuid())[0] crashfile = 'crash-%s-%s-%s.npz' % (timeofcrash, login_name, name) crashdir = node.config['execution']['crashdump_dir'] if crashdir is None: crashdir = os.getcwd() if not os.path.exists(crashdir): os.makedirs(crashdir) crashfile = os.path.join(crashdir, crashfile) logger.info('Saving crash info to %s' % crashfile) logger.info(''.join(traceback)) np.savez(crashfile, node=node, traceback=traceback) return crashfile def report_nodes_not_run(notrun): """List nodes that crashed with crashfile info Optionally displays dependent nodes that weren't executed as a result of the crash. """ if notrun: logger.info("***********************************") for info in notrun: logger.error("could not run node: %s" % '.'.join((info['node']._hierarchy, info['node']._id))) logger.info("crashfile: %s" % info['crashfile']) logger.debug("The following dependent nodes were not run") for subnode in info['dependents']: logger.debug(subnode._id) logger.info("***********************************") raise RuntimeError(('Workflow did not execute cleanly. ' 'Check log for details')) def create_pyscript(node, updatehash=False, store_exception=True): # pickle node timestamp = strftime('%Y%m%d_%H%M%S') if node._hierarchy: suffix = '%s_%s_%s' % (timestamp, node._hierarchy, node._id) batch_dir = os.path.join(node.base_dir, node._hierarchy.split('.')[0], 'batch') else: suffix = '%s_%s' % (timestamp, node._id) batch_dir = os.path.join(node.base_dir, 'batch') if not os.path.exists(batch_dir): os.makedirs(batch_dir) pkl_file = os.path.join(batch_dir, 'node_%s.pklz' % suffix) savepkl(pkl_file, dict(node=node, updatehash=updatehash)) mpl_backend = node.config["execution"]["matplotlib_backend"] # create python script to load and trap exception cmdstr = """import os import sys try: import matplotlib matplotlib.use('%s') except ImportError: pass from nipype import config, logging from nipype.utils.filemanip import loadpkl, savepkl from socket import gethostname from traceback import format_exception info = None pklfile = '%s' batchdir = '%s' from nipype.utils.filemanip import loadpkl, savepkl try: if not sys.version_info < (2, 7): from collections import OrderedDict config_dict=%s config.update_config(config_dict) config.update_matplotlib() logging.update_logging(config) traceback=None cwd = os.getcwd() info = loadpkl(pklfile) result = info['node'].run(updatehash=info['updatehash']) except Exception, e: etype, eval, etr = sys.exc_info() traceback = format_exception(etype,eval,etr) if info is None or not os.path.exists(info['node'].output_dir()): result = None resultsfile = os.path.join(batchdir, 'crashdump_%s.pklz') else: result = info['node'].result resultsfile = os.path.join(info['node'].output_dir(), 'result_%%s.pklz'%%info['node'].name) """ if store_exception: cmdstr += """ savepkl(resultsfile, dict(result=result, hostname=gethostname(), traceback=traceback)) """ else: cmdstr += """ if info is None: savepkl(resultsfile, dict(result=result, hostname=gethostname(), traceback=traceback)) else: from nipype.pipeline.plugins.base import report_crash report_crash(info['node'], traceback, gethostname()) raise Exception(e) """ cmdstr = cmdstr % (mpl_backend, pkl_file, batch_dir, node.config, suffix) pyscript = os.path.join(batch_dir, 'pyscript_%s.py' % suffix) fp = open(pyscript, 'wt') 
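    # When executed, the generated wrapper script unpickles the node from
    # pkl_file, runs it with the saved updatehash flag, and either pickles
    # the exception traceback alongside the result or re-raises it,
    # depending on store_exception.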
fp.writelines(cmdstr) fp.close() return pyscript class PluginBase(object): """Base class for plugins""" def __init__(self, plugin_args=None): if plugin_args and 'status_callback' in plugin_args: self._status_callback = plugin_args['status_callback'] else: self._status_callback = None return def run(self, graph, config, updatehash=False): raise NotImplementedError class DistributedPluginBase(PluginBase): """Execute workflow with a distribution engine """ def __init__(self, plugin_args=None): """Initialize runtime attributes to none procs: list (N) of underlying interface elements to be processed proc_done: a boolean vector (N) signifying whether a process has been executed proc_pending: a boolean vector (N) signifying whether a process is currently running. Note: A process is finished only when both proc_done==True and proc_pending==False depidx: a boolean matrix (NxN) storing the dependency structure accross processes. Process dependencies are derived from each column. """ super(DistributedPluginBase, self).__init__(plugin_args=plugin_args) self.procs = None self.depidx = None self.refidx = None self.mapnodes = None self.mapnodesubids = None self.proc_done = None self.proc_pending = None self.max_jobs = np.inf if plugin_args and 'max_jobs' in plugin_args: self.max_jobs = plugin_args['max_jobs'] def run(self, graph, config, updatehash=False): """Executes a pre-defined pipeline using distributed approaches """ logger.info("Running in parallel.") self._config = config # Generate appropriate structures for worker-manager model self._generate_dependency_list(graph) self.pending_tasks = [] self.readytorun = [] self.mapnodes = [] self.mapnodesubids = {} # setup polling - TODO: change to threaded model notrun = [] while np.any(self.proc_done == False) | \ np.any(self.proc_pending == True): toappend = [] # trigger callbacks for any pending results while self.pending_tasks: taskid, jobid = self.pending_tasks.pop() try: result = self._get_result(taskid) if result: if result['traceback']: notrun.append(self._clean_queue(jobid, graph, result=result)) else: self._task_finished_cb(jobid) self._remove_node_dirs() self._clear_task(taskid) else: toappend.insert(0, (taskid, jobid)) except Exception: result = {'result': None, 'traceback': format_exc()} notrun.append(self._clean_queue(jobid, graph, result=result)) if toappend: self.pending_tasks.extend(toappend) num_jobs = len(self.pending_tasks) if num_jobs < self.max_jobs: if np.isinf(self.max_jobs): slots = None else: slots = self.max_jobs - num_jobs self._send_procs_to_workers(updatehash=updatehash, slots=slots, graph=graph) sleep(2) self._remove_node_dirs() report_nodes_not_run(notrun) def _get_result(self, taskid): raise NotImplementedError def _submit_job(self, node, updatehash=False): raise NotImplementedError def _report_crash(self, node, result=None): raise NotImplementedError def _clear_task(self, taskid): raise NotImplementedError def _clean_queue(self, jobid, graph, result=None): if str2bool(self._config['execution']['stop_on_first_crash']): raise RuntimeError("".join(result['traceback'])) crashfile = self._report_crash(self.procs[jobid], result=result) if self._status_callback: self._status_callback(self.procs[jobid], 'exception') if jobid in self.mapnodesubids: # remove current jobid self.proc_pending[jobid] = False self.proc_done[jobid] = True # remove parent mapnode jobid = self.mapnodesubids[jobid] self.proc_pending[jobid] = False self.proc_done[jobid] = True # remove dependencies from queue return self._remove_node_deps(jobid, crashfile, 
graph) def _submit_mapnode(self, jobid): if jobid in self.mapnodes: return True self.mapnodes.append(jobid) mapnodesubids = self.procs[jobid].get_subnodes() numnodes = len(mapnodesubids) logger.info('Adding %d jobs for mapnode %s' % (numnodes, self.procs[jobid]._id)) for i in range(numnodes): self.mapnodesubids[self.depidx.shape[0] + i] = jobid self.procs.extend(mapnodesubids) self.depidx = ssp.vstack((self.depidx, ssp.lil_matrix(np.zeros((numnodes, self.depidx.shape[1])))), 'lil') self.depidx = ssp.hstack((self.depidx, ssp.lil_matrix( np.zeros((self.depidx.shape[0], numnodes)))), 'lil') self.depidx[-numnodes:, jobid] = 1 self.proc_done = np.concatenate((self.proc_done, np.zeros(numnodes, dtype=bool))) self.proc_pending = np.concatenate((self.proc_pending, np.zeros(numnodes, dtype=bool))) return False def _send_procs_to_workers(self, updatehash=False, slots=None, graph=None): """ Sends jobs to workers using ipython's taskclient interface """ while np.any(self.proc_done == False): # Check to see if a job is available jobids = np.flatnonzero((self.proc_done == False) & (self.depidx.sum(axis=0) == 0).__array__()) if len(jobids) > 0: # send all available jobs logger.info('Submitting %d jobs' % len(jobids)) for jobid in jobids[:slots]: if isinstance(self.procs[jobid], MapNode): try: num_subnodes = self.procs[jobid].num_subnodes() except Exception: self._clean_queue(jobid, graph) self.proc_pending[jobid] = False continue if num_subnodes > 1: submit = self._submit_mapnode(jobid) if not submit: continue # change job status in appropriate queues self.proc_done[jobid] = True self.proc_pending[jobid] = True # Send job to task manager and add to pending tasks logger.info('Executing: %s ID: %d' % (self.procs[jobid]._id, jobid)) if self._status_callback: self._status_callback(self.procs[jobid], 'start') continue_with_submission = True if str2bool(self.procs[jobid].config['execution']['local_hash_check']): logger.debug('checking hash locally') try: hash_exists, _, _, _ = self.procs[ jobid].hash_exists() logger.debug('Hash exists %s' % str(hash_exists)) if (hash_exists and (self.procs[jobid].overwrite == False or (self.procs[jobid].overwrite == None and not self.procs[jobid]._interface.always_run))): continue_with_submission = False self._task_finished_cb(jobid) self._remove_node_dirs() except Exception: self._clean_queue(jobid, graph) self.proc_pending[jobid] = False continue_with_submission = False logger.debug('Finished checking hash %s' % str(continue_with_submission)) if continue_with_submission: if self.procs[jobid].run_without_submitting: logger.debug('Running node %s on master thread' % self.procs[jobid]) try: self.procs[jobid].run() except Exception: self._clean_queue(jobid, graph) self._task_finished_cb(jobid) self._remove_node_dirs() else: tid = self._submit_job(deepcopy(self.procs[jobid]), updatehash=updatehash) if tid is None: self.proc_done[jobid] = False self.proc_pending[jobid] = False else: self.pending_tasks.insert(0, (tid, jobid)) else: break def _task_finished_cb(self, jobid): """ Extract outputs and assign to inputs of dependent tasks This is called when a job is completed. 
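        Completion zeroes this job's row of the dependency matrix, so nodes
        that were waiting only on it become eligible on the next scheduling
        pass.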
""" logger.info('[Job finished] jobname: %s jobid: %d' % (self.procs[jobid]._id, jobid)) if self._status_callback: self._status_callback(self.procs[jobid], 'end') # Update job and worker queues self.proc_pending[jobid] = False # update the job dependency structure rowview = self.depidx.getrowview(jobid) rowview[rowview.nonzero()] = 0 if jobid not in self.mapnodesubids: self.refidx[self.refidx[:, jobid].nonzero()[0], jobid] = 0 def _generate_dependency_list(self, graph): """ Generates a dependency list for a list of graphs. """ self.procs, _ = topological_sort(graph) try: self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs, format='lil') except: self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs) self.refidx = deepcopy(self.depidx) self.refidx.astype = np.int self.proc_done = np.zeros(len(self.procs), dtype=bool) self.proc_pending = np.zeros(len(self.procs), dtype=bool) def _remove_node_deps(self, jobid, crashfile, graph): subnodes = [s for s in dfs_preorder(graph, self.procs[jobid])] for node in subnodes: idx = self.procs.index(node) self.proc_done[idx] = True self.proc_pending[idx] = False return dict(node=self.procs[jobid], dependents=subnodes, crashfile=crashfile) def _remove_node_dirs(self): """Removes directories whose outputs have already been used up """ if str2bool(self._config['execution']['remove_node_directories']): for idx in np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0]: if idx in self.mapnodesubids: continue if self.proc_done[idx] and (not self.proc_pending[idx]): self.refidx[idx, idx] = -1 outdir = self.procs[idx]._output_directory() logger.info(('[node dependencies finished] ' 'removing node: %s from directory %s') % (self.procs[idx]._id, outdir)) shutil.rmtree(outdir) class SGELikeBatchManagerBase(DistributedPluginBase): """Execute workflow with SGE/OGE/PBS like batch system """ def __init__(self, template, plugin_args=None): super(SGELikeBatchManagerBase, self).__init__(plugin_args=plugin_args) self._template = template self._qsub_args = None if plugin_args: if 'template' in plugin_args: self._template = plugin_args['template'] if os.path.isfile(self._template): self._template = open(self._template).read() if 'qsub_args' in plugin_args: self._qsub_args = plugin_args['qsub_args'] self._pending = {} def _is_pending(self, taskid): """Check if a task is pending in the batch system """ raise NotImplementedError def _submit_batchtask(self, scriptfile, node): """Submit a task to the batch system """ raise NotImplementedError def _get_result(self, taskid): if taskid not in self._pending: raise Exception('Task %d not found' % taskid) if self._is_pending(taskid): return None node_dir = self._pending[taskid] # MIT HACK # on the pbs system at mit the parent node directory needs to be # accessed before internal directories become available. there # is a disconnect when the queueing engine knows a job is # finished to when the directories become statable. t = time() timeout = float(self._config['execution']['job_finished_timeout']) timed_out = True while (time() - t) < timeout: try: logger.debug(os.listdir(os.path.realpath(os.path.join(node_dir, '..')))) logger.debug(os.listdir(node_dir)) glob(os.path.join(node_dir, 'result_*.pklz')).pop() timed_out = False break except Exception, e: logger.debug(e) sleep(2) if timed_out: result_data = {'hostname': 'unknown', 'result': None, 'traceback': None} results_file = None try: raise IOError(('Job (%s) finished or terminated, but results file ' 'does not exist. 
Batch dir contains crashdump ' 'file if node raised an exception' % node_dir)) except IOError, e: result_data['traceback'] = format_exc() else: results_file = glob(os.path.join(node_dir, 'result_*.pklz'))[0] result_data = loadpkl(results_file) result_out = dict(result=None, traceback=None) if isinstance(result_data, dict): result_out['result'] = result_data['result'] result_out['traceback'] = result_data['traceback'] result_out['hostname'] = result_data['hostname'] if results_file: crash_file = os.path.join(node_dir, 'crashstore.pklz') os.rename(results_file, crash_file) else: result_out['result'] = result_data return result_out def _submit_job(self, node, updatehash=False): """submit job and return taskid """ pyscript = create_pyscript(node, updatehash=updatehash) batch_dir, name = os.path.split(pyscript) name = '.'.join(name.split('.')[:-1]) batchscript = '\n'.join((self._template, '%s %s' % (sys.executable, pyscript))) batchscriptfile = os.path.join(batch_dir, 'batchscript_%s.sh' % name) fp = open(batchscriptfile, 'wt') fp.writelines(batchscript) fp.close() return self._submit_batchtask(batchscriptfile, node) def _report_crash(self, node, result=None): if result and result['traceback']: node._result = result['result'] node._traceback = result['traceback'] return report_crash(node, traceback=result['traceback']) else: return report_crash(node) def _clear_task(self, taskid): del self._pending[taskid] class GraphPluginBase(PluginBase): """Base class for plugins that distribute graphs to workflows """ def __init__(self, plugin_args=None): if plugin_args and 'status_callback' in plugin_args: warn('status_callback not supported for Graph submission plugins') super(GraphPluginBase, self).__init__(plugin_args=plugin_args) def run(self, graph, config, updatehash=False): pyfiles = [] dependencies = {} self._config = config nodes = nx.topological_sort(graph) logger.debug('Creating executable python files for each node') for idx, node in enumerate(nodes): pyfiles.append(create_pyscript(node, updatehash=updatehash, store_exception=False)) dependencies[idx] = [nodes.index(prevnode) for prevnode in graph.predecessors(node)] self._submit_graph(pyfiles, dependencies, nodes) def _get_args(self, node, keywords): values = () for keyword in keywords: value = getattr(self, "_" + keyword) if keyword == "template" and os.path.isfile(value): value = open(value).read() if hasattr(node, "plugin_args") and isinstance(node.plugin_args, dict) and keyword in node.plugin_args: if keyword == "template" and os.path.isfile(node.plugin_args[keyword]): tmp_value = open(node.plugin_args[keyword]).read() else: tmp_value = node.plugin_args[keyword] if 'overwrite' in node.plugin_args and node.plugin_args['overwrite']: value = tmp_value else: value += tmp_value values += (value, ) return values def _submit_graph(self, pyfiles, dependencies, nodes): """ pyfiles: list of files corresponding to a topological sort dependencies: dictionary of dependencies based on the toplogical sort """ raise NotImplementedError def _get_result(self, taskid): if taskid not in self._pending: raise Exception('Task %d not found' % taskid) if self._is_pending(taskid): return None node_dir = self._pending[taskid] logger.debug(os.listdir(os.path.realpath(os.path.join(node_dir, '..')))) logger.debug(os.listdir(node_dir)) glob(os.path.join(node_dir, 'result_*.pklz')).pop() results_file = glob(os.path.join(node_dir, 'result_*.pklz'))[0] result_data = loadpkl(results_file) result_out = dict(result=None, traceback=None) if isinstance(result_data, dict): 
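            # wrapper scripts pickle a dict of result/traceback/hostname;
            # anything else is treated as a bare result object (see else below)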
result_out['result'] = result_data['result'] result_out['traceback'] = result_data['traceback'] result_out['hostname'] = result_data['hostname'] if results_file: crash_file = os.path.join(node_dir, 'crashstore.pklz') os.rename(results_file, crash_file) else: result_out['result'] = result_data return result_out nipype-0.9.2/nipype/pipeline/plugins/condor.py000066400000000000000000000107571227300005300214600ustar00rootroot00000000000000"""Parallel workflow execution via Condor """ import os from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) from nipype.interfaces.base import CommandLine from time import sleep class CondorPlugin(SGELikeBatchManagerBase): """Execute using Condor This plugin doesn't work with a plain stock-Condor installation, but requires a 'qsub' emulation script for Condor, called 'condor_qsub'. This script is shipped with the Condor package from NeuroDebian, or can be downloaded from its Git repository at http://anonscm.debian.org/gitweb/?p=pkg-exppsy/condor.git;a=blob_plain;f=debian/condor_qsub;hb=HEAD The plugin_args input to run can be used to control the Condor execution. Currently supported options are: - template : template to use for batch job submission. This can be an SGE-style script with the (limited) set of options supported by condor_qsub - qsub_args : arguments to be prepended to the job execution script in the qsub call """ def __init__(self, **kwargs): template = """ #$ -V #$ -S /bin/sh """ self._retry_timeout = 2 self._max_tries = 2 if 'plugin_args' in kwargs and kwargs['plugin_args']: if 'retry_timeout' in kwargs['plugin_args']: self._retry_timeout = kwargs['plugin_args']['retry_timeout'] if 'max_tries' in kwargs['plugin_args']: self._max_tries = kwargs['plugin_args']['max_tries'] super(CondorPlugin, self).__init__(template, **kwargs) def _is_pending(self, taskid): cmd = CommandLine('condor_q', terminal_output='allatonce') cmd.inputs.args = '%d' % taskid # check condor cluster oldlevel = iflogger.level iflogger.setLevel(logging.getLevelName('CRITICAL')) result = cmd.run(ignore_exception=True) iflogger.setLevel(oldlevel) if result.runtime.stdout.count('\n%d' % taskid): return True return False def _submit_batchtask(self, scriptfile, node): cmd = CommandLine('condor_qsub', environ=os.environ.data, terminal_output='allatonce') path = os.path.dirname(scriptfile) qsubargs = '' if self._qsub_args: qsubargs = self._qsub_args if 'qsub_args' in node.plugin_args: if 'overwrite' in node.plugin_args and\ node.plugin_args['overwrite']: qsubargs = node.plugin_args['qsub_args'] else: qsubargs += (" " + node.plugin_args['qsub_args']) if self._qsub_args: qsubargs = self._qsub_args if '-o' not in qsubargs: qsubargs = '%s -o %s' % (qsubargs, path) if '-e' not in qsubargs: qsubargs = '%s -e %s' % (qsubargs, path) if node._hierarchy: jobname = '.'.join((os.environ.data['LOGNAME'], node._hierarchy, node._id)) else: jobname = '.'.join((os.environ.data['LOGNAME'], node._id)) jobnameitems = jobname.split('.') jobnameitems.reverse() jobname = '.'.join(jobnameitems) cmd.inputs.args = '%s -N %s %s' % (qsubargs, jobname, scriptfile) oldlevel = iflogger.level iflogger.setLevel(logging.getLevelName('CRITICAL')) tries = 0 while True: try: result = cmd.run() except Exception, e: if tries < self._max_tries: tries += 1 sleep(self._retry_timeout) # sleep 2 seconds and try again. 
else: iflogger.setLevel(oldlevel) raise RuntimeError('\n'.join((('Could not submit condor ' 'cluster' ' for node %s') % node._id, str(e)))) else: break iflogger.setLevel(oldlevel) # retrieve condor clusterid taskid = int(result.runtime.stdout.split(' ')[2]) self._pending[taskid] = node.output_dir() logger.debug('submitted condor cluster: %d for node %s' % (taskid, node._id)) return taskid nipype-0.9.2/nipype/pipeline/plugins/dagman.py000066400000000000000000000172161227300005300214200ustar00rootroot00000000000000"""Parallel workflow execution via Condor DAGMan """ import os import sys import uuid import time from warnings import warn from .base import (GraphPluginBase, logger) from ...interfaces.base import CommandLine class CondorDAGManPlugin(GraphPluginBase): """Execute using Condor DAGMan The plugin_args input to run can be used to control the DAGMan execution. The value of most arguments can be a literal string or a filename, where in the latter case the content of the file will be used as the argument value. Currently supported options are: - submit_template : submit spec template for individual jobs in a DAG (see CondorDAGManPlugin.default_submit_template for the default. - initial_specs : additional submit specs that are prepended to any job's submit file - override_specs : additional submit specs that are appended to any job's submit file - wrapper_cmd : path to an exectuable that will be started instead of a node script. This is useful for wrapper script that execute certain functionality prior or after a node runs. If this option is given the wrapper command is called with the respective Python exectuable and the path to the node script as final arguments - wrapper_args : optional additional arguments to a wrapper command - dagman_args : arguments to be prepended to the arguments of the condor_submit_dag call - block : if True the plugin call will block until Condor has finished prcoessing the entire workflow (default: False) """ default_submit_template = """ universe = vanilla notification = Never executable = %(executable)s arguments = %(nodescript)s output = %(basename)s.out error = %(basename)s.err log = %(basename)s.log getenv = True """ def _get_str_or_file(self, arg): if os.path.isfile(arg): content = open(arg).read() else: content = arg return content # XXX feature wishlist # - infer data file dependencies from jobs # - infer CPU requirements from jobs # - infer memory requirements from jobs # - looks like right now all jobs come in here, regardless of whether they # actually have to run. would be good to be able to decide whether they # actually have to be scheduled (i.e. output already exist). 
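    # A minimal usage sketch, not a definitive recipe: assuming a Condor pool
    # with condor_submit_dag on PATH and an already-built Workflow `wf`, the
    # plugin is picked by name and tuned via plugin_args; the submit spec and
    # argument values below are purely illustrative.
    #
    #   wf.run(plugin='CondorDAGMan',
    #          plugin_args={'initial_specs': 'request_memory = 2000',
    #                       'dagman_args': '-maxjobs 10',
    #                       'block': True})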
def __init__(self, **kwargs): for var, id_, val in \ (('_template', 'submit_template', self.default_submit_template), ('_initial_specs', 'template', ''), ('_initial_specs', 'initial_specs', ''), ('_override_specs', 'submit_specs', ''), ('_override_specs', 'override_specs', ''), ('_wrapper_cmd', 'wrapper_cmd', None), ('_wrapper_args', 'wrapper_args', ''), ('_block', 'block', False), ('_dagman_args', 'dagman_args', '')): if 'plugin_args' in kwargs \ and not kwargs['plugin_args'] is None \ and id_ in kwargs['plugin_args']: if id_ == 'wrapper_cmd': val = os.path.abspath(kwargs['plugin_args'][id_]) elif id_ == 'block': val = kwargs['plugin_args'][id_] else: val = self._get_str_or_file(kwargs['plugin_args'][id_]) setattr(self, var, val) # TODO remove after some time if 'plugin_args' in kwargs \ and not kwargs['plugin_args'] is None: plugin_args = kwargs['plugin_args'] if 'template' in plugin_args: warn("the 'template' argument is deprecated, use 'initial_specs' instead") if 'submit_specs' in plugin_args: warn("the 'submit_specs' argument is deprecated, use 'override_specs' instead") super(CondorDAGManPlugin, self).__init__(**kwargs) def _submit_graph(self, pyfiles, dependencies, nodes): # location of all scripts, place dagman output in here too batch_dir, _ = os.path.split(pyfiles[0]) # DAG description filename dagfilename = os.path.join(batch_dir, 'workflow-%s.dag' % uuid.uuid4()) with open(dagfilename, 'wt') as dagfileptr: # loop over all scripts, create submit files, and define them # as jobs in the DAG for idx, pyscript in enumerate(pyfiles): node = nodes[idx] # XXX redundant with previous value? or could it change between # scripts? template, initial_specs, override_specs, wrapper_cmd, wrapper_args = \ self._get_args(node, ["template", "initial_specs", "override_specs", "wrapper_cmd", "wrapper_args"]) # add required slots to the template template = '%s\n%s\n%s\nqueue\n' % ( '%(initial_specs)s', template, '%(override_specs)s') batch_dir, name = os.path.split(pyscript) name = '.'.join(name.split('.')[:-1]) specs = dict( # TODO make parameter for this, initial_specs=initial_specs, executable=sys.executable, nodescript=pyscript, basename=os.path.join(batch_dir, name), override_specs=override_specs ) if not wrapper_cmd is None: specs['executable'] = wrapper_cmd specs['nodescript'] = \ '%s %s %s' % (wrapper_args % specs, # give access to variables sys.executable, pyscript) submitspec = template % specs # write submit spec for this job submitfile = os.path.join(batch_dir, '%s.submit' % name) with open(submitfile, 'wt') as submitfileprt: submitfileprt.writelines(submitspec) submitfileprt.close() # define job in DAG dagfileptr.write('JOB %i %s\n' % (idx, submitfile)) # define dependencies in DAG for child in dependencies: parents = dependencies[child] if len(parents): dagfileptr.write('PARENT %s CHILD %i\n' % (' '.join([str(i) for i in parents]), child)) # hand over DAG to condor_dagman cmd = CommandLine('condor_submit_dag', environ=os.environ.data, terminal_output='allatonce') # needs -update_submit or re-running a workflow will fail cmd.inputs.args = '%s -update_submit %s' % (self._dagman_args, dagfilename) cmd.run() logger.info('submitted all jobs to Condor DAGMan') if self._block: # wait for DAGMan to settle down, no time wasted it is already running time.sleep(10) if not os.path.exists('%s.condor.sub' % dagfilename): raise EnvironmentError("DAGMan did not create its submit file, please check the logs") # wait for completion logger.info('waiting for DAGMan to finish') lockfilename = '%s.lock' % 
dagfilename while os.path.exists(lockfilename): time.sleep(5) nipype-0.9.2/nipype/pipeline/plugins/debug.py000066400000000000000000000021621227300005300212510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Debug plugin """ from .base import (PluginBase, logger) from ..utils import (nx) class DebugPlugin(PluginBase): """Execute workflow in series """ def __init__(self, plugin_args=None): super(DebugPlugin, self).__init__(plugin_args=plugin_args) if plugin_args and "callable" in plugin_args and \ hasattr(plugin_args['callable'], '__call__'): self._callable = plugin_args['callable'] else: raise ValueError('plugin_args must contain a callable function') def run(self, graph, config, updatehash=False): """Executes a pre-defined pipeline in a serial order. Parameters ---------- graph : networkx digraph defines order of execution """ if not isinstance(graph, nx.DiGraph): raise ValueError('Input must be a networkx digraph object') logger.info("Executing debug plugin") for node in nx.topological_sort(graph): self._callable(node, graph) nipype-0.9.2/nipype/pipeline/plugins/ipython.py000066400000000000000000000074761227300005300216720ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Parallel workflow execution via IPython controller """ from cPickle import dumps import sys IPython_not_loaded = False try: from IPython import __version__ as IPyversion from IPython.parallel.error import TimeoutError except: IPython_not_loaded = True from .base import (DistributedPluginBase, logger, report_crash) def execute_task(pckld_task, node_config, updatehash): from socket import gethostname from traceback import format_exc from nipype import config, logging traceback=None result=None try: config.update_config(node_config) logging.update_logging(config) from cPickle import loads task = loads(pckld_task) result = task.run(updatehash=updatehash) except: traceback = format_exc() result = task.result return result, traceback, gethostname() class IPythonPlugin(DistributedPluginBase): """Execute workflow with ipython """ def __init__(self, plugin_args=None): if IPython_not_loaded: raise ImportError('IPython parallel could not be imported') super(IPythonPlugin, self).__init__(plugin_args=plugin_args) self.iparallel = None self.taskclient = None self.taskmap = {} self._taskid = 0 def run(self, graph, config, updatehash=False): """Executes a pre-defined pipeline is distributed approaches based on IPython's parallel processing interface """ # retrieve clients again try: name = 'IPython.parallel' __import__(name) self.iparallel = sys.modules[name] except ImportError: raise ImportError("Ipython kernel not found. 
Parallel execution " \ "will be unavailable") try: self.taskclient = self.iparallel.Client() except Exception, e: if isinstance(e, TimeoutError): raise Exception("No IPython clients found.") if isinstance(e, ValueError): raise Exception("Ipython kernel not installed") raise e return super(IPythonPlugin, self).run(graph, config, updatehash=updatehash) def _get_result(self, taskid): if taskid not in self.taskmap: raise ValueError('Task %d not in pending list'%taskid) if self.taskmap[taskid].ready(): result, traceback, hostname = self.taskmap[taskid].get() result_out = dict(result=None, traceback=None) result_out['result'] = result result_out['traceback'] = traceback result_out['hostname'] = hostname return result_out else: return None def _submit_job(self, node, updatehash=False): pckld_node = dumps(node, 2) result_object = self.taskclient.load_balanced_view().apply(execute_task, pckld_node, node.config, updatehash) self._taskid += 1 self.taskmap[self._taskid] = result_object return self._taskid def _report_crash(self, node, result=None): if result and result['traceback']: node._result = result['result'] node._traceback = result['traceback'] return report_crash(node, traceback=result['traceback']) else: return report_crash(node) def _clear_task(self, taskid): if IPyversion >= '0.11': logger.debug("Clearing id: %d"%taskid) self.taskclient.purge_results(self.taskmap[taskid]) del self.taskmap[taskid] nipype-0.9.2/nipype/pipeline/plugins/ipythonx.py000066400000000000000000000056351227300005300220550ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Parallel workflow execution via IPython controller """ import sys IPython_not_loaded = False try: from IPython import __version__ as IPyversion from IPython.kernel.contexts import ConnectionRefusedError except: IPython_not_loaded = True from .base import (DistributedPluginBase, logger, report_crash) class IPythonXPlugin(DistributedPluginBase): """Execute workflow with ipython """ def __init__(self, plugin_args=None): if IPython_not_loaded: raise ImportError('IPython parallel could not be imported') super(IPythonXPlugin, self).__init__(plugin_args=plugin_args) self.ipyclient = None self.taskclient = None def run(self, graph, config, updatehash=False): """Executes a pre-defined pipeline is distributed approaches based on IPython's parallel processing interface """ # retrieve clients again try: name = 'IPython.kernel.client' __import__(name) self.ipyclient = sys.modules[name] except ImportError: raise ImportError("Ipython kernel not found. 
Parallel execution " \ "will be unavailable") try: self.taskclient = self.ipyclient.TaskClient() except Exception, e: if isinstance(e, ConnectionRefusedError): raise Exception("No IPython clients found.") if isinstance(e, ValueError): raise Exception("Ipython kernel not installed") return super(IPythonXPlugin, self).run(graph, config, updatehash=updatehash) def _get_result(self, taskid): return self.taskclient.get_task_result(taskid, block=False) def _submit_job(self, node, updatehash=False): cmdstr = """import sys from traceback import format_exception traceback=None result=None try: result = task.run(updatehash=updatehash) except: etype, eval, etr = sys.exc_info() traceback = format_exception(etype,eval,etr) result = task.result """ task = self.ipyclient.StringTask(cmdstr, push = dict(task=node, updatehash=updatehash), pull = ['result','traceback']) return self.taskclient.run(task, block = False) def _report_crash(self, node, result=None): if result and result['traceback']: node._result = result['result'] node._traceback = result['traceback'] return report_crash(node, traceback=result['traceback']) else: return report_crash(node) def _clear_task(self, taskid): if IPyversion >= '0.10.1': logger.debug("Clearing id: %d"%taskid) self.taskclient.clear(taskid) nipype-0.9.2/nipype/pipeline/plugins/linear.py000066400000000000000000000037531227300005300214440ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Local serial workflow execution """ import os from .base import (PluginBase, logger, report_crash, report_nodes_not_run, str2bool) from ..utils import (nx, dfs_preorder, topological_sort) class LinearPlugin(PluginBase): """Execute workflow in series """ def run(self, graph, config, updatehash=False): """Executes a pre-defined pipeline in a serial order. Parameters ---------- graph : networkx digraph defines order of execution """ if not isinstance(graph, nx.DiGraph): raise ValueError('Input must be a networkx digraph object') logger.info("Running serially.") old_wd = os.getcwd() notrun = [] donotrun = [] nodes, _ = topological_sort(graph) for node in nodes: try: if node in donotrun: continue if self._status_callback: self._status_callback(node, 'start') node.run(updatehash=updatehash) if self._status_callback: self._status_callback(node, 'end') except: os.chdir(old_wd) if str2bool(config['execution']['stop_on_first_crash']): raise # bare except, but i really don't know where a # node might fail crashfile = report_crash(node) # remove dependencies from queue subnodes = [s for s in dfs_preorder(graph, node)] notrun.append(dict(node = node, dependents = subnodes, crashfile = crashfile)) donotrun.extend(subnodes) if self._status_callback: self._status_callback(node, 'exception') report_nodes_not_run(notrun) nipype-0.9.2/nipype/pipeline/plugins/lsf.py000066400000000000000000000113221227300005300207450ustar00rootroot00000000000000"""Parallel workflow execution via LSF """ import os from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) from nipype.interfaces.base import CommandLine from time import sleep import re class LSFPlugin(SGELikeBatchManagerBase): """Execute using LSF Cluster Submission The plugin_args input to run can be used to control the LSF execution. 
Currently supported options are: - template : template to use for batch job submission - bsub_args : arguments to be prepended to the job execution script in the bsub call """ def __init__(self, **kwargs): template = """ #$ -S /bin/sh """ self._retry_timeout = 2 self._max_tries = 2 self._bsub_args = '' if 'plugin_args' in kwargs and kwargs['plugin_args']: if 'retry_timeout' in kwargs['plugin_args']: self._retry_timeout = kwargs['plugin_args']['retry_timeout'] if 'max_tries' in kwargs['plugin_args']: self._max_tries = kwargs['plugin_args']['max_tries'] if 'bsub_args' in kwargs['plugin_args']: self._bsub_args = kwargs['plugin_args']['bsub_args'] super(LSFPlugin, self).__init__(template, **kwargs) def _is_pending(self, taskid): """LSF lists a status of 'PEND' when a job has been submitted but is waiting to be picked up, and 'RUN' when it is actively being processed. But _is_pending should return True until a job has finished and is ready to be checked for completeness. So return True if status is either 'PEND' or 'RUN'""" cmd = CommandLine('bjobs', terminal_output='allatonce') cmd.inputs.args = '%d' % taskid # check lsf task oldlevel = iflogger.level iflogger.setLevel(logging.getLevelName('CRITICAL')) result = cmd.run(ignore_exception=True) iflogger.setLevel(oldlevel) # logger.debug(result.runtime.stdout) if 'DONE' in result.runtime.stdout or 'EXIT' in result.runtime.stdout: return False else: return True def _submit_batchtask(self, scriptfile, node): cmd = CommandLine('bsub', environ=os.environ.data, terminal_output='allatonce') path = os.path.dirname(scriptfile) bsubargs = '' if self._bsub_args: bsubargs = self._bsub_args if 'bsub_args' in node.plugin_args: if 'overwrite' in node.plugin_args and\ node.plugin_args['overwrite']: bsubargs = node.plugin_args['bsub_args'] else: bsubargs += (" " + node.plugin_args['bsub_args']) if '-o' not in bsubargs: # -o outfile bsubargs = '%s -o %s' % (bsubargs, scriptfile + ".log") if '-e' not in bsubargs: bsubargs = '%s -e %s' % (bsubargs, scriptfile + ".log") # -e error file if node._hierarchy: jobname = '.'.join((os.environ.data['LOGNAME'], node._hierarchy, node._id)) else: jobname = '.'.join((os.environ.data['LOGNAME'], node._id)) jobnameitems = jobname.split('.') jobnameitems.reverse() jobname = '.'.join(jobnameitems) cmd.inputs.args = '%s -J %s sh %s' % (bsubargs, jobname, scriptfile) # -J job_name_spec logger.debug('bsub ' + cmd.inputs.args) oldlevel = iflogger.level iflogger.setLevel(logging.getLevelName('CRITICAL')) tries = 0 while True: try: result = cmd.run() except Exception, e: if tries < self._max_tries: tries += 1 sleep( self._retry_timeout) # sleep 2 seconds and try again. 
else: iflogger.setLevel(oldlevel) raise RuntimeError('\n'.join((('Could not submit lsf task' ' for node %s') % node._id, str(e)))) else: break iflogger.setLevel(oldlevel) # retrieve lsf taskid match = re.search('<(\d*)>', result.runtime.stdout) if match: taskid = int(match.groups()[0]) else: raise ScriptError("Can't parse submission job output id: %s" % result.runtime.stdout) self._pending[taskid] = node.output_dir() logger.debug('submitted lsf task: %d for node %s' % (taskid, node._id)) return taskid nipype-0.9.2/nipype/pipeline/plugins/multiproc.py000066400000000000000000000063251227300005300222060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Parallel workflow execution via multiprocessing Support for child processes running as non-daemons based on http://stackoverflow.com/a/8963618/1183453 """ from multiprocessing import Process, Pool, cpu_count, pool from traceback import format_exception import sys from .base import (DistributedPluginBase, report_crash) def run_node(node, updatehash): result = dict(result=None, traceback=None) try: result['result'] = node.run(updatehash=updatehash) except: etype, eval, etr = sys.exc_info() result['traceback'] = format_exception(etype,eval,etr) result['result'] = node.result return result class NonDaemonProcess(Process): """A non-daemon process to support internal multiprocessing. """ def _get_daemon(self): return False def _set_daemon(self, value): pass daemon = property(_get_daemon, _set_daemon) class NonDaemonPool(pool.Pool): """A process pool with non-daemon processes. """ Process = NonDaemonProcess class MultiProcPlugin(DistributedPluginBase): """Execute workflow with multiprocessing The plugin_args input to run can be used to control the multiprocessing execution. 
Currently supported options are: - n_procs : number of processes to use - non_daemon : boolean flag to execute as non-daemon processes """ def __init__(self, plugin_args=None): super(MultiProcPlugin, self).__init__(plugin_args=plugin_args) self._taskresult = {} self._taskid = 0 non_daemon = True n_procs = cpu_count() if plugin_args: if 'n_procs' in plugin_args: n_procs = plugin_args['n_procs'] if 'non_daemon' in plugin_args: non_daemon = plugin_args['non_daemon'] if non_daemon: # run the execution using the non-daemon pool subclass self.pool = NonDaemonPool(processes=n_procs) else: self.pool = Pool(processes=n_procs) def _get_result(self, taskid): if taskid not in self._taskresult: raise RuntimeError('Multiproc task %d not found'%taskid) if not self._taskresult[taskid].ready(): return None return self._taskresult[taskid].get() def _submit_job(self, node, updatehash=False): self._taskid += 1 try: if node.inputs.terminal_output == 'stream': node.inputs.terminal_output = 'allatonce' except: pass self._taskresult[self._taskid] = self.pool.apply_async(run_node, (node, updatehash,)) return self._taskid def _report_crash(self, node, result=None): if result and result['traceback']: node._result = result['result'] node._traceback = result['traceback'] return report_crash(node, traceback=result['traceback']) else: return report_crash(node) def _clear_task(self, taskid): del self._taskresult[taskid] nipype-0.9.2/nipype/pipeline/plugins/pbs.py000066400000000000000000000100511227300005300207430ustar00rootroot00000000000000"""Parallel workflow execution via PBS/Torque """ import os from time import sleep import subprocess from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) from nipype.interfaces.base import CommandLine class PBSPlugin(SGELikeBatchManagerBase): """Execute using PBS/Torque The plugin_args input to run can be used to control the SGE execution. Currently supported options are: - template : template to use for batch job submission - qsub_args : arguments to be prepended to the job execution script in the qsub call - max_jobname_len: maximum length of the job name. Default 15. 
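    A minimal, illustrative invocation (the qsub_args value is an example, not
    a default) could look like::

        wf.run(plugin='PBS',
               plugin_args={'qsub_args': '-l walltime=01:00:00,nodes=1:ppn=1',
                            'max_jobname_len': 15})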
""" # Addtional class variables _max_jobname_len = 15 def __init__(self, **kwargs): template = """ #PBS -V """ self._retry_timeout = 2 self._max_tries = 2 self._max_jobname_length = 15 if 'plugin_args' in kwargs and kwargs['plugin_args']: if 'retry_timeout' in kwargs['plugin_args']: self._retry_timeout = kwargs['plugin_args']['retry_timeout'] if 'max_tries' in kwargs['plugin_args']: self._max_tries = kwargs['plugin_args']['max_tries'] if 'max_jobname_len' in kwargs['plugin_args']: self._max_jobname_len = kwargs['plugin_args']['max_jobname_len'] super(PBSPlugin, self).__init__(template, **kwargs) def _is_pending(self, taskid): # subprocess.Popen requires taskid to be a string proc = subprocess.Popen(["qstat", str(taskid)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, e = proc.communicate() errmsg = 'Unknown Job Id' # %s' % taskid return errmsg not in e def _submit_batchtask(self, scriptfile, node): cmd = CommandLine('qsub', environ=os.environ.data, terminal_output='allatonce') path = os.path.dirname(scriptfile) qsubargs = '' if self._qsub_args: qsubargs = self._qsub_args if 'qsub_args' in node.plugin_args: if 'overwrite' in node.plugin_args and \ node.plugin_args['overwrite']: qsubargs = node.plugin_args['qsub_args'] else: qsubargs += (" " + node.plugin_args['qsub_args']) if '-o' not in qsubargs: qsubargs = '%s -o %s' % (qsubargs, path) if '-e' not in qsubargs: qsubargs = '%s -e %s' % (qsubargs, path) if node._hierarchy: jobname = '.'.join((os.environ.data['LOGNAME'], node._hierarchy, node._id)) else: jobname = '.'.join((os.environ.data['LOGNAME'], node._id)) jobnameitems = jobname.split('.') jobnameitems.reverse() jobname = '.'.join(jobnameitems) jobname = jobname[0:self._max_jobname_len] cmd.inputs.args = '%s -N %s %s' % (qsubargs, jobname, scriptfile) oldlevel = iflogger.level iflogger.setLevel(logging.getLevelName('CRITICAL')) tries = 0 while True: try: result = cmd.run() except Exception, e: if tries < self._max_tries: tries += 1 sleep(self._retry_timeout) # sleep 2 seconds and try again. else: iflogger.setLevel(oldlevel) raise RuntimeError('\n'.join((('Could not submit pbs task' ' for node %s') % node._id, str(e)))) else: break iflogger.setLevel(oldlevel) # retrieve pbs taskid taskid = result.runtime.stdout.split('.')[0] self._pending[taskid] = node.output_dir() logger.debug('submitted pbs task: %s for node %s' % (taskid, node._id)) return taskid nipype-0.9.2/nipype/pipeline/plugins/pbsgraph.py000066400000000000000000000044201227300005300217700ustar00rootroot00000000000000"""Parallel workflow execution via PBS/Torque """ import os import sys from .base import (GraphPluginBase, logger) from ...interfaces.base import CommandLine from .sgegraph import SGEGraphPlugin class PBSGraphPlugin(SGEGraphPlugin): """Execute using PBS/Torque The plugin_args input to run can be used to control the SGE execution. 
Currently supported options are: - template : template to use for batch job submission - qsub_args : arguments to be prepended to the job execution script in the qsub call """ _template = """ #PBS -V """ def _submit_graph(self, pyfiles, dependencies, nodes): batch_dir, _ = os.path.split(pyfiles[0]) submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh') with open(submitjobsfile, 'wt') as fp: fp.writelines('#!/usr/bin/env sh\n') for idx, pyscript in enumerate(pyfiles): node = nodes[idx] template, qsub_args = self._get_args( node, ["template", "qsub_args"]) batch_dir, name = os.path.split(pyscript) name = '.'.join(name.split('.')[:-1]) batchscript = '\n'.join((template, '%s %s' % (sys.executable, pyscript))) batchscriptfile = os.path.join(batch_dir, 'batchscript_%s.sh' % name) with open(batchscriptfile, 'wt') as batchfp: batchfp.writelines(batchscript) batchfp.close() deps = '' if idx in dependencies: values = ['$job%05d' % jobid for jobid in dependencies[idx]] if len(values): deps = '-W depend=afterok:%s' % ':'.join(values) fp.writelines('job%05d=`qsub %s %s %s`\n' % (idx, deps, qsub_args, batchscriptfile)) cmd = CommandLine('sh', environ=os.environ.data, terminal_output='allatonce') cmd.inputs.args = '%s' % submitjobsfile cmd.run() logger.info('submitted all jobs to queue') nipype-0.9.2/nipype/pipeline/plugins/setup.py000066400000000000000000000007141227300005300213240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('plugins', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/pipeline/plugins/sge.py000066400000000000000000000103171227300005300207420ustar00rootroot00000000000000"""Parallel workflow execution via SGE """ import os import re import subprocess from time import sleep from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) from nipype.interfaces.base import CommandLine def qsubSanitizeJobName(testjobname): """ Ensure that qsub job names must begin with a letter. Numbers and punctuation are not allowed. >>> qsubSanitizeJobName('01') 'J01' >>> qsubSanitizeJobName('a01') 'a01' """ if testjobname[0].isalpha(): return testjobname else: return 'J'+testjobname class SGEPlugin(SGELikeBatchManagerBase): """Execute using SGE (OGE not tested) The plugin_args input to run can be used to control the SGE execution. 
Currently supported options are: - template : template to use for batch job submission - qsub_args : arguments to be prepended to the job execution script in the qsub call """ def __init__(self, **kwargs): template = """ #$ -V #$ -S /bin/sh """ self._retry_timeout = 2 self._max_tries = 2 if 'plugin_args' in kwargs and kwargs['plugin_args']: if 'retry_timeout' in kwargs['plugin_args']: self._retry_timeout = kwargs['plugin_args']['retry_timeout'] if 'max_tries' in kwargs['plugin_args']: self._max_tries = kwargs['plugin_args']['max_tries'] super(SGEPlugin, self).__init__(template, **kwargs) def _is_pending(self, taskid): # subprocess.Popen requires taskid to be a string proc = subprocess.Popen(["qstat", '-j', str(taskid)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) o, _ = proc.communicate() return o.startswith('=') def _submit_batchtask(self, scriptfile, node): cmd = CommandLine('qsub', environ=os.environ.data, terminal_output='allatonce') path = os.path.dirname(scriptfile) qsubargs = '' if self._qsub_args: qsubargs = self._qsub_args if 'qsub_args' in node.plugin_args: if 'overwrite' in node.plugin_args and\ node.plugin_args['overwrite']: qsubargs = node.plugin_args['qsub_args'] else: qsubargs += (" " + node.plugin_args['qsub_args']) if '-o' not in qsubargs: qsubargs = '%s -o %s' % (qsubargs, path) if '-e' not in qsubargs: qsubargs = '%s -e %s' % (qsubargs, path) if node._hierarchy: jobname = '.'.join((os.environ.data['LOGNAME'], node._hierarchy, node._id)) else: jobname = '.'.join((os.environ.data['LOGNAME'], node._id)) jobnameitems = jobname.split('.') jobnameitems.reverse() jobname = '.'.join(jobnameitems) jobname = qsubSanitizeJobName(jobname) cmd.inputs.args = '%s -N %s %s' % (qsubargs, jobname, scriptfile) oldlevel = iflogger.level iflogger.setLevel(logging.getLevelName('CRITICAL')) tries = 0 while True: try: result = cmd.run() except Exception, e: if tries < self._max_tries: tries += 1 sleep(self._retry_timeout) # sleep 2 seconds and try again. else: iflogger.setLevel(oldlevel) raise RuntimeError('\n'.join((('Could not submit sge task' ' for node %s') % node._id, str(e)))) else: break iflogger.setLevel(oldlevel) # retrieve sge taskid lines = [line for line in result.runtime.stdout.split('\n') if line] taskid = int(re.match("Your job ([0-9]*) .* has been submitted", lines[-1]).groups()[0]) self._pending[taskid] = node.output_dir() logger.debug('submitted sge task: %d for node %s' % (taskid, node._id)) return taskid nipype-0.9.2/nipype/pipeline/plugins/sgegraph.py000066400000000000000000000072731227300005300217730ustar00rootroot00000000000000"""Parallel workflow execution via SGE """ import os import sys from .base import (GraphPluginBase, logger) from ...interfaces.base import CommandLine class SGEGraphPlugin(GraphPluginBase): """Execute using SGE The plugin_args input to run can be used to control the SGE execution. 
Currently supported options are: - template : template to use for batch job submission - qsub_args : arguments to be prepended to the job execution script in the qsub call """ _template = """ #!/bin/bash #$ -V #$ -S /bin/bash """ def __init__(self, **kwargs): self._qsub_args = '' if 'plugin_args' in kwargs: plugin_args = kwargs['plugin_args'] if 'template' in plugin_args: self._template = plugin_args['template'] if os.path.isfile(self._template): self._template = open(self._template).read() if 'qsub_args' in plugin_args: self._qsub_args = plugin_args['qsub_args'] super(SGEGraphPlugin, self).__init__(**kwargs) def _submit_graph(self, pyfiles, dependencies, nodes): batch_dir, _ = os.path.split(pyfiles[0]) submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh') with open(submitjobsfile, 'wt') as fp: fp.writelines('#!/usr/bin/env bash\n') for idx, pyscript in enumerate(pyfiles): node = nodes[idx] template, qsub_args = self._get_args( node, ["template", "qsub_args"]) batch_dir, name = os.path.split(pyscript) name = '.'.join(name.split('.')[:-1]) batchscript = '\n'.join((template, '%s %s' % (sys.executable, pyscript))) batchscriptfile = os.path.join(batch_dir, 'batchscript_%s.sh' % name) batchscriptoutfile = batchscriptfile + '.o' batchscripterrfile = batchscriptfile + '.e' with open(batchscriptfile, 'wt') as batchfp: batchfp.writelines(batchscript) batchfp.close() deps = '' if idx in dependencies: values = ' ' for jobid in dependencies[idx]: values += 'job%05d,' % jobid if 'job' in values: values = values.rstrip(',') deps = '-hold_jid%s' % values jobname = 'job%05d' % (idx) ## Do not use default output locations if they are set in self._qsub_args stderrFile = '' if self._qsub_args.count('-e ') == 0: stderrFile = '-e {errFile}'.format( errFile=batchscripterrfile) stdoutFile = '' if self._qsub_args.count('-o ') == 0: stdoutFile = '-o {outFile}'.format( outFile=batchscriptoutfile) full_line = '{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript})\n'.format( jobNm=jobname, outFileOption=stdoutFile, errFileOption=stderrFile, extraQSubArgs=qsub_args, dependantIndex=deps, batchscript=batchscriptfile) fp.writelines(full_line) cmd = CommandLine('bash', environ=os.environ.data, terminal_output='allatonce') cmd.inputs.args = '%s' % submitjobsfile cmd.run() logger.info('submitted all jobs to queue') nipype-0.9.2/nipype/pipeline/plugins/slurm.py000066400000000000000000000115671227300005300213360ustar00rootroot00000000000000''' Created on Aug 2, 2013 @author: chadcumba Parallel workflow execution with SLURM ''' import os import re import subprocess from time import sleep from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) from nipype.interfaces.base import CommandLine class SLURMPlugin(SGELikeBatchManagerBase): ''' Execute using SLURM The plugin_args input to run can be used to control the SLURM execution. 
Currently supported options are: - template : template to use for batch job submission - sbatch_args: arguments to pass prepend to the sbatch call ''' def __init__(self, **kwargs): template="#!/bin/bash" self._retry_timeout = 2 self._max_tries = 2 self._template = template self._sbatch_args = None if 'plugin_args' in kwargs and kwargs['plugin_args']: if 'retry_timeout' in kwargs['plugin_args']: self._retry_timeout = kwargs['plugin_args']['retry_timeout'] if 'max_tries' in kwargs['plugin_args']: self._max_tries = kwargs['plugin_args']['max_tries'] if 'template' in kwargs['plugin_args']: self._template = kwargs['plugin_args']['template'] if os.path.isfile(self._template): self._template = open(self._template).read() if 'sbatch_args' in kwargs['plugin_args']: self._sbatch_args = kwargs['plugin_args']['sbatch_args'] self._pending = {} super(SLURMPlugin, self).__init__(template, **kwargs) def _is_pending(self, taskid): # subprocess.Popen requires taskid to be a string proc = subprocess.Popen(["showq", '-u'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) o, _ = proc.communicate() return o.find(str(taskid)) > -1 def _submit_batchtask(self, scriptfile, node): """ This is more or less the _submit_batchtask from sge.py with flipped variable names, different command line switches, and different output formatting/processing """ cmd = CommandLine('sbatch', environ=os.environ.data, terminal_output='allatonce') path = os.path.dirname(scriptfile) sbatch_args = '' if self._sbatch_args: sbatch_args = self._sbatch_args if 'sbatch_args' in node.plugin_args: if 'overwrite' in node.plugin_args and\ node.plugin_args['overwrite']: sbatch_args = node.plugin_args['sbatch_args'] else: sbatch_args += (" " + node.plugin_args['sbatch_args']) if '-o' not in sbatch_args: sbatch_args = '%s -o %s' % (sbatch_args, os.path.join(path, 'slurm-%j.out')) if '-e' not in sbatch_args: sbatch_args = '%s -e %s' % (sbatch_args, os.path.join(path, 'slurm-%j.out')) if '-p' not in sbatch_args: sbatch_args = '%s -p normal' % (sbatch_args) if '-n' not in sbatch_args: sbatch_args = '%s -n 16' % (sbatch_args) if '-t' not in sbatch_args: sbatch_args = '%s -t 1:00:00' % (sbatch_args) if node._hierarchy: jobname = '.'.join((os.environ.data['LOGNAME'], node._hierarchy, node._id)) else: jobname = '.'.join((os.environ.data['LOGNAME'], node._id)) jobnameitems = jobname.split('.') jobnameitems.reverse() jobname = '.'.join(jobnameitems) cmd.inputs.args = '%s -J %s %s' % (sbatch_args, jobname, scriptfile) oldlevel = iflogger.level iflogger.setLevel(logging.getLevelName('CRITICAL')) tries = 0 while True: try: result = cmd.run() except Exception, e: if tries < self._max_tries: tries += 1 sleep(self._retry_timeout) # sleep 2 seconds and try again. 
else: iflogger.setLevel(oldlevel) raise RuntimeError('\n'.join((('Could not submit sbatch task' ' for node %s') % node._id, str(e)))) else: break logger.debug('Ran command ({0})'.format(cmd.cmdline)) iflogger.setLevel(oldlevel) # retrieve taskid lines = [line for line in result.runtime.stdout.split('\n') if line] taskid = int(re.match("Submitted batch job ([0-9]*)", lines[-1]).groups()[0]) self._pending[taskid] = node.output_dir() logger.debug('submitted sbatch task: %d for node %s' % (taskid, node._id)) return taskid nipype-0.9.2/nipype/pipeline/plugins/somaflow.py000066400000000000000000000025451227300005300220170ustar00rootroot00000000000000"""Parallel workflow execution via PBS/Torque """ import os import sys soma_not_loaded = False try: from soma.workflow.client import (Job, Workflow, WorkflowController, Helper) except: soma_not_loaded = True from .base import (GraphPluginBase, logger) class SomaFlowPlugin(GraphPluginBase): """Execute using Soma workflow """ def __init__(self, plugin_args=None): if soma_not_loaded: raise ImportError('SomaFlow could not be imported') super(SomaFlowPlugin, self).__init__(plugin_args=plugin_args) def _submit_graph(self, pyfiles, dependencies, nodes): jobs = [] soma_deps = [] for idx, fname in enumerate(pyfiles): name = os.path.splitext(os.path.split(fname)[1])[0] jobs.append(Job(command=[sys.executable, fname], name=name)) for key, values in dependencies.items(): for val in values: soma_deps.append((jobs[val], jobs[key])) wf = Workflow(jobs, soma_deps) logger.info('serializing workflow') Helper.serialize('workflow', wf) controller = WorkflowController() logger.info('submitting workflow') wf_id = controller.submit_workflow(wf) Helper.wait_workflow(wf_id, controller) nipype-0.9.2/nipype/pipeline/plugins/tests/000077500000000000000000000000001227300005300207525ustar00rootroot00000000000000nipype-0.9.2/nipype/pipeline/plugins/tests/__init__.py000066400000000000000000000001621227300005300230620ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: nipype-0.9.2/nipype/pipeline/plugins/tests/test_base.py000066400000000000000000000022001227300005300232670ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for the engine module """ import numpy as np import scipy.sparse as ssp from nipype.testing import (assert_raises, assert_equal, assert_true, assert_false, skipif) import nipype.pipeline.plugins.base as pb def test_scipy_sparse(): foo = ssp.lil_matrix(np.eye(3, k=1)) goo = foo.getrowview(0) goo[goo.nonzero()] = 0 yield assert_equal, foo[0,1], 0 ''' Can use the following code to test that a mapnode crash continues successfully Need to put this into a nose-test with a timeout import nipype.interfaces.utility as niu import nipype.pipeline.engine as pe wf = pe.Workflow(name='test') def func(arg1): if arg1 == 2: raise Exception('arg cannot be ' + str(arg1)) return arg1 funkynode = pe.MapNode(niu.Function(function=func, input_names=['arg1'], output_names=['out']), iterfield=['arg1'], name = 'functor') funkynode.inputs.arg1 = [1,2] wf.add_nodes([funkynode]) wf.base_dir = '/tmp' wf.run(plugin='MultiProc') '''nipype-0.9.2/nipype/pipeline/plugins/tests/test_callback.py000066400000000000000000000063411227300005300241230ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for workflow 
callbacks """ from tempfile import mkdtemp from shutil import rmtree from nipype.testing import assert_equal import nipype.interfaces.utility as niu import nipype.pipeline.engine as pe def func(): return def bad_func(): raise Exception class Status: def __init__(self): self.statuses = [] def callback(self, node, status): self.statuses.append((node, status)) def test_callback_normal(): so = Status() wf = pe.Workflow(name='test', base_dir=mkdtemp()) f_node = pe.Node(niu.Function(function=func, input_names=[], output_names=[]), name='f_node') wf.add_nodes([f_node]) wf.config['execution'] = {'crashdump_dir': wf.base_dir} wf.run(plugin="Linear", plugin_args={'status_callback': so.callback}) assert_equal(len(so.statuses), 2) for (n, s) in so.statuses: yield assert_equal, n.name, 'f_node' yield assert_equal, so.statuses[0][1], 'start' yield assert_equal, so.statuses[1][1], 'end' rmtree(wf.base_dir) def test_callback_exception(): so = Status() wf = pe.Workflow(name='test', base_dir=mkdtemp()) f_node = pe.Node(niu.Function(function=bad_func, input_names=[], output_names=[]), name='f_node') wf.add_nodes([f_node]) wf.config['execution'] = {'crashdump_dir': wf.base_dir} try: wf.run(plugin="Linear", plugin_args={'status_callback': so.callback}) except: pass assert_equal(len(so.statuses), 2) for (n, s) in so.statuses: yield assert_equal, n.name, 'f_node' yield assert_equal, so.statuses[0][1], 'start' yield assert_equal, so.statuses[1][1], 'exception' rmtree(wf.base_dir) def test_callback_multiproc_normal(): so = Status() wf = pe.Workflow(name='test', base_dir=mkdtemp()) f_node = pe.Node(niu.Function(function=func, input_names=[], output_names=[]), name='f_node') wf.add_nodes([f_node]) wf.config['execution'] = {'crashdump_dir': wf.base_dir} wf.run(plugin='MultiProc', plugin_args={'status_callback': so.callback}) assert_equal(len(so.statuses), 2) for (n, s) in so.statuses: yield assert_equal, n.name, 'f_node' yield assert_equal, so.statuses[0][1], 'start' yield assert_equal, so.statuses[1][1], 'end' rmtree(wf.base_dir) def test_callback_multiproc_exception(): so = Status() wf = pe.Workflow(name='test', base_dir=mkdtemp()) f_node = pe.Node(niu.Function(function=bad_func, input_names=[], output_names=[]), name='f_node') wf.add_nodes([f_node]) wf.config['execution'] = {'crashdump_dir': wf.base_dir} try: wf.run(plugin='MultiProc', plugin_args={'status_callback': so.callback}) except: pass assert_equal(len(so.statuses), 2) for (n, s) in so.statuses: yield assert_equal, n.name, 'f_node' yield assert_equal, so.statuses[0][1], 'start' yield assert_equal, so.statuses[1][1], 'exception' rmtree(wf.base_dir) nipype-0.9.2/nipype/pipeline/plugins/tests/test_debug.py000066400000000000000000000030601227300005300234500ustar00rootroot00000000000000import os import nipype.interfaces.base as nib from tempfile import mkdtemp from shutil import rmtree from nipype.testing import assert_raises, assert_false import nipype.pipeline.engine as pe class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') input2 = nib.traits.Int(desc='a random int') class OutputSpec(nib.TraitedSpec): output1 = nib.traits.List(nib.traits.Int, desc='outputs') class TestInterface(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['output1'] = [1, self.inputs.input1] return outputs def callme(node, graph): pass def test_debug(): cur_dir = os.getcwd() temp_dir = 
mkdtemp(prefix='test_engine_') os.chdir(temp_dir) pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.MapNode(interface=TestInterface(), iterfield=['input1'], name='mod2') pipe.connect([(mod1,mod2,[('output1','input1')])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 run_wf = lambda: pipe.run(plugin="Debug") yield assert_raises, ValueError, run_wf try: pipe.run(plugin="Debug", plugin_args={'callable': callme}) exception_raised = False except Exception: exception_raised = True yield assert_false, exception_raised os.chdir(cur_dir) rmtree(temp_dir)nipype-0.9.2/nipype/pipeline/plugins/tests/test_linear.py000066400000000000000000000027421227300005300236420ustar00rootroot00000000000000import os import nipype.interfaces.base as nib from tempfile import mkdtemp from shutil import rmtree from nipype.testing import assert_equal import nipype.pipeline.engine as pe class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') input2 = nib.traits.Int(desc='a random int') class OutputSpec(nib.TraitedSpec): output1 = nib.traits.List(nib.traits.Int, desc='outputs') class TestInterface(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['output1'] = [1, self.inputs.input1] return outputs def test_run_in_series(): cur_dir = os.getcwd() temp_dir = mkdtemp(prefix='test_engine_') os.chdir(temp_dir) pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.MapNode(interface=TestInterface(), iterfield=['input1'], name='mod2') pipe.connect([(mod1,mod2,[('output1','input1')])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="Linear") names = ['.'.join((node._hierarchy,node.name)) for node in execgraph.nodes()] node = execgraph.nodes()[names.index('pipe.mod1')] result = node.get_output('output1') yield assert_equal, result, [1, 1] os.chdir(cur_dir) rmtree(temp_dir)nipype-0.9.2/nipype/pipeline/plugins/tests/test_multiproc.py000066400000000000000000000027451227300005300244110ustar00rootroot00000000000000import os import nipype.interfaces.base as nib from tempfile import mkdtemp from shutil import rmtree from nipype.testing import assert_equal import nipype.pipeline.engine as pe class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') input2 = nib.traits.Int(desc='a random int') class OutputSpec(nib.TraitedSpec): output1 = nib.traits.List(nib.traits.Int, desc='outputs') class TestInterface(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['output1'] = [1, self.inputs.input1] return outputs def test_run_multiproc(): cur_dir = os.getcwd() temp_dir = mkdtemp(prefix='test_engine_') os.chdir(temp_dir) pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.MapNode(interface=TestInterface(), iterfield=['input1'], name='mod2') pipe.connect([(mod1,mod2,[('output1','input1')])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="MultiProc") names = ['.'.join((node._hierarchy,node.name)) for node in execgraph.nodes()] node = execgraph.nodes()[names.index('pipe.mod1')] result = node.get_output('output1') yield assert_equal, result, [1, 1] os.chdir(cur_dir) 
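    # restore the original working directory before removing the scratch area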
rmtree(temp_dir)nipype-0.9.2/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py000066400000000000000000000100661227300005300264420ustar00rootroot00000000000000import os from tempfile import mkdtemp from shutil import rmtree from nipype.testing import assert_equal, assert_true import nipype.pipeline.engine as pe from nipype.interfaces.utility import Function def mytestFunction(insum=0): ''' Run a multiprocessing job and spawn child processes. ''' # need to import here since this is executed as an external process import multiprocessing import tempfile import time import os numberOfThreads = 2 # list of processes t = [None] * numberOfThreads # list of alive flags a = [None] * numberOfThreads # list of tempFiles f = [None] * numberOfThreads def dummyFunction(filename): ''' This function writes the value 45 to the given filename. ''' j = 0 for i in range(0, 10): j += i # j is now 45 (0+1+2+3+4+5+6+7+8+9) with open(filename, 'w') as f: f.write(str(j)) for n in xrange(numberOfThreads): # mark thread as alive a[n] = True # create a temp file to use as the data exchange container tmpFile = tempfile.mkstemp('.txt', 'test_engine_')[1] f[n] = tmpFile # keep track of the temp file t[n] = multiprocessing.Process(target=dummyFunction, args=(tmpFile,)) # fire up the job t[n].start() # block until all processes are done allDone = False while not allDone: time.sleep(1) for n in xrange(numberOfThreads): a[n] = t[n].is_alive() if not any(a): # if no thread is alive allDone = True # here, all processes are done # read in all temp files and sum them up total = insum for file in f: with open(file) as fd: total += int(fd.read()) os.remove(file) return total def run_multiproc_nondaemon_with_flag(nondaemon_flag): ''' Start a pipe with two nodes using the multiproc plugin and passing the nondaemon_flag. ''' cur_dir = os.getcwd() temp_dir = mkdtemp(prefix='test_engine_') os.chdir(temp_dir) pipe = pe.Workflow(name='pipe') f1 = pe.Node(interface=Function(function=mytestFunction, input_names=['insum'], output_names=['sum_out']), name='f1') f2 = pe.Node(interface=Function(function=mytestFunction, input_names=['insum'], output_names=['sum_out']), name='f2') pipe.connect([(f1, f2, [('sum_out', 'insum')])]) pipe.base_dir = os.getcwd() f1.inputs.insum = 0 pipe.config = {'execution': {'stop_on_first_crash': True}} # execute the pipe using the MultiProc plugin with 2 processes and the non_daemon flag # to enable child processes which start other multiprocessing jobs execgraph = pipe.run(plugin="MultiProc", plugin_args={'n_procs': 2, 'non_daemon': nondaemon_flag}) names = ['.'.join((node._hierarchy,node.name)) for node in execgraph.nodes()] node = execgraph.nodes()[names.index('pipe.f2')] result = node.get_output('sum_out') os.chdir(cur_dir) rmtree(temp_dir) return result def test_run_multiproc_nondaemon_false(): ''' This is the entry point for the test. Two times a pipe of several multiprocessing jobs gets executed. First, without the nondaemon flag. Second, with the nondaemon flag. Since the processes of the pipe start child processes, the execution only succeeds when the non_daemon flag is on. 
''' shouldHaveFailed = False try: # with nondaemon_flag = False, the execution should fail run_multiproc_nondaemon_with_flag(False) except: shouldHaveFailed = True yield assert_true, shouldHaveFailed def test_run_multiproc_nondaemon_true(): # with nondaemon_flag = True, the execution should succeed result = run_multiproc_nondaemon_with_flag(True) yield assert_equal, result, 180 # n_procs (2) * numberOfThreads (2) * 45 == 180 nipype-0.9.2/nipype/pipeline/plugins/tests/test_pbs.py000066400000000000000000000030201227300005300231420ustar00rootroot00000000000000import os from shutil import rmtree from tempfile import mkdtemp from time import sleep import nipype.interfaces.base as nib from nipype.testing import assert_equal, skipif import nipype.pipeline.engine as pe class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') input2 = nib.traits.Int(desc='a random int') class OutputSpec(nib.TraitedSpec): output1 = nib.traits.List(nib.traits.Int, desc='outputs') class TestInterface(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['output1'] = [1, self.inputs.input1] return outputs @skipif(True) def test_run_pbsgraph(): cur_dir = os.getcwd() temp_dir = mkdtemp(prefix='test_engine_') os.chdir(temp_dir) pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.MapNode(interface=TestInterface(), iterfield=['input1'], name='mod2') pipe.connect([(mod1,mod2,[('output1','input1')])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="PBSGraph") names = ['.'.join((node._hierarchy,node.name)) for node in execgraph.nodes()] node = execgraph.nodes()[names.index('pipe.mod1')] result = node.get_output('output1') yield assert_equal, result, [1, 1] os.chdir(cur_dir) rmtree(temp_dir)nipype-0.9.2/nipype/pipeline/plugins/tests/test_somaflow.py000066400000000000000000000031311227300005300242100ustar00rootroot00000000000000import os from shutil import rmtree from tempfile import mkdtemp from time import sleep import nipype.interfaces.base as nib from nipype.testing import assert_equal, skipif import nipype.pipeline.engine as pe from nipype.pipeline.plugins.somaflow import soma_not_loaded class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') input2 = nib.traits.Int(desc='a random int') class OutputSpec(nib.TraitedSpec): output1 = nib.traits.List(nib.traits.Int, desc='outputs') class TestInterface(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['output1'] = [1, self.inputs.input1] return outputs @skipif(soma_not_loaded) def test_run_somaflow(): cur_dir = os.getcwd() temp_dir = mkdtemp(prefix='test_engine_') os.chdir(temp_dir) pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.MapNode(interface=TestInterface(), iterfield=['input1'], name='mod2') pipe.connect([(mod1,mod2,[('output1','input1')])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="SomaFlow") names = ['.'.join((node._hierarchy,node.name)) for node in execgraph.nodes()] node = execgraph.nodes()[names.index('pipe.mod1')] result = node.get_output('output1') yield assert_equal, result, [1, 1] os.chdir(cur_dir) 
rmtree(temp_dir)nipype-0.9.2/nipype/pipeline/report_template.html000066400000000000000000000150431227300005300222260ustar00rootroot00000000000000

<!-- report_template.html: D3 hierarchical edge bundling view of the workflow graph
     ("Flare imports" example) with a tension control; full markup omitted. -->
nipype-0.9.2/nipype/pipeline/setup.py000066400000000000000000000010441227300005300176400ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('pipeline', parent_package, top_path) config.add_subpackage('plugins') config.add_data_dir('tests') config.add_data_files('report_template.html') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/pipeline/tests/000077500000000000000000000000001227300005300172715ustar00rootroot00000000000000nipype-0.9.2/nipype/pipeline/tests/__init__.py000066400000000000000000000003071227300005300214020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from nipype.testing import skip_if_no_package skip_if_no_package('networkx', '1.0') nipype-0.9.2/nipype/pipeline/tests/test_engine.py000066400000000000000000000535201227300005300221540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for the engine module """ from copy import deepcopy from glob import glob import os from shutil import rmtree from tempfile import mkdtemp import networkx as nx from nipype.testing import (assert_raises, assert_equal, assert_true, assert_false) import nipype.interfaces.base as nib import nipype.pipeline.engine as pe from nipype import logging class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') input2 = nib.traits.Int(desc='a random int') class OutputSpec(nib.TraitedSpec): output1 = nib.traits.List(nib.traits.Int, desc='outputs') class TestInterface(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['output1'] = [1, self.inputs.input1] return outputs def test_init(): yield assert_raises, Exception, pe.Workflow pipe = pe.Workflow(name='pipe') yield assert_equal, type(pipe._graph), nx.DiGraph def test_connect(): pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.Node(interface=TestInterface(),name='mod2') pipe.connect([(mod1,mod2,[('output1','input1')])]) yield assert_true, mod1 in pipe._graph.nodes() yield assert_true, mod2 in pipe._graph.nodes() yield assert_equal, pipe._graph.get_edge_data(mod1,mod2), {'connect':[('output1','input1')]} def test_add_nodes(): pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.Node(interface=TestInterface(),name='mod2') pipe.add_nodes([mod1,mod2]) yield assert_true, mod1 in pipe._graph.nodes() yield assert_true, mod2 in pipe._graph.nodes() # Test graph expansion. The following set tests the building blocks # of the graph expansion routine. 
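# Each of test1 through test8 below builds a small workflow with a different
# combination of iterables on its nodes, expands the flat graph and checks the
# number of nodes and edges in the resulting execution graph.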
# XXX - SG I'll create a graphical version of these tests and actually # ensure that all connections are tested later def test1(): pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') pipe.add_nodes([mod1]) pipe._flatgraph = pipe._create_flat_graph() pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) yield assert_equal, len(pipe._execgraph.nodes()), 1 yield assert_equal, len(pipe._execgraph.edges()), 0 def test2(): pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod1.iterables = dict(input1=lambda:[1,2],input2=lambda:[1,2]) pipe.add_nodes([mod1]) pipe._flatgraph = pipe._create_flat_graph() pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) yield assert_equal, len(pipe._execgraph.nodes()), 4 yield assert_equal, len(pipe._execgraph.edges()), 0 def test3(): pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod1.iterables = {} mod2 = pe.Node(interface=TestInterface(),name='mod2') mod2.iterables = dict(input1=lambda:[1,2]) pipe.connect([(mod1,mod2,[('output1','input2')])]) pipe._flatgraph = pipe._create_flat_graph() pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) yield assert_equal, len(pipe._execgraph.nodes()), 3 yield assert_equal, len(pipe._execgraph.edges()), 2 def test4(): pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.Node(interface=TestInterface(),name='mod2') mod1.iterables = dict(input1=lambda:[1,2]) mod2.iterables = {} pipe.connect([(mod1,mod2,[('output1','input2')])]) pipe._flatgraph = pipe._create_flat_graph() pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) yield assert_equal, len(pipe._execgraph.nodes()), 4 yield assert_equal, len(pipe._execgraph.edges()), 2 def test5(): pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.Node(interface=TestInterface(),name='mod2') mod1.iterables = dict(input1=lambda:[1,2]) mod2.iterables = dict(input1=lambda:[1,2]) pipe.connect([(mod1,mod2,[('output1','input2')])]) pipe._flatgraph = pipe._create_flat_graph() pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) yield assert_equal, len(pipe._execgraph.nodes()), 6 yield assert_equal, len(pipe._execgraph.edges()), 4 def test6(): pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.Node(interface=TestInterface(),name='mod2') mod3 = pe.Node(interface=TestInterface(),name='mod3') mod1.iterables = {} mod2.iterables = dict(input1=lambda:[1,2]) mod3.iterables = {} pipe.connect([(mod1,mod2,[('output1','input2')]), (mod2,mod3,[('output1','input2')])]) pipe._flatgraph = pipe._create_flat_graph() pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) yield assert_equal, len(pipe._execgraph.nodes()), 5 yield assert_equal, len(pipe._execgraph.edges()), 4 def test7(): pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.Node(interface=TestInterface(),name='mod2') mod3 = pe.Node(interface=TestInterface(),name='mod3') mod1.iterables = dict(input1=lambda:[1,2]) mod2.iterables = {} mod3.iterables = {} pipe.connect([(mod1,mod3,[('output1','input1')]), (mod2,mod3,[('output1','input2')])]) pipe._flatgraph = pipe._create_flat_graph() pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) yield assert_equal, len(pipe._execgraph.nodes()), 5 yield assert_equal, len(pipe._execgraph.edges()), 4 def 
test8(): pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.Node(interface=TestInterface(),name='mod2') mod3 = pe.Node(interface=TestInterface(),name='mod3') mod1.iterables = dict(input1=lambda:[1,2]) mod2.iterables = dict(input1=lambda:[1,2]) mod3.iterables = {} pipe.connect([(mod1,mod3,[('output1','input1')]), (mod2,mod3,[('output1','input2')])]) pipe._flatgraph = pipe._create_flat_graph() pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) yield assert_equal, len(pipe._execgraph.nodes()), 8 yield assert_equal, len(pipe._execgraph.edges()), 8 edgenum = sorted([(len(pipe._execgraph.in_edges(node)) + \ len(pipe._execgraph.out_edges(node))) \ for node in pipe._execgraph.nodes()]) yield assert_true, edgenum[0]>0 def test_expansion(): pipe1 = pe.Workflow(name='pipe1') mod1 = pe.Node(interface=TestInterface(),name='mod1') mod2 = pe.Node(interface=TestInterface(),name='mod2') pipe1.connect([(mod1,mod2,[('output1','input2')])]) pipe2 = pe.Workflow(name='pipe2') mod3 = pe.Node(interface=TestInterface(),name='mod3') mod4 = pe.Node(interface=TestInterface(),name='mod4') pipe2.connect([(mod3,mod4,[('output1','input2')])]) pipe3 = pe.Workflow(name="pipe3") pipe3.connect([(pipe1, pipe2, [('mod2.output1','mod4.input1')])]) pipe4 = pe.Workflow(name="pipe4") mod5 = pe.Node(interface=TestInterface(),name='mod5') pipe4.add_nodes([mod5]) pipe5 = pe.Workflow(name="pipe5") pipe5.add_nodes([pipe4]) pipe6 = pe.Workflow(name="pipe6") pipe6.connect([(pipe5, pipe3, [('pipe4.mod5.output1','pipe2.mod3.input1')])]) error_raised = False try: pipe6._flatgraph = pipe6._create_flat_graph() except: error_raised = True yield assert_false, error_raised def test_iterable_expansion(): import nipype.pipeline.engine as pe wf1 = pe.Workflow(name='test') node1 = pe.Node(TestInterface(),name='node1') node2 = pe.Node(TestInterface(),name='node2') node1.iterables = ('input1',[1,2]) wf1.connect(node1,'output1', node2, 'input2') wf3 = pe.Workflow(name='group') for i in [0,1,2]: wf3.add_nodes([wf1.clone(name='test%d'%i)]) wf3._flatgraph = wf3._create_flat_graph() yield assert_equal, len(pe.generate_expanded_graph(wf3._flatgraph).nodes()),12 def test_synchronize_expansion(): import nipype.pipeline.engine as pe wf1 = pe.Workflow(name='test') node1 = pe.Node(TestInterface(),name='node1') node1.iterables = [('input1',[1,2]),('input2',[3,4,5])] node1.synchronize = True node2 = pe.Node(TestInterface(),name='node2') wf1.connect(node1,'output1', node2, 'input2') wf3 = pe.Workflow(name='group') for i in [0,1,2]: wf3.add_nodes([wf1.clone(name='test%d'%i)]) wf3._flatgraph = wf3._create_flat_graph() # Each expanded graph clone has: # 3 node1 expansion nodes and # 1 node2 replicate per node1 replicate # => 2 * 3 = 6 nodes per expanded subgraph # => 18 nodes in the group yield assert_equal, len(pe.generate_expanded_graph(wf3._flatgraph).nodes()), 18 def test_synchronize_tuples_expansion(): import nipype.pipeline.engine as pe wf1 = pe.Workflow(name='test') node1 = pe.Node(TestInterface(),name='node1') node2 = pe.Node(TestInterface(),name='node2') node1.iterables = [('input1','input2'), [(1,3), (2,4), (None,5)]] node1.synchronize = True wf1.connect(node1,'output1', node2, 'input2') wf3 = pe.Workflow(name='group') for i in [0,1,2]: wf3.add_nodes([wf1.clone(name='test%d'%i)]) wf3._flatgraph = wf3._create_flat_graph() # Identical to test_synchronize_expansion yield assert_equal, len(pe.generate_expanded_graph(wf3._flatgraph).nodes()), 18 def test_itersource_expansion(): import 
nipype.pipeline.engine as pe wf1 = pe.Workflow(name='test') node1 = pe.Node(TestInterface(),name='node1') node1.iterables = ('input1',[1,2]) node2 = pe.Node(TestInterface(),name='node2') wf1.connect(node1,'output1', node2, 'input1') node3 = pe.Node(TestInterface(),name='node3') node3.itersource = ('node1', 'input1') node3.iterables = [('input1', {1:[3,4], 2:[5,6,7]})] wf1.connect(node2,'output1', node3, 'input1') node4 = pe.Node(TestInterface(),name='node4') wf1.connect(node3,'output1', node4, 'input1') wf3 = pe.Workflow(name='group') for i in [0,1,2]: wf3.add_nodes([wf1.clone(name='test%d'%i)]) wf3._flatgraph = wf3._create_flat_graph() # each expanded graph clone has: # 2 node1 expansion nodes, # 1 node2 per node1 replicate, # 2 node3 replicates for the node1 input1 value 1, # 3 node3 replicates for the node1 input1 value 2 and # 1 node4 successor per node3 replicate # => 2 + 2 + (2 + 3) + 5 = 14 nodes per expanded graph clone # => 3 * 14 = 42 nodes in the group yield assert_equal, len(pe.generate_expanded_graph(wf3._flatgraph).nodes()), 42 def test_itersource_synchronize1_expansion(): import nipype.pipeline.engine as pe wf1 = pe.Workflow(name='test') node1 = pe.Node(TestInterface(),name='node1') node1.iterables = [('input1',[1,2]), ('input2',[3,4])] node1.synchronize = True node2 = pe.Node(TestInterface(),name='node2') wf1.connect(node1,'output1', node2, 'input1') node3 = pe.Node(TestInterface(),name='node3') node3.itersource = ('node1', ['input1', 'input2']) node3.iterables = [('input1', {(1,3):[5,6]}), ('input2', {(1,3):[7,8], (2,4): [9]})] wf1.connect(node2,'output1', node3, 'input1') node4 = pe.Node(TestInterface(),name='node4') wf1.connect(node3,'output1', node4, 'input1') wf3 = pe.Workflow(name='group') for i in [0,1,2]: wf3.add_nodes([wf1.clone(name='test%d'%i)]) wf3._flatgraph = wf3._create_flat_graph() # each expanded graph clone has: # 2 node1 expansion nodes, # 1 node2 per node1 replicate, # 2 node3 replicates for the node1 input1 value 1, # 3 node3 replicates for the node1 input1 value 2 and # 1 node4 successor per node3 replicate # => 2 + 2 + (2 + 3) + 5 = 14 nodes per expanded graph clone # => 3 * 14 = 42 nodes in the group yield assert_equal, len(pe.generate_expanded_graph(wf3._flatgraph).nodes()), 42 def test_itersource_synchronize2_expansion(): import nipype.pipeline.engine as pe wf1 = pe.Workflow(name='test') node1 = pe.Node(TestInterface(),name='node1') node1.iterables = [('input1',[1,2]), ('input2',[3,4])] node1.synchronize = True node2 = pe.Node(TestInterface(),name='node2') wf1.connect(node1,'output1', node2, 'input1') node3 = pe.Node(TestInterface(),name='node3') node3.itersource = ('node1', ['input1', 'input2']) node3.synchronize = True node3.iterables = [('input1', 'input2'), {(1,3):[(5,7), (6,8)], (2,4):[(None,9)]}] wf1.connect(node2,'output1', node3, 'input1') node4 = pe.Node(TestInterface(),name='node4') wf1.connect(node3,'output1', node4, 'input1') wf3 = pe.Workflow(name='group') for i in [0,1,2]: wf3.add_nodes([wf1.clone(name='test%d'%i)]) wf3._flatgraph = wf3._create_flat_graph() # each expanded graph clone has: # 2 node1 expansion nodes, # 1 node2 per node1 replicate, # 2 node3 replicates for the node1 input1 value 1, # 1 node3 replicates for the node1 input1 value 2 and # 1 node4 successor per node3 replicate # => 2 + 2 + (2 + 1) + 3 = 10 nodes per expanded graph clone # => 3 * 10 = 30 nodes in the group yield assert_equal, len(pe.generate_expanded_graph(wf3._flatgraph).nodes()), 30 def test_disconnect(): import nipype.pipeline.engine as pe from 
nipype.interfaces.utility import IdentityInterface a = pe.Node(IdentityInterface(fields=['a','b']),name='a') b = pe.Node(IdentityInterface(fields=['a','b']),name='b') flow1 = pe.Workflow(name='test') flow1.connect(a,'a',b,'a') flow1.disconnect(a,'a',b,'a') yield assert_equal, flow1._graph.edges(), [] def test_doubleconnect(): import nipype.pipeline.engine as pe from nipype.interfaces.utility import IdentityInterface a = pe.Node(IdentityInterface(fields=['a','b']),name='a') b = pe.Node(IdentityInterface(fields=['a','b']),name='b') flow1 = pe.Workflow(name='test') flow1.connect(a,'a',b,'a') x = lambda: flow1.connect(a,'b',b,'a') yield assert_raises, Exception, x c = pe.Node(IdentityInterface(fields=['a','b']),name='c') flow1 = pe.Workflow(name='test2') x = lambda : flow1.connect([(a, c, [('b', 'b')]), (b, c, [('a', 'b')])]) yield assert_raises, Exception, x ''' Test for order of iterables import nipype.pipeline.engine as pe import nipype.interfaces.utility as niu wf1 = pe.Workflow(name='wf1') node1 = pe.Node(interface=niu.IdentityInterface(fields=['a1','b1']), name='node1') node1.iterables = ('a1', [1,2]) wf1.add_nodes([node1]) wf2 = pe.Workflow(name='wf2') node2 = pe.Node(interface=niu.IdentityInterface(fields=['a2','b2']), name='node2') wf2.add_nodes([node2]) wf1.connect(node1, 'a1', wf2, 'node2.a2') node4 = pe.Node(interface=niu.IdentityInterface(fields=['a4','b4']), name='node4') #node4.iterables = ('a4', [5,6]) wf2.connect(node2, 'b2', node4, 'b4') wf3 = pe.Workflow(name='wf3') node3 = pe.Node(interface=niu.IdentityInterface(fields=['a3','b3']), name='node3') node3.iterables = ('b3', [3,4]) wf3.add_nodes([node3]) wf1.connect(wf3, 'node3.b3', wf2, 'node2.b2') wf1.base_dir = os.path.join(os.getcwd(),'testit') wf1.run(inseries=True, createdirsonly=True) wf1.write_graph(graph2use='exec') ''' ''' import nipype.pipeline.engine as pe import nipype.interfaces.spm as spm import os from nipype.utils.config import config from StringIO import StringIO config.readfp(StringIO(""" [execution] remove_unnecessary_outputs = true """)) segment = pe.Node(interface=spm.Segment(), name="segment") segment.inputs.data = os.path.abspath("data/T1.nii") segment.inputs.gm_output_type = [True, True, True] segment.inputs.wm_output_type = [True, True, True] smooth_gm = pe.Node(interface=spm.Smooth(), name="smooth_gm") workflow = pe.Workflow(name="workflow_cleanup_test") workflow.base_dir = os.path.abspath('./workflow_cleanup_test') workflow.connect([(segment, smooth_gm, [('native_gm_image','in_files')])]) workflow.run() #adding new node that uses one of the previously deleted outputs of segment; this should force segment to rerun smooth_wm = pe.Node(interface=spm.Smooth(), name="smooth_wm") workflow.connect([(segment, smooth_wm, [('native_wm_image','in_files')])]) workflow.run() workflow.run() ''' # Node def test_node_init(): yield assert_raises, Exception, pe.Node try: node = pe.Node(TestInterface, name='test') except IOError: exception = True else: exception = False yield assert_true, exception def test_workflow_add(): from nipype.interfaces.utility import IdentityInterface as ii n1 = pe.Node(ii(fields=['a','b']),name='n1') n2 = pe.Node(ii(fields=['c','d']),name='n2') n3 = pe.Node(ii(fields=['c','d']),name='n1') w1 = pe.Workflow(name='test') w1.connect(n1,'a',n2,'c') yield assert_raises, IOError, w1.add_nodes, [n1] yield assert_raises, IOError, w1.add_nodes, [n2] yield assert_raises, IOError, w1.add_nodes, [n3] yield assert_raises, IOError, w1.connect, [(w1,n2,[('n1.a','d')])] def test_node_get_output(): mod1 = 
pe.Node(interface=TestInterface(),name='mod1') mod1.inputs.input1 = 1 mod1.run() yield assert_equal, mod1.get_output('output1'), [1, 1] mod1._result = None yield assert_equal, mod1.get_output('output1'), [1, 1] def test_mapnode_iterfield_check(): mod1 = pe.MapNode(TestInterface(), iterfield=['input1'], name='mod1') yield assert_raises, ValueError, mod1._check_iterfield mod1 = pe.MapNode(TestInterface(), iterfield=['input1', 'input2'], name='mod1') mod1.inputs.input1 = [1,2] mod1.inputs.input2 = 3 yield assert_raises, ValueError, mod1._check_iterfield def test_node_hash(): cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) from nipype.interfaces.utility import Function def func1(): return 1 def func2(a): return a+1 n1 = pe.Node(Function(input_names=[], output_names=['a'], function=func1), name='n1') n2 = pe.Node(Function(input_names=['a'], output_names=['b'], function=func2), name='n2') w1 = pe.Workflow(name='test') modify = lambda x: x+1 n1.inputs.a = 1 w1.connect(n1, ('a', modify), n2,'a') w1.base_dir = wd # generate outputs w1.run(plugin='Linear') # ensure plugin is being called w1.config['execution'] = {'stop_on_first_crash': 'true', 'local_hash_check': 'false', 'crashdump_dir': wd} error_raised = False # create dummy distributed plugin class from nipype.pipeline.plugins.base import DistributedPluginBase class RaiseError(DistributedPluginBase): def _submit_job(self, node, updatehash=False): raise Exception('Submit called') try: w1.run(plugin=RaiseError()) except Exception, e: pe.logger.info('Exception: %s' % str(e)) error_raised = True yield assert_true, error_raised #yield assert_true, 'Submit called' in e # rerun to ensure we have outputs w1.run(plugin='Linear') # set local check w1.config['execution'] = {'stop_on_first_crash': 'true', 'local_hash_check': 'true', 'crashdump_dir': wd} error_raised = False try: w1.run(plugin=RaiseError()) except Exception, e: pe.logger.info('Exception: %s' % str(e)) error_raised = True yield assert_false, error_raised os.chdir(cwd) rmtree(wd) def test_old_config(): cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) from nipype.interfaces.utility import Function def func1(): return 1 def func2(a): return a+1 n1 = pe.Node(Function(input_names=[], output_names=['a'], function=func1), name='n1') n2 = pe.Node(Function(input_names=['a'], output_names=['b'], function=func2), name='n2') w1 = pe.Workflow(name='test') modify = lambda x: x+1 n1.inputs.a = 1 w1.connect(n1, ('a', modify), n2,'a') w1.base_dir = wd w1.config = {'crashdump_dir': wd} # generate outputs error_raised = False try: w1.run(plugin='Linear') except Exception, e: pe.logger.info('Exception: %s' % str(e)) error_raised = True yield assert_false, error_raised os.chdir(cwd) rmtree(wd) def test_mapnode_json(): """Tests that mapnodes don't generate excess jsons """ cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) from nipype import MapNode, Function, Workflow def func1(in1): return in1 + 1 n1 = MapNode(Function(input_names=['in1'], output_names=['out'], function=func1), iterfield=['in1'], name='n1') n1.inputs.in1 = [1] w1 = Workflow(name='test') w1.base_dir = wd w1.config = {'crashdump_dir': wd} w1.add_nodes([n1]) w1.run() n1.inputs.in1 = [2] w1.run() # should rerun n1.inputs.in1 = [1] eg = w1.run() node = eg.nodes()[0] outjson = glob(os.path.join(node.output_dir(), '_0x*.json')) yield assert_equal, len(outjson), 1 # check that multiple json's don't trigger rerun with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp: fp.write('dummy file') w1.config['execution'].update(**{'stop_on_first_rerun': 
True}) error_raised = False try: w1.run() except: error_raised = True yield assert_false, error_raised os.chdir(cwd) rmtree(wd) nipype-0.9.2/nipype/pipeline/tests/test_join.py000066400000000000000000000462271227300005300216540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for join expansion """ from copy import deepcopy import os from shutil import rmtree from tempfile import mkdtemp import networkx as nx from nipype.testing import (assert_equal, assert_true) import nipype.interfaces.base as nib import nipype.pipeline.engine as pe from nipype.interfaces.utility import IdentityInterface class IncrementInputSpec(nib.TraitedSpec): input1 = nib.traits.Int(mandatory=True, desc='input') inc = nib.traits.Int(usedefault=True, default_value=1, desc='increment') class IncrementOutputSpec(nib.TraitedSpec): output1 = nib.traits.Int(desc='ouput') class IncrementInterface(nib.BaseInterface): input_spec = IncrementInputSpec output_spec = IncrementOutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['output1'] = self.inputs.input1 + self.inputs.inc return outputs _sums = [] _sum_operands = [] class SumInputSpec(nib.TraitedSpec): input1 = nib.traits.List(nib.traits.Int, mandatory=True, desc='input') class SumOutputSpec(nib.TraitedSpec): output1 = nib.traits.Int(desc='ouput') operands = nib.traits.List(nib.traits.Int, desc='operands') class SumInterface(nib.BaseInterface): input_spec = SumInputSpec output_spec = SumOutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): global _sum global _sum_operands outputs = self._outputs().get() outputs['operands'] = self.inputs.input1 _sum_operands.append(outputs['operands']) outputs['output1'] = sum(self.inputs.input1) _sums.append(outputs['output1']) return outputs _set_len = None """The Set interface execution result.""" class SetInputSpec(nib.TraitedSpec): input1 = nib.traits.Set(nib.traits.Int, mandatory=True, desc='input') class SetOutputSpec(nib.TraitedSpec): output1 = nib.traits.Int(desc='ouput') class SetInterface(nib.BaseInterface): input_spec = SetInputSpec output_spec = SetOutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): global _set_len outputs = self._outputs().get() _set_len = outputs['output1'] = len(self.inputs.input1) return outputs _products = [] """The Products interface execution results.""" class ProductInputSpec(nib.TraitedSpec): input1 = nib.traits.Int(mandatory=True, desc='input1') input2 = nib.traits.Int(mandatory=True, desc='input2') class ProductOutputSpec(nib.TraitedSpec): output1 = nib.traits.Int(mandatory=True, desc='output') class ProductInterface(nib.BaseInterface): input_spec = ProductInputSpec output_spec = ProductOutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): global _products outputs = self._outputs().get() outputs['output1'] = self.inputs.input1 * self.inputs.input2 _products.append(outputs['output1']) return outputs def test_join_expansion(): cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) # Make the workflow. 
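    # The workflow iterates over n in [1, 2], increments each value twice along
    # the iterated path, joins the per-iteration results with a summing JoinNode,
    # and feeds the joined sum to one uniterated and one iterated post-join node.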
wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') inputspec.iterables = [('n', [1, 2])] # a pre-join node in the iterated path pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') wf.connect(inputspec, 'n', pre_join1, 'input1') # another pre-join node in the iterated path pre_join2 = pe.Node(IncrementInterface(), name='pre_join2') wf.connect(pre_join1, 'output1', pre_join2, 'input1') # the join node join = pe.JoinNode(SumInterface(), joinsource='inputspec', joinfield='input1', name='join') wf.connect(pre_join2, 'output1', join, 'input1') # an uniterated post-join node post_join1 = pe.Node(IncrementInterface(), name='post_join1') wf.connect(join, 'output1', post_join1, 'input1') # a post-join node in the iterated path post_join2 = pe.Node(ProductInterface(), name='post_join2') wf.connect(join, 'output1', post_join2, 'input1') wf.connect(pre_join1, 'output1', post_join2, 'input2') result = wf.run() # the two expanded pre-join predecessor nodes feed into one join node joins = [node for node in result.nodes() if node.name == 'join'] assert_equal(len(joins), 1, "The number of join result nodes is incorrect.") # the expanded graph contains 2 * 2 = 4 iteration pre-join nodes, 1 join # node, 1 non-iterated post-join node and 2 * 1 iteration post-join nodes. # Nipype factors away the IdentityInterface. assert_equal(len(result.nodes()), 8, "The number of expanded nodes is incorrect.") # the join Sum result is (1 + 1 + 1) + (2 + 1 + 1) assert_equal(len(_sums), 1, "The number of join outputs is incorrect") assert_equal(_sums[0], 7, "The join Sum output value is incorrect: %s." % _sums[0]) # the join input preserves the iterables input order assert_equal(_sum_operands[0], [3, 4], "The join Sum input is incorrect: %s." % _sum_operands[0]) # there are two iterations of the post-join node in the iterable path assert_equal(len(_products), 2, "The number of iterated post-join outputs is incorrect") os.chdir(cwd) rmtree(wd) def test_node_joinsource(): """Test setting the joinsource to a Node.""" cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) # Make the workflow. wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') inputspec.iterables = [('n', [1, 2])] # the join node join = pe.JoinNode(SetInterface(), joinsource=inputspec, joinfield='input1', name='join') # the joinsource is the inputspec name assert_equal(join.joinsource, inputspec.name, "The joinsource is not set to the node name.") os.chdir(cwd) rmtree(wd) def test_set_join_node(): """Test collecting join inputs to a set.""" cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) # Make the workflow. wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') inputspec.iterables = [('n', [1, 2, 1, 3, 2])] # a pre-join node in the iterated path pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') wf.connect(inputspec, 'n', pre_join1, 'input1') # the set join node join = pe.JoinNode(SetInterface(), joinsource='inputspec', joinfield='input1', name='join') wf.connect(pre_join1, 'output1', join, 'input1') wf.run() # the join length is the number of unique inputs assert_equal(_set_len, 3, "The join Set output value is incorrect: %s." 
% _set_len) os.chdir(cwd) rmtree(wd) def test_unique_join_node(): """Test join with the ``unique`` flag set to True.""" global _sum_operands _sum_operands = [] cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) # Make the workflow. wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') inputspec.iterables = [('n', [3, 1, 2, 1, 3])] # a pre-join node in the iterated path pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') wf.connect(inputspec, 'n', pre_join1, 'input1') # the set join node join = pe.JoinNode(SumInterface(), joinsource='inputspec', joinfield='input1', unique=True, name='join') wf.connect(pre_join1, 'output1', join, 'input1') wf.run() assert_equal(_sum_operands[0], [4, 2, 3], "The unique join output value is incorrect: %s." % _sum_operands[0]) os.chdir(cwd) rmtree(wd) def test_multiple_join_nodes(): """Test two join nodes, one downstream of the other.""" global _products _products = [] cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) # Make the workflow. wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') inputspec.iterables = [('n', [1, 2, 3])] # a pre-join node in the iterated path pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') wf.connect(inputspec, 'n', pre_join1, 'input1') # the first join node join1 = pe.JoinNode(IdentityInterface(fields=['vector']), joinsource='inputspec', joinfield='vector', name='join1') wf.connect(pre_join1, 'output1', join1, 'vector') # an uniterated post-join node post_join1 = pe.Node(SumInterface(), name='post_join1') wf.connect(join1, 'vector', post_join1, 'input1') # the downstream join node connected to both an upstream join # path output and a separate input in the iterated path join2 = pe.JoinNode(IdentityInterface(fields=['vector', 'scalar']), joinsource='inputspec', joinfield='vector', name='join2') wf.connect(pre_join1, 'output1', join2, 'vector') wf.connect(post_join1, 'output1', join2, 'scalar') # a second post-join node post_join2 = pe.Node(SumInterface(), name='post_join2') wf.connect(join2, 'vector', post_join2, 'input1') # a third post-join node post_join3 = pe.Node(ProductInterface(), name='post_join3') wf.connect(post_join2, 'output1', post_join3, 'input1') wf.connect(join2, 'scalar', post_join3, 'input2') result = wf.run() # The expanded graph contains one pre_join1 replicate per inputspec # replicate and one of each remaining node = 3 + 5 = 8 nodes. # The replicated inputspec nodes are factored out of the expansion. assert_equal(len(result.nodes()), 8, "The number of expanded nodes is incorrect.") # The outputs are: # pre_join1: [2, 3, 4] # post_join1: 9 # join2: [2, 3, 4] and 9 # post_join2: 9 # post_join3: 9 * 9 = 81 assert_equal(_products, [81], "The post-join product is incorrect") os.chdir(cwd) rmtree(wd) def test_identity_join_node(): """Test an IdentityInterface join.""" global _sum_operands _sum_operands = [] cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) # Make the workflow. 
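    # The JoinNode here wraps a plain IdentityInterface: it only gathers the
    # per-iteration pre_join1 outputs into the 'vector' field, and the downstream
    # SumInterface node performs the actual reduction.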
wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') inputspec.iterables = [('n', [1, 2, 3])] # a pre-join node in the iterated path pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') wf.connect(inputspec, 'n', pre_join1, 'input1') # the IdentityInterface join node join = pe.JoinNode(IdentityInterface(fields=['vector']), joinsource='inputspec', joinfield='vector', name='join') wf.connect(pre_join1, 'output1', join, 'vector') # an uniterated post-join node post_join1 = pe.Node(SumInterface(), name='post_join1') wf.connect(join, 'vector', post_join1, 'input1') result = wf.run() # the expanded graph contains 1 * 3 iteration pre-join nodes, 1 join # node and 1 post-join node. Nipype factors away the iterable input # IdentityInterface but keeps the join IdentityInterface. assert_equal(len(result.nodes()), 5, "The number of expanded nodes is incorrect.") assert_equal(_sum_operands[0], [2, 3, 4], "The join Sum input is incorrect: %s." %_sum_operands[0]) os.chdir(cwd) rmtree(wd) def test_multifield_join_node(): """Test join on several fields.""" global _products _products = [] cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) # Make the workflow. wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['m', 'n']), name='inputspec') inputspec.iterables = [('m', [1, 2]), ('n', [3, 4])] # two pre-join nodes in a parallel iterated path inc1 = pe.Node(IncrementInterface(), name='inc1') wf.connect(inputspec, 'm', inc1, 'input1') inc2 = pe.Node(IncrementInterface(), name='inc2') wf.connect(inputspec, 'n', inc2, 'input1') # the join node join = pe.JoinNode(IdentityInterface(fields=['vector1', 'vector2']), joinsource='inputspec', name='join') wf.connect(inc1, 'output1', join, 'vector1') wf.connect(inc2, 'output1', join, 'vector2') # a post-join node prod = pe.MapNode(ProductInterface(), name='prod', iterfield=['input1', 'input2']) wf.connect(join, 'vector1', prod, 'input1') wf.connect(join, 'vector2', prod, 'input2') result = wf.run() # the iterables are expanded as the cartesian product of the iterables values. # thus, the expanded graph contains 2 * (2 * 2) iteration pre-join nodes, 1 join # node and 1 post-join node. assert_equal(len(result.nodes()), 10, "The number of expanded nodes is incorrect.") # the product inputs are [2, 4], [2, 5], [3, 4], [3, 5] assert_equal(_products, [8, 10, 12, 15], "The post-join products is incorrect: %s." % _products) os.chdir(cwd) rmtree(wd) def test_synchronize_join_node(): """Test join on an input node which has the ``synchronize`` flag set to True.""" global _products _products = [] cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) # Make the workflow. 
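    # With synchronize=True the two iterables are paired item-wise, i.e. expanded
    # as (m, n) = (1, 3) and (2, 4), rather than as a Cartesian product of the
    # 'm' and 'n' value lists.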
wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['m', 'n']), name='inputspec') inputspec.iterables = [('m', [1, 2]), ('n', [3, 4])] inputspec.synchronize = True # two pre-join nodes in a parallel iterated path inc1 = pe.Node(IncrementInterface(), name='inc1') wf.connect(inputspec, 'm', inc1, 'input1') inc2 = pe.Node(IncrementInterface(), name='inc2') wf.connect(inputspec, 'n', inc2, 'input1') # the join node join = pe.JoinNode(IdentityInterface(fields=['vector1', 'vector2']), joinsource='inputspec', name='join') wf.connect(inc1, 'output1', join, 'vector1') wf.connect(inc2, 'output1', join, 'vector2') # a post-join node prod = pe.MapNode(ProductInterface(), name='prod', iterfield=['input1', 'input2']) wf.connect(join, 'vector1', prod, 'input1') wf.connect(join, 'vector2', prod, 'input2') result = wf.run() # there are 3 iterables expansions. # thus, the expanded graph contains 2 * 2 iteration pre-join nodes, 1 join # node and 1 post-join node. assert_equal(len(result.nodes()), 6, "The number of expanded nodes is incorrect.") # the product inputs are [2, 3] and [4, 5] assert_equal(_products, [8, 15], "The post-join products is incorrect: %s." % _products) os.chdir(cwd) rmtree(wd) def test_itersource_join_source_node(): """Test join on an input node which has an ``itersource``.""" cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) # Make the workflow. wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') inputspec.iterables = [('n', [1, 2])] # an intermediate node in the first iteration path pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') wf.connect(inputspec, 'n', pre_join1, 'input1') # an iterable pre-join node with an itersource pre_join2 = pe.Node(ProductInterface(), name='pre_join2') pre_join2.itersource = ('inputspec', 'n') pre_join2.iterables = ('input1', {1: [3, 4], 2: [5, 6]}) wf.connect(pre_join1, 'output1', pre_join2, 'input2') # an intermediate node in the second iteration path pre_join3 = pe.Node(IncrementInterface(), name='pre_join3') wf.connect(pre_join2, 'output1', pre_join3, 'input1') # the join node join = pe.JoinNode(IdentityInterface(fields=['vector']), joinsource='pre_join2', joinfield='vector', name='join') wf.connect(pre_join3, 'output1', join, 'vector') # a join successor node post_join1 = pe.Node(SumInterface(), name='post_join1') wf.connect(join, 'vector', post_join1, 'input1') result = wf.run() # the expanded graph contains # 1 pre_join1 replicate for each inputspec iteration, # 2 pre_join2 replicates for each inputspec iteration, # 1 pre_join3 for each pre_join2 iteration, # 1 join replicate for each inputspec iteration and # 1 post_join1 replicate for each join replicate = # 2 + (2 * 2) + 4 + 2 + 2 = 14 expansion graph nodes. # Nipype factors away the iterable input # IdentityInterface but keeps the join IdentityInterface. assert_equal(len(result.nodes()), 14, "The number of expanded nodes is incorrect.") # The first join inputs are: # 1 + (3 * 2) and 1 + (4 * 2) # The second join inputs are: # 1 + (5 * 3) and 1 + (6 * 3) # the post-join nodes execution order is indeterminate; # therefore, compare the lists item-wise. assert_true([16, 19] in _sum_operands, "The join Sum input is incorrect: %s." % _sum_operands) assert_true([7, 9] in _sum_operands, "The join Sum input is incorrect: %s." 
% _sum_operands) os.chdir(cwd) rmtree(wd) def test_itersource_two_join_nodes(): """Test join with a midstream ``itersource`` and an upstream iterable.""" cwd = os.getcwd() wd = mkdtemp() os.chdir(wd) # Make the workflow. wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') inputspec.iterables = [('n', [1, 2])] # an intermediate node in the first iteration path pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') wf.connect(inputspec, 'n', pre_join1, 'input1') # an iterable pre-join node with an itersource pre_join2 = pe.Node(ProductInterface(), name='pre_join2') pre_join2.itersource = ('inputspec', 'n') pre_join2.iterables = ('input1', {1: [3, 4], 2: [5, 6]}) wf.connect(pre_join1, 'output1', pre_join2, 'input2') # an intermediate node in the second iteration path pre_join3 = pe.Node(IncrementInterface(), name='pre_join3') wf.connect(pre_join2, 'output1', pre_join3, 'input1') # the first join node join1 = pe.JoinNode(IdentityInterface(fields=['vector']), joinsource='pre_join2', joinfield='vector', name='join1') wf.connect(pre_join3, 'output1', join1, 'vector') # a join successor node post_join1 = pe.Node(SumInterface(), name='post_join1') wf.connect(join1, 'vector', post_join1, 'input1') # a summary join node join2 = pe.JoinNode(IdentityInterface(fields=['vector']), joinsource='inputspec', joinfield='vector', name='join2') wf.connect(post_join1, 'output1', join2, 'vector') result = wf.run() # the expanded graph contains the 14 test_itersource_join_source_node # nodes plus the summary join node. assert_equal(len(result.nodes()), 15, "The number of expanded nodes is incorrect.") os.chdir(cwd) rmtree(wd) if __name__ == "__main__": import nose nose.main(defaultTest=__name__) nipype-0.9.2/nipype/pipeline/tests/test_utils.py000066400000000000000000000316301227300005300220450ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for the engine utils module """ import os from copy import deepcopy from tempfile import mkdtemp from shutil import rmtree from ...testing import (assert_equal, assert_true, assert_false) import nipype.pipeline.engine as pe import nipype.interfaces.base as nib import nipype.interfaces.utility as niu from ... 
import config from ..utils import merge_dict, clean_working_directory def test_identitynode_removal(): def test_function(arg1, arg2, arg3): import numpy as np return (np.array(arg1) + arg2 + arg3).tolist() wf = pe.Workflow(name="testidentity") n1 = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='src') n1.iterables = ('b', [0, 1, 2, 3]) n1.inputs.a = [0, 1, 2, 3] n2 = pe.Node(niu.Select(), name='selector') wf.connect(n1, ('a', test_function, 1, -1), n2, 'inlist') wf.connect(n1, 'b', n2, 'index') n3 = pe.Node(niu.IdentityInterface(fields=['c', 'd']), name='passer') n3.inputs.c = [1, 2, 3, 4] wf.connect(n2, 'out', n3, 'd') n4 = pe.Node(niu.Select(), name='selector2') wf.connect(n3, ('c', test_function, 1, -1), n4, 'inlist') wf.connect(n3, 'd', n4, 'index') fg = wf._create_flat_graph() wf._set_needed_outputs(fg) eg = pe.generate_expanded_graph(deepcopy(fg)) yield assert_equal, len(eg.nodes()), 8 def test_clean_working_directory(): class OutputSpec(nib.TraitedSpec): files = nib.traits.List(nib.File) others = nib.File() class InputSpec(nib.TraitedSpec): infile = nib.File() outputs = OutputSpec() inputs = InputSpec() wd = mkdtemp() filenames = ['file.hdr', 'file.img', 'file.BRIK', 'file.HEAD', '_0x1234.json', 'foo.txt'] outfiles = [] for filename in filenames: outfile = os.path.join(wd, filename) with open(outfile, 'wt') as fp: fp.writelines('dummy') outfiles.append(outfile) outputs.files = outfiles[:4:2] outputs.others = outfiles[5] inputs.infile = outfiles[-1] needed_outputs = ['files'] config.set_default_config() yield assert_true, os.path.exists(outfiles[5]) config.set_default_config() config.set('execution', 'remove_unnecessary_outputs', False) out = clean_working_directory(outputs, wd, inputs, needed_outputs, deepcopy(config._sections)) yield assert_true, os.path.exists(outfiles[5]) yield assert_equal, out.others, outfiles[5] config.set('execution', 'remove_unnecessary_outputs', True) out = clean_working_directory(outputs, wd, inputs, needed_outputs, deepcopy(config._sections)) yield assert_true, os.path.exists(outfiles[1]) yield assert_true, os.path.exists(outfiles[3]) yield assert_true, os.path.exists(outfiles[4]) yield assert_false, os.path.exists(outfiles[5]) yield assert_equal, out.others, nib.Undefined yield assert_equal, len(out.files), 2 config.set_default_config() rmtree(wd) def test_outputs_removal(): def test_function(arg1): import os file1 = os.path.join(os.getcwd(), 'file1.txt') file2 = os.path.join(os.getcwd(), 'file2.txt') fp = open(file1, 'wt') fp.write('%d' % arg1) fp.close() fp = open(file2, 'wt') fp.write('%d' % arg1) fp.close() return file1, file2 out_dir = mkdtemp() n1 = pe.Node(niu.Function(input_names=['arg1'], output_names=['file1', 'file2'], function=test_function), base_dir=out_dir, name='testoutputs') n1.inputs.arg1 = 1 n1.config = {'execution': {'remove_unnecessary_outputs': True}} n1.config = merge_dict(deepcopy(config._sections), n1.config) n1.run() yield assert_true, os.path.exists(os.path.join(out_dir, n1.name, 'file1.txt')) yield assert_true, os.path.exists(os.path.join(out_dir, n1.name, 'file2.txt')) n1.needed_outputs = ['file2'] n1.run() yield assert_false, os.path.exists(os.path.join(out_dir, n1.name, 'file1.txt')) yield assert_true, os.path.exists(os.path.join(out_dir, n1.name, 'file2.txt')) rmtree(out_dir) class InputSpec(nib.TraitedSpec): in_file = nib.File(exists=True, copyfile=True) class OutputSpec(nib.TraitedSpec): output1 = nib.traits.List(nib.traits.Int, desc='outputs') class TestInterface(nib.BaseInterface): input_spec = InputSpec 
output_spec = OutputSpec def _run_interface(self, runtime): runtime.returncode = 0 return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['output1'] = [1] return outputs def test_inputs_removal(): out_dir = mkdtemp() file1 = os.path.join(out_dir, 'file1.txt') fp = open(file1, 'wt') fp.write('dummy_file') fp.close() n1 = pe.Node(TestInterface(), base_dir=out_dir, name='testinputs') n1.inputs.in_file = file1 n1.config = {'execution': {'keep_inputs': True}} n1.config = merge_dict(deepcopy(config._sections), n1.config) n1.run() yield assert_true, os.path.exists(os.path.join(out_dir, n1.name, 'file1.txt')) n1.inputs.in_file = file1 n1.config = {'execution': {'keep_inputs': False}} n1.config = merge_dict(deepcopy(config._sections), n1.config) n1.overwrite = True n1.run() yield assert_false, os.path.exists(os.path.join(out_dir, n1.name, 'file1.txt')) rmtree(out_dir) def test_outputs_removal_wf(): def test_function(arg1): import os file1 = os.path.join(os.getcwd(), 'file1.txt') file2 = os.path.join(os.getcwd(), 'file2.txt') file3 = os.path.join(os.getcwd(), 'file3.txt') file4 = os.path.join(os.getcwd(), 'subdir', 'file1.txt') files = [file1, file2, file3, file4] os.mkdir("subdir") for filename in files: with open(filename, 'wt') as fp: fp.write('%d' % arg1) return file1, file2, os.path.join(os.getcwd(),"subdir") def test_function2(in_file, arg): import os in_arg = open(in_file).read() file1 = os.path.join(os.getcwd(), 'file1.txt') file2 = os.path.join(os.getcwd(), 'file2.txt') file3 = os.path.join(os.getcwd(), 'file3.txt') files = [file1, file2, file3] for filename in files: with open(filename, 'wt') as fp: fp.write('%d' % arg + in_arg) return file1, file2, 1 def test_function3(arg): import os return arg out_dir = mkdtemp() for plugin in ('Linear',):#, 'MultiProc'): n1 = pe.Node(niu.Function(input_names=['arg1'], output_names=['out_file1', 'out_file2', 'dir'], function=test_function), name='n1') n1.inputs.arg1 = 1 n2 = pe.Node(niu.Function(input_names=['in_file', 'arg'], output_names=['out_file1', 'out_file2', 'n'], function=test_function2), name='n2') n2.inputs.arg = 2 n3 = pe.Node(niu.Function(input_names=['arg'], output_names=['n'], function=test_function3), name='n3') wf = pe.Workflow(name="node_rem_test" + plugin, base_dir=out_dir) wf.connect(n1, "out_file1", n2, "in_file") wf.run(plugin='Linear') for remove_unnecessary_outputs in [True, False]: config.set_default_config() wf.config = {'execution': {'remove_unnecessary_outputs': remove_unnecessary_outputs}} rmtree(os.path.join(wf.base_dir, wf.name)) wf.run(plugin=plugin) yield assert_true, os.path.exists(os.path.join(wf.base_dir, wf.name, n1.name, 'file2.txt')) != remove_unnecessary_outputs yield assert_true, os.path.exists(os.path.join(wf.base_dir, wf.name, n1.name, "subdir", 'file1.txt')) != remove_unnecessary_outputs yield assert_true, os.path.exists(os.path.join(wf.base_dir, wf.name, n1.name, 'file1.txt')) yield assert_true, os.path.exists(os.path.join(wf.base_dir, wf.name, n1.name, 'file3.txt')) != remove_unnecessary_outputs yield assert_true, os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, 'file1.txt')) yield assert_true, os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, 'file2.txt')) yield assert_true, os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, 'file3.txt')) != remove_unnecessary_outputs n4 = pe.Node(TestInterface(), name='n4') wf.connect(n2, "out_file1", n4, "in_file") def pick_first(l): return l[0] wf.connect(n4, ("output1", pick_first), n3, "arg") for 
remove_unnecessary_outputs in [True, False]: for keep_inputs in [True, False]: config.set_default_config() wf.config = {'execution': {'keep_inputs': keep_inputs, 'remove_unnecessary_outputs': remove_unnecessary_outputs}} rmtree(os.path.join(wf.base_dir, wf.name)) wf.run(plugin=plugin) yield assert_true, os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, 'file1.txt')) yield assert_true, os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, 'file2.txt')) != remove_unnecessary_outputs yield assert_true, os.path.exists(os.path.join(wf.base_dir, wf.name, n4.name, 'file1.txt')) == keep_inputs rmtree(out_dir) def fwhm(fwhm): return fwhm def create_wf(name): pipe = pe.Workflow(name=name) process = pe.Node(niu.Function(input_names=['fwhm'], output_names=['fwhm'], function=fwhm), name='proc') process.iterables = ('fwhm', [0]) process2 = pe.Node(niu.Function(input_names=['fwhm'], output_names=['fwhm'], function=fwhm), name='proc2') process2.iterables = ('fwhm', [0]) pipe.connect(process, 'fwhm', process2, 'fwhm') return pipe def test_multi_disconnected_iterable(): out_dir = mkdtemp() metawf = pe.Workflow(name='meta') metawf.base_dir = out_dir metawf.add_nodes([create_wf('wf%d' % i) for i in range(30)]) eg = metawf.run(plugin='Linear') yield assert_equal, len(eg.nodes()), 60 rmtree(out_dir) nipype-0.9.2/nipype/pipeline/utils.py000066400000000000000000001323361227300005300176510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Utility routines for workflow graphs """ from copy import deepcopy from glob import glob from collections import defaultdict import os import pwd import re from uuid import uuid1 import numpy as np from nipype.utils.misc import package_check package_check('networkx', '1.3') from socket import gethostname import networkx as nx from ..utils.filemanip import (fname_presuffix, FileNotFoundError, filename_to_list, get_related_files) from ..utils.misc import create_function_from_source, str2bool from ..interfaces.base import (CommandLine, isdefined, Undefined, Bunch, InterfaceResult) from ..interfaces.utility import IdentityInterface from ..utils.provenance import ProvStore, pm, nipype_ns, get_id from .. import get_info from .. import logging, config logger = logging.getLogger('workflow') try: dfs_preorder = nx.dfs_preorder except AttributeError: dfs_preorder = nx.dfs_preorder_nodes logger.debug('networkx 1.4 dev or higher detected') try: from os.path import relpath except ImportError: import os.path as op def relpath(path, start=None): """Return a relative version of a path""" if start is None: start = os.curdir if not path: raise ValueError("no path specified") start_list = op.abspath(start).split(op.sep) path_list = op.abspath(path).split(op.sep) if start_list[0].lower() != path_list[0].lower(): unc_path, rest = op.splitunc(path) unc_start, rest = op.splitunc(start) if bool(unc_path) ^ bool(unc_start): raise ValueError(("Cannot mix UNC and non-UNC paths " "(%s and %s)") % (path, start)) else: raise ValueError("path is on drive %s, start on drive %s" % (path_list[0], start_list[0])) # Work out how much of the filepath is shared by start and path. 
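        # After the loop, i is the number of leading components shared by 'start'
        # and 'path'; the relative path is one os.pardir per remaining 'start'
        # component followed by the unshared tail of 'path'.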
for i in range(min(len(start_list), len(path_list))): if start_list[i].lower() != path_list[i].lower(): break else: i += 1 rel_list = [op.pardir] * (len(start_list) - i) + path_list[i:] if not rel_list: return os.curdir return op.join(*rel_list) def modify_paths(object, relative=True, basedir=None): """Convert paths in data structure to either full paths or relative paths Supports combinations of lists, dicts, tuples, strs Parameters ---------- relative : boolean indicating whether paths should be set relative to the current directory basedir : default os.getcwd() what base directory to use as default """ if not basedir: basedir = os.getcwd() if isinstance(object, dict): out = {} for key, val in sorted(object.items()): if isdefined(val): out[key] = modify_paths(val, relative=relative, basedir=basedir) elif isinstance(object, (list, tuple)): out = [] for val in object: if isdefined(val): out.append(modify_paths(val, relative=relative, basedir=basedir)) if isinstance(object, tuple): out = tuple(out) else: if isdefined(object): if isinstance(object, str) and os.path.isfile(object): if relative: if config.getboolean('execution', 'use_relative_paths'): out = relpath(object, start=basedir) else: out = object else: out = os.path.abspath(os.path.join(basedir, object)) if not os.path.exists(out): raise FileNotFoundError('File %s not found' % out) else: out = object return out def get_print_name(node, simple_form=True): """Get the name of the node For example, a node containing an instance of interfaces.fsl.BET would be called nodename.BET.fsl """ name = node.fullname if hasattr(node, '_interface'): pkglist = node._interface.__class__.__module__.split('.') interface = node._interface.__class__.__name__ destclass = '' if len(pkglist) > 2: destclass = '.%s' % pkglist[2] if simple_form: name = node.fullname + destclass else: name = '.'.join([node.fullname, interface]) + destclass if simple_form: parts = name.split('.') if len(parts) > 2: return ' ('.join(parts[1:])+')' elif len(parts) == 2: return parts[1] return name def _create_dot_graph(graph, show_connectinfo=False, simple_form=True): """Create a graph that can be pickled. Ensures that edge info is pickleable. """ logger.debug('creating dot graph') pklgraph = nx.DiGraph() for edge in graph.edges(): data = graph.get_edge_data(*edge) srcname = get_print_name(edge[0], simple_form=simple_form) destname = get_print_name(edge[1], simple_form=simple_form) if show_connectinfo: pklgraph.add_edge(srcname, destname, l=str(data['connect'])) else: pklgraph.add_edge(srcname, destname) return pklgraph def _write_detailed_dot(graph, dotfilename): """Create a dot file with connection info digraph structs { node [shape=record]; struct1 [label=" left| mid\ dle| right"]; struct2 [label=" one| two"]; struct3 [label="hello\nworld |{ b |{c| d|e}| f}| g | h"]; struct1:f1 -> struct2:f0; struct1:f0 -> struct2:f1; struct1:f2 -> struct3:here; } """ text = ['digraph structs {', 'node [shape=record];'] # write nodes edges = [] replacefunk = lambda x: x.replace('_', '').replace('.', ''). 
\ replace('@', '').replace('-', '') for n in nx.topological_sort(graph): nodename = str(n) inports = [] for u, v, d in graph.in_edges_iter(nbunch=n, data=True): for cd in d['connect']: if isinstance(cd[0], str): outport = cd[0] else: outport = cd[0][0] inport = cd[1] ipstrip = 'in' + replacefunk(inport) opstrip = 'out' + replacefunk(outport) edges.append('%s:%s:e -> %s:%s:w;' % (str(u).replace('.', ''), opstrip, str(v).replace('.', ''), ipstrip)) if inport not in inports: inports.append(inport) inputstr = '{IN' for ip in sorted(inports): inputstr += '| %s' % (replacefunk(ip), ip) inputstr += '}' outports = [] for u, v, d in graph.out_edges_iter(nbunch=n, data=True): for cd in d['connect']: if isinstance(cd[0], str): outport = cd[0] else: outport = cd[0][0] if outport not in outports: outports.append(outport) outputstr = '{OUT' for op in sorted(outports): outputstr += '| %s' % (replacefunk(op), op) outputstr += '}' srcpackage = '' if hasattr(n, '_interface'): pkglist = n._interface.__class__.__module__.split('.') if len(pkglist) > 2: srcpackage = pkglist[2] srchierarchy = '.'.join(nodename.split('.')[1:-1]) nodenamestr = '{ %s | %s | %s }' % (nodename.split('.')[-1], srcpackage, srchierarchy) text += ['%s [label="%s|%s|%s"];' % (nodename.replace('.', ''), inputstr, nodenamestr, outputstr)] # write edges for edge in sorted(edges): text.append(edge) text.append('}') filep = open(dotfilename, 'wt') filep.write('\n'.join(text)) filep.close() return text # Graph manipulations for iterable expansion def _get_valid_pathstr(pathstr): """Remove disallowed characters from path Removes: [][ (){}?:<>#!|"';] Replaces: ',' -> '.' """ pathstr = pathstr.replace(os.sep, '..') pathstr = re.sub(r'''[][ (){}?:<>#!|"';]''', '', pathstr) pathstr = pathstr.replace(',', '.') return pathstr def expand_iterables(iterables, synchronize=False): if synchronize: return synchronize_iterables(iterables) else: return list(walk(iterables.items())) def count_iterables(iterables, synchronize=False): """Return the number of iterable expansion nodes. If synchronize is True, then the count is the maximum number of iterables value lists. Otherwise, the count is the product of the iterables value list sizes. """ if synchronize: op = max else: op = lambda x,y: x*y return reduce(op, [len(func()) for _, func in iterables.iteritems()]) def walk(children, level=0, path=None, usename=True): """Generate all the full paths in a tree, as a dict. Examples -------- >>> from nipype.pipeline.utils import walk >>> iterables = [('a', lambda: [1, 2]), ('b', lambda: [3, 4])] >>> list(walk(iterables)) [{'a': 1, 'b': 3}, {'a': 1, 'b': 4}, {'a': 2, 'b': 3}, {'a': 2, 'b': 4}] """ # Entry point if level == 0: path = {} # Exit condition if not children: yield path.copy() return # Tree recursion head, tail = children[0], children[1:] name, func = head for child in func(): # We can use the arg name or the tree level as a key if usename: path[name] = child else: path[level] = child # Recurse into the next level for child_paths in walk(tail, level + 1, path, usename): yield child_paths def synchronize_iterables(iterables): """Synchronize the given iterables in item-wise order. 
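Value lists of different lengths are allowed: iteration simply continues
with the exhausted fields omitted from the later dictionaries, as the
second example below shows.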
Return: the {field: value} dictionary list Examples -------- >>> from nipype.pipeline.utils import synchronize_iterables >>> iterables = dict(a=lambda: [1, 2], b=lambda: [3, 4]) >>> synced = synchronize_iterables(iterables) >>> synced == [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}] True >>> iterables = dict(a=lambda: [1, 2], b=lambda: [3], c=lambda: [4, 5, 6]) >>> synced = synchronize_iterables(iterables) >>> synced == [{'a': 1, 'b': 3, 'c': 4}, {'a': 2, 'c': 5}, {'c': 6}] True """ # Convert the (field, function) tuples into (field, value) lists pair_lists = [[(field, value) for value in func()] for field, func in iterables.iteritems()] # A factory to make a dictionary from the mapped (field, value) # key-value pairs. The filter removes any unmapped None items. factory = lambda *pairs: dict(filter(None, pairs)) # Make a dictionary for each of the correlated (field, value) items return map(factory, *pair_lists) def evaluate_connect_function(function_source, args, first_arg): func = create_function_from_source(function_source) try: output_value = func(first_arg, *list(args)) except NameError as e: if e.args[0].startswith("global name") and \ e.args[0].endswith("is not defined"): e.args = (e.args[0], ("Due to engine constraints all imports have to be done " "inside each function definition")) raise e return output_value def get_levels(G): levels = {} for n in nx.topological_sort(G): levels[n] = 0 for pred in G.predecessors_iter(n): levels[n] = max(levels[n], levels[pred] + 1) return levels def _merge_graphs(supergraph, nodes, subgraph, nodeid, iterables, prefix, synchronize=False): """Merges two graphs that share a subset of nodes. If the subgraph needs to be replicated for multiple iterables, the merge happens with every copy of the subgraph. Assumes that edges between nodes of supergraph and subgraph contain data. Parameters ---------- supergraph : networkx graph Parent graph from which subgraph was selected nodes : networkx nodes Nodes of the parent graph from which the subgraph was initially constructed. subgraph : networkx graph A subgraph that contains as a subset nodes from the supergraph. These nodes connect the subgraph to the supergraph nodeid : string Identifier of a node for which parameterization has been sought iterables : dict of functions see `pipeline.NodeWrapper` for iterable requirements Returns ------- Returns a merged graph containing copies of the subgraph with appropriate edge connections to the supergraph. """ # Retrieve edge information connecting nodes of the subgraph to other # nodes of the supergraph. supernodes = supergraph.nodes() ids = [n._hierarchy + n._id for n in supernodes] if len(np.unique(ids)) != len(ids): # This should trap the problem of miswiring when multiple iterables are # used at the same level. The use of the template below for naming # updates to nodes is the general solution. raise Exception(("Execution graph does not have a unique set of node " "names. 
Please rerun the workflow")) edgeinfo = {} for n in subgraph.nodes(): nidx = ids.index(n._hierarchy + n._id) for edge in supergraph.in_edges_iter(supernodes[nidx]): #make sure edge is not part of subgraph if edge[0] not in subgraph.nodes(): if n._hierarchy + n._id not in edgeinfo.keys(): edgeinfo[n._hierarchy + n._id] = [] edgeinfo[n._hierarchy + n._id].append((edge[0], supergraph.get_edge_data(*edge))) supergraph.remove_nodes_from(nodes) # Add copies of the subgraph depending on the number of iterables iterable_params = expand_iterables(iterables, synchronize) # If there are no iterable subgraphs, then return if not iterable_params: return supergraph # Make an iterable subgraph node id template count = len(iterable_params) template = '.%s%%0%dd' % (prefix, np.ceil(np.log10(count))) # Copy the iterable subgraphs for i, params in enumerate(iterable_params): Gc = deepcopy(subgraph) ids = [n._hierarchy + n._id for n in Gc.nodes()] nodeidx = ids.index(nodeid) rootnode = Gc.nodes()[nodeidx] paramstr = '' for key, val in sorted(params.items()): paramstr = '_'.join((paramstr, _get_valid_pathstr(key), _get_valid_pathstr(str(val)))) rootnode.set_input(key, val) levels = get_levels(Gc) for n in Gc.nodes(): """ update parameterization of the node to reflect the location of the output directory. For example, if the iterables along a path of the directed graph consisted of the variables 'a' and 'b', then every node in the path including and after the node with iterable 'b' will be placed in a directory _a_aval/_b_bval/. """ path_length = levels[n] # enter as negative numbers so that earlier iterables with longer # path lengths get precedence in a sort paramlist = [(-path_length, paramstr)] if n.parameterization: n.parameterization = paramlist + n.parameterization else: n.parameterization = paramlist supergraph.add_nodes_from(Gc.nodes()) supergraph.add_edges_from(Gc.edges(data=True)) for node in Gc.nodes(): if node._hierarchy + node._id in edgeinfo.keys(): for info in edgeinfo[node._hierarchy + node._id]: supergraph.add_edges_from([(info[0], node, info[1])]) node._id += template % i return supergraph def _connect_nodes(graph, srcnode, destnode, connection_info): """Add a connection between two nodes """ data = graph.get_edge_data(srcnode, destnode, default=None) if not data: data = {'connect': connection_info} graph.add_edges_from([(srcnode, destnode, data)]) else: data['connect'].extend(connection_info) def _remove_nonjoin_identity_nodes(graph, keep_iterables=False): """Remove non-join identity nodes from the given graph Iterable nodes are retained if and only if the keep_iterables flag is set to True. """ # if keep_iterables is False, then include the iterable # and join nodes in the nodes to delete for node in _identity_nodes(graph, not keep_iterables): if not hasattr(node, 'joinsource'): _remove_identity_node(graph, node) return graph def _identity_nodes(graph, include_iterables): """Return the IdentityInterface nodes in the graph The nodes are in topological sort order. The iterable nodes are included if and only if the include_iterables flag is set to True. 
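generate_expanded_graph relies on this in two passes: iterable identity
nodes are retained until their iterables have been expanded, and the
remaining identity nodes are stripped from the graph afterwards.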
""" return [node for node in nx.topological_sort(graph) if isinstance(node._interface, IdentityInterface) and (include_iterables or getattr(node, 'iterables') is None)] def _remove_identity_node(graph, node): """Remove identity nodes from an execution graph """ portinputs, portoutputs = _node_ports(graph, node) for field, connections in portoutputs.items(): if portinputs: _propagate_internal_output(graph, node, field, connections, portinputs) else: _propagate_root_output(graph, node, field, connections) graph.remove_nodes_from([node]) logger.debug("Removed the identity node %s from the graph." % node) def _node_ports(graph, node): """Return the given node's input and output ports The return value is the (inputs, outputs) dictionaries. The inputs is a {destination field: (source node, source field)} dictionary. The outputs is a {source field: destination items} dictionary, where each destination item is a (destination node, destination field, source field) tuple. """ portinputs = {} portoutputs = {} for u, _, d in graph.in_edges_iter(node, data=True): for src, dest in d['connect']: portinputs[dest] = (u, src) for _, v, d in graph.out_edges_iter(node, data=True): for src, dest in d['connect']: if isinstance(src, tuple): srcport = src[0] else: srcport = src if srcport not in portoutputs: portoutputs[srcport] = [] portoutputs[srcport].append((v, dest, src)) return (portinputs, portoutputs) def _propagate_root_output(graph, node, field, connections): """Propagates the given graph root node output port field connections to the out-edge destination nodes.""" for destnode, inport, src in connections: value = getattr(node.inputs, field) if isinstance(src, tuple): value = evaluate_connect_function(src[1], src[2], value) destnode.set_input(inport, value) def _propagate_internal_output(graph, node, field, connections, portinputs): """Propagates the given graph internal node output port field connections to the out-edge source node and in-edge destination nodes.""" for destnode, inport, src in connections: if field in portinputs: srcnode, srcport = portinputs[field] if isinstance(srcport, tuple) and isinstance(src, tuple): raise ValueError(("Does not support two inline functions " "in series (\'%s\' and \'%s\'). " "Please use a Function node") % (srcport[1].split("\\n")[0][6:-1], src[1].split("\\n")[0][6:-1])) connect = graph.get_edge_data(srcnode, destnode, default={'connect': []}) if isinstance(src, tuple): connect['connect'].append(((srcport, src[1], src[2]), inport)) else: connect = {'connect': [(srcport, inport)]} old_connect = graph.get_edge_data(srcnode, destnode, default={'connect': []}) old_connect['connect'] += connect['connect'] graph.add_edges_from([(srcnode, destnode, old_connect)]) else: value = getattr(node.inputs, field) if isinstance(src, tuple): value = evaluate_connect_function(src[1], src[2], value) destnode.set_input(inport, value) def generate_expanded_graph(graph_in): """Generates an expanded graph based on node parameterization Parameterization is controlled using the `iterables` field of the pipeline elements. Thus if there are two nodes with iterables a=[1,2] and b=[3,4] this procedure will generate a graph with sub-graphs parameterized as (a=1,b=3), (a=1,b=4), (a=2,b=3) and (a=2,b=4). 
""" logger.debug("PE: expanding iterables") graph_in = _remove_nonjoin_identity_nodes(graph_in, keep_iterables=True) # standardize the iterables as {(field, function)} dictionaries for node in graph_in.nodes_iter(): if node.iterables: _standardize_iterables(node) allprefixes = list('abcdefghijklmnopqrstuvwxyz') # the iterable nodes inodes = _iterable_nodes(graph_in) logger.debug("Detected iterable nodes %s" % inodes) # while there is an iterable node, expand the iterable node's # subgraphs while inodes: inode = inodes[0] logger.debug("Expanding the iterable node %s..." % inode) # the join successor nodes of the current iterable node jnodes = [node for node in graph_in.nodes_iter() if hasattr(node, 'joinsource') and inode.name == node.joinsource and nx.has_path(graph_in, inode, node)] # excise the join in-edges. save the excised edges in a # {jnode: {source name: (destination name, edge data)}} # dictionary jedge_dict = {} for jnode in jnodes: in_edges = jedge_dict[jnode] = {} for src, dest, data in graph_in.in_edges_iter(jnode, True): in_edges[src._id] = data graph_in.remove_edge(src, dest) logger.debug("Excised the %s -> %s join node in-edge." % (src, dest)) if inode.itersource: # the itersource is a (node name, fields) tuple src_name, src_fields = inode.itersource # convert a single field to a list if isinstance(src_fields, str): src_fields = [src_fields] # find the unique iterable source node in the graph try: iter_src = next((node for node in graph_in.nodes_iter() if node.name == src_name and nx.has_path(graph_in, node, inode))) except StopIteration: raise ValueError("The node %s itersource %s was not found" " among the iterable predecessor nodes" % (inode, src_name)) logger.debug("The node %s has iterable source node %s" % (inode, iter_src)) # look up the iterables for this particular itersource descendant # using the iterable source ancestor values as a key iterables = {} # the source node iterables values src_values = [getattr(iter_src.inputs, field) for field in src_fields] # if there is one source field, then the key is the the source value, # otherwise the key is the tuple of source values if len(src_values) == 1: key = src_values[0] else: key = tuple(src_values) # The itersource iterables is a {field: lookup} dictionary, where the # lookup is a {source key: iteration list} dictionary. Look up the # current iterable value using the predecessor itersource input values. iter_dict = dict([(field, lookup[key]) for field, lookup in inode.iterables if key in lookup]) # convert the iterables to the standard {field: function} format iter_items = map(lambda(field, value): (field, lambda: value), iter_dict.iteritems()) iterables = dict(iter_items) else: iterables = inode.iterables.copy() inode.iterables = None logger.debug('node: %s iterables: %s' % (inode, iterables)) # collect the subnodes to expand subnodes = [s for s in dfs_preorder(graph_in, inode)] prior_prefix = [] for s in subnodes: prior_prefix.extend(re.findall('\.(.)I', s._id)) prior_prefix = sorted(prior_prefix) if not len(prior_prefix): iterable_prefix = 'a' else: if prior_prefix[-1] == 'z': raise ValueError('Too many iterables in the workflow') iterable_prefix =\ allprefixes[allprefixes.index(prior_prefix[-1]) + 1] logger.debug(('subnodes:', subnodes)) # append a suffix to the iterable node id inode._id += ('.' 
+ iterable_prefix + 'I') # merge the iterated subgraphs subgraph = graph_in.subgraph(subnodes) graph_in = _merge_graphs(graph_in, subnodes, subgraph, inode._hierarchy + inode._id, iterables, iterable_prefix, inode.synchronize) # reconnect the join nodes for jnode in jnodes: # the {node id: edge data} dictionary for edges connecting # to the join node in the unexpanded graph old_edge_dict = jedge_dict[jnode] # the edge source node replicates expansions = defaultdict(list) for node in graph_in.nodes_iter(): for src_id, edge_data in old_edge_dict.iteritems(): if node._id.startswith(src_id): expansions[src_id].append(node) for in_id, in_nodes in expansions.iteritems(): logger.debug("The join node %s input %s was expanded" " to %d nodes." %(jnode, in_id, len(in_nodes))) # preserve the node iteration order by sorting on the node id for in_nodes in expansions.itervalues(): in_nodes.sort(key=lambda node: node._id) # the number of join source replicates. iter_cnt = count_iterables(iterables, inode.synchronize) # make new join node fields to connect to each replicated # join in-edge source node. slot_dicts = [jnode._add_join_item_fields() for _ in range(iter_cnt)] # for each join in-edge, connect every expanded source node # which matches on the in-edge source name to the destination # join node. Qualify each edge connect join field name by # appending the next join slot index, e.g. the connect # from two expanded nodes from field 'out_file' to join # field 'in' are qualified as ('out_file', 'in1') and # ('out_file', 'in2'), resp. This preserves connection port # integrity. for old_id, in_nodes in expansions.iteritems(): # reconnect each replication of the current join in-edge # source for in_idx, in_node in enumerate(in_nodes): olddata = old_edge_dict[old_id] newdata = deepcopy(olddata) # the (source, destination) field tuples connects = newdata['connect'] # the join fields connected to the source join_fields = [field for _, field in connects if field in jnode.joinfield] # the {field: slot fields} maps assigned to the input # node, e.g. {'image': 'imageJ3', 'mask': 'maskJ3'} # for the third join source expansion replicate of a # join node with join fields image and mask slots = slot_dicts[in_idx] for con_idx, connect in enumerate(connects): src_field, dest_field = connect # qualify a join destination field name if dest_field in slots: slot_field = slots[dest_field] connects[con_idx] = (src_field, slot_field) logger.debug("Qualified the %s -> %s join field" " %s as %s." % (in_node, jnode, dest_field, slot_field)) graph_in.add_edge(in_node, jnode, newdata) logger.debug("Connected the join node %s subgraph to the" " expanded join point %s" % (jnode, in_node)) #nx.write_dot(graph_in, '%s_post.dot' % node) # the remaining iterable nodes inodes = _iterable_nodes(graph_in) for node in graph_in.nodes(): if node.parameterization: node.parameterization = [param for _, param in sorted(node.parameterization)] logger.debug("PE: expanding iterables ... done") return _remove_nonjoin_identity_nodes(graph_in) def _iterable_nodes(graph_in): """Returns the iterable nodes in the given graph and their join dependencies. 
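Only nodes whose `iterables` attribute is set are considered.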
The nodes are ordered as follows: - nodes without an itersource precede nodes with an itersource - nodes without an itersource are sorted in reverse topological order - nodes with an itersource are sorted in topological order This order implies the following: - every iterable node without an itersource is expanded before any node with an itersource - every iterable node without an itersource is expanded before any of it's predecessor iterable nodes without an itersource - every node with an itersource is expanded before any of it's successor nodes with an itersource Return the iterable nodes list """ nodes = nx.topological_sort(graph_in) inodes = [node for node in nodes if node.iterables is not None] inodes_no_src = [node for node in inodes if not node.itersource] inodes_src = [node for node in inodes if node.itersource] inodes_no_src.reverse() return inodes_no_src + inodes_src def _standardize_iterables(node): """Converts the given iterables to a {field: function} dictionary, if necessary, where the function returns a list.""" # trivial case if not node.iterables: return iterables = node.iterables # The candidate iterable fields fields = set(node.inputs.copyable_trait_names()) # Flag indicating whether the iterables are in the alternate # synchronize form and are not converted to a standard format. synchronize = False # A synchronize iterables node without an itersource can be in # [fields, value tuples] format rather than # [(field, value list), (field, value list), ...] if node.synchronize: if len(iterables) == 2: first, last = iterables if all((isinstance(item, str) and item in fields for item in first)): iterables = _transpose_iterables(first, last) # Convert a tuple to a list if isinstance(iterables, tuple): iterables = [iterables] # Validate the standard [(field, values)] format _validate_iterables(node, iterables, fields) # Convert a list to a dictionary if isinstance(iterables, list): # Convert a values list to a function. This is a legacy # Nipype requirement with unknown rationale. if not node.itersource: iter_items = map(lambda(field, value): (field, lambda: value), iterables) iterables = dict(iter_items) node.iterables = iterables def _validate_iterables(node, iterables, fields): """ Raise TypeError if an iterables member is not iterable. Raise ValueError if an iterables member is not a (field, values) pair. Raise ValueError if an iterable field is not in the inputs. """ # The iterables can be a {field: value list} dictionary. if isinstance(iterables, dict): iterables = iterables.items() elif not isinstance(iterables, tuple) and not isinstance(iterables, list): raise ValueError("The %s iterables type is not a list or a dictionary:" " %s" % (node.name, iterables.__class__)) for item in iterables: try: if len(item) != 2: raise ValueError("The %s iterables is not a [(field, values)]" " list" % node.name) except TypeError, e: raise TypeError("A %s iterables member is not iterable: %s" % (node.name, e)) field, _ = item if field not in fields: raise ValueError("The %s iterables field is unrecognized: %s" % (node.name, field)) def _transpose_iterables(fields, values): """ Converts the given fields and tuple values into a standardized iterables value. If the input values is a synchronize iterables dictionary, then the result is a (field, {key: values}) list. Otherwise, the result is a list of (field: value list) pairs. 
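For example, with illustrative field names and values:

>>> _transpose_iterables(('fwhm', 'threshold'), [(4, 0.5), (8, 0.9)])
[('fwhm', [4, 8]), ('threshold', [0.5, 0.9])]

so that fwhm=4 is paired with threshold=0.5 and fwhm=8 with threshold=0.9
rather than taking the Cartesian product.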
""" if isinstance(values, dict): transposed = dict([(field, defaultdict(list)) for field in fields]) for key, tuples in values.iteritems(): for kvals in tuples: for idx, val in enumerate(kvals): if val != None: transposed[fields[idx]][key].append(val) return transposed.items() else: return zip(fields, [filter(lambda(v): v != None, list(transpose)) for transpose in zip(*values)]) def export_graph(graph_in, base_dir=None, show=False, use_execgraph=False, show_connectinfo=False, dotfilename='graph.dot', format='png', simple_form=True): """ Displays the graph layout of the pipeline This function requires that pygraphviz and matplotlib are available on the system. Parameters ---------- show : boolean Indicate whether to generate pygraphviz output fromn networkx. default [False] use_execgraph : boolean Indicates whether to use the specification graph or the execution graph. default [False] show_connectioninfo : boolean Indicates whether to show the edge data on the graph. This makes the graph rather cluttered. default [False] """ graph = deepcopy(graph_in) if use_execgraph: graph = generate_expanded_graph(graph) logger.debug('using execgraph') else: logger.debug('using input graph') if base_dir is None: base_dir = os.getcwd() if not os.path.exists(base_dir): os.makedirs(base_dir) outfname = fname_presuffix(dotfilename, suffix='_detailed.dot', use_ext=False, newpath=base_dir) logger.info('Creating detailed dot file: %s' % outfname) _write_detailed_dot(graph, outfname) cmd = 'dot -T%s -O %s' % (format, outfname) res = CommandLine(cmd, terminal_output='allatonce').run() if res.runtime.returncode: logger.warn('dot2png: %s', res.runtime.stderr) pklgraph = _create_dot_graph(graph, show_connectinfo, simple_form) outfname = fname_presuffix(dotfilename, suffix='.dot', use_ext=False, newpath=base_dir) nx.write_dot(pklgraph, outfname) logger.info('Creating dot file: %s' % outfname) cmd = 'dot -T%s -O %s' % (format, outfname) res = CommandLine(cmd, terminal_output='allatonce').run() if res.runtime.returncode: logger.warn('dot2png: %s', res.runtime.stderr) if show: pos = nx.graphviz_layout(pklgraph, prog='dot') nx.draw(pklgraph, pos) if show_connectinfo: nx.draw_networkx_edge_labels(pklgraph, pos) def format_dot(dotfilename, format=None): cmd = 'dot -T%s -O %s' % (format, dotfilename) CommandLine(cmd).run() logger.info('Converting dotfile: %s to %s format' % (dotfilename, format)) def make_output_dir(outdir): """Make the output_dir if it doesn't exist. 
Parameters ---------- outdir : output directory to create """ if not os.path.exists(os.path.abspath(outdir)): logger.debug("Creating %s" % outdir) os.makedirs(outdir) return outdir def get_all_files(infile): files = [infile] if infile.endswith(".img"): files.append(infile[:-4] + ".hdr") files.append(infile[:-4] + ".mat") if infile.endswith(".img.gz"): files.append(infile[:-7] + ".hdr.gz") return files def walk_outputs(object): """Extract every file and directory from a python structure """ out = [] if isinstance(object, dict): for key, val in sorted(object.items()): if isdefined(val): out.extend(walk_outputs(val)) elif isinstance(object, (list, tuple)): for val in object: if isdefined(val): out.extend(walk_outputs(val)) else: if isdefined(object) and isinstance(object, basestring): if os.path.islink(object) or os.path.isfile(object): out = [(filename, 'f') for filename in get_all_files(object)] elif os.path.isdir(object): out = [(object, 'd')] return out def walk_files(cwd): for path, _, files in os.walk(cwd): for f in files: yield os.path.join(path, f) def clean_working_directory(outputs, cwd, inputs, needed_outputs, config, files2keep=None, dirs2keep=None): """Removes all files not needed for further analysis from the directory """ if not outputs: return outputs_to_keep = outputs.get().keys() if needed_outputs and \ str2bool(config['execution']['remove_unnecessary_outputs']): outputs_to_keep = needed_outputs # build a list of needed files output_files = [] outputdict = outputs.get() for output in outputs_to_keep: output_files.extend(walk_outputs(outputdict[output])) needed_files = [path for path, type in output_files if type == 'f'] if str2bool(config['execution']['keep_inputs']): input_files = [] inputdict = inputs.get() input_files.extend(walk_outputs(inputdict)) needed_files += [path for path, type in input_files if type == 'f'] for extra in ['_0x*.json', 'provenance.*', 'pyscript*.m', 'command.txt', 'result*.pklz', '_inputs.pklz', '_node.pklz']: needed_files.extend(glob(os.path.join(cwd, extra))) if files2keep: needed_files.extend(filename_to_list(files2keep)) needed_dirs = [path for path, type in output_files if type == 'd'] if dirs2keep: needed_dirs.extend(filename_to_list(dirs2keep)) for extra in ['_nipype', '_report']: needed_dirs.extend(glob(os.path.join(cwd, extra))) temp = [] for filename in needed_files: temp.extend(get_related_files(filename)) needed_files = temp logger.debug('Needed files: %s' % (';'.join(needed_files))) logger.debug('Needed dirs: %s' % (';'.join(needed_dirs))) files2remove = [] if str2bool(config['execution']['remove_unnecessary_outputs']): for f in walk_files(cwd): if f not in needed_files: if len(needed_dirs) == 0: files2remove.append(f) elif not any([f.startswith(dname) for dname in needed_dirs]): files2remove.append(f) else: if not str2bool(config['execution']['keep_inputs']): input_files = [] inputdict = inputs.get() input_files.extend(walk_outputs(inputdict)) input_files = [path for path, type in input_files if type == 'f'] for f in walk_files(cwd): if f in input_files and f not in needed_files: files2remove.append(f) logger.debug('Removing files: %s' % (';'.join(files2remove))) for f in files2remove: os.remove(f) for key in outputs.copyable_trait_names(): if key not in outputs_to_keep: setattr(outputs, key, Undefined) return outputs def merge_dict(d1, d2, merge=lambda x, y: y): """ Merges two dictionaries, non-destructively, combining values on duplicate keys as defined by the optional merge function. 
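Nested dictionaries are merged recursively; for example,
merge_dict({'a': {'x': 1}}, {'a': {'y': 2}}) yields {'a': {'x': 1, 'y': 2}}.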
The default behavior replaces the values in d1 with corresponding values in d2. (There is no other generally applicable merge strategy, but often you'll have homogeneous types in your dicts, so specifying a merge technique can be valuable.) Examples: >>> d1 = {'a': 1, 'c': 3, 'b': 2} >>> merge_dict(d1, d1) {'a': 1, 'c': 3, 'b': 2} >>> merge_dict(d1, d1, lambda x,y: x+y) {'a': 2, 'c': 6, 'b': 4} """ if not isinstance(d1, dict): return merge(d1, d2) result = dict(d1) if d2 is None: return result for k, v in d2.iteritems(): if k in result: result[k] = merge_dict(result[k], v, merge=merge) else: result[k] = v return result def merge_bundles(g1, g2): for rec in g2.get_records(): g1._add_record(rec) return g1 def write_workflow_prov(graph, filename=None, format='turtle'): """Write W3C PROV Model JSON file """ if not filename: filename = os.path.join(os.getcwd(), 'workflow_provenance') ps = ProvStore() processes = [] nodes = graph.nodes() for idx, node in enumerate(nodes): result = node.result classname = node._interface.__class__.__name__ _, hashval, _, _ = node.hash_exists() attrs = {pm.PROV["type"]: nipype_ns[classname], pm.PROV["label"]: '_'.join((classname, node.name)), nipype_ns['hashval']: hashval} process = ps.g.activity(get_id(), None, None, attrs) if isinstance(result.runtime, list): process.add_extra_attributes({pm.PROV["type"]: nipype_ns["MapNode"]}) # add info about sub processes for idx, runtime in enumerate(result.runtime): subresult = InterfaceResult(result.interface[idx], runtime, outputs={}) if result.inputs: subresult.inputs = result.inputs[idx] if result.outputs: for key, value in result.outputs.items(): values = getattr(result.outputs, key) if isdefined(values): subresult.outputs[key] = values[idx] sub_bundle = ProvStore().add_results(subresult) ps.g = merge_bundles(ps.g, sub_bundle) ps.g.wasGeneratedBy(sub_bundle, process) else: process.add_extra_attributes({pm.PROV["type"]: nipype_ns["Node"]}) result_bundle = ProvStore().add_results(result) ps.g = merge_bundles(ps.g, result_bundle) ps.g.wasGeneratedBy(result_bundle, process) processes.append(process) # add dependencies (edges) # Process->Process for idx, edgeinfo in enumerate(graph.in_edges_iter()): ps.g.wasStartedBy(processes[nodes.index(edgeinfo[1])], starter=processes[nodes.index(edgeinfo[0])]) # write provenance try: if format in ['turtle', 'all']: ps.g.rdf().serialize(filename + '.ttl', format='turtle') except (ImportError, NameError): format = 'all' finally: if format in ['provn', 'all']: with open(filename + '.provn', 'wt') as fp: fp.writelines(ps.g.get_provn()) if format in ['json', 'all']: with open(filename + '.json', 'wt') as fp: pm.json.dump(ps.g, fp, cls=pm.ProvBundle.JSONEncoder) return ps.g def topological_sort(graph, depth_first=True): nodesort = nx.topological_sort(graph) if not depth_first: return nodesort, None logger.debug("Performing depth first search") nodes=[] groups=[] group=0 G = nx.Graph() G.add_nodes_from(graph.nodes()) G.add_edges_from(graph.edges()) components = nx.connected_components(G) for desc in components: group += 1 indices = [] for node in desc: indices.append(nodesort.index(node)) nodes.extend(np.array(nodesort)[np.array(indices)[np.argsort(indices)]].tolist()) for node in desc: nodesort.remove(node) groups.extend([group] * len(desc)) return nodes, groups nipype-0.9.2/nipype/pkg_info.py000066400000000000000000000057021227300005300164740ustar00rootroot00000000000000import os import sys import subprocess from ConfigParser import ConfigParser COMMIT_INFO_FNAME = 'COMMIT_INFO.txt' def 
pkg_commit_hash(pkg_path): ''' Get short form of commit hash given directory `pkg_path` There should be a file called 'COMMIT_INFO.txt' in `pkg_path`. This is a file in INI file format, with at least one section: ``commit hash``, and two variables ``archive_subst_hash`` and ``install_hash``. The first has a substitution pattern in it which may have been filled by the execution of ``git archive`` if this is an archive generated that way. The second is filled in by the installation, if the installation is from a git archive. We get the commit hash from (in order of preference): * A substituted value in ``archive_subst_hash`` * A written commit hash value in ``install_hash` * git's output, if we are in a git repository If all these fail, we return a not-found placeholder tuple Parameters ---------- pkg_path : str directory containing package Returns ------- hash_from : str Where we got the hash from - description hash_str : str short form of hash ''' # Try and get commit from written commit text file pth = os.path.join(pkg_path, COMMIT_INFO_FNAME) if not os.path.isfile(pth): raise IOError('Missing commit info file %s' % pth) cfg_parser = ConfigParser() cfg_parser.read(pth) archive_subst = cfg_parser.get('commit hash', 'archive_subst_hash') if not archive_subst.startswith('$Format'): # it has been substituted return 'archive substitution', archive_subst install_subst = cfg_parser.get('commit hash', 'install_hash') if install_subst != '': return 'installation', install_subst # maybe we are in a repository proc = subprocess.Popen('git rev-parse HEAD', stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=pkg_path, shell=True) repo_commit, _ = proc.communicate() if repo_commit: return 'repository', repo_commit.strip() return '(none found)', '' def get_pkg_info(pkg_path): ''' Return dict describing the context of this package Parameters ---------- pkg_path : str path containing __init__.py for package Returns ------- context : dict with named parameters of interest ''' src, hsh = pkg_commit_hash(pkg_path) import networkx import nibabel import numpy import scipy import traits return dict( pkg_path=pkg_path, commit_source=src, commit_hash=hsh, sys_version=sys.version, sys_executable=sys.executable, sys_platform=sys.platform, numpy_version=numpy.__version__, scipy_version=scipy.__version__, networkx_version=networkx.__version__, nibabel_version=nibabel.__version__, traits_version=traits.__version__) nipype-0.9.2/nipype/setup.py000066400000000000000000000015301227300005300160330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('nipype', parent_package, top_path) # List all packages to be loaded here config.add_subpackage('algorithms') config.add_subpackage('interfaces') config.add_subpackage('pipeline') config.add_subpackage('utils') config.add_subpackage('caching') config.add_subpackage('testing') config.add_subpackage('workflows') config.add_subpackage('external') config.add_subpackage('fixes') # List all data directories to be loaded here return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/testing/000077500000000000000000000000001227300005300157775ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/README000066400000000000000000000005411227300005300166570ustar00rootroot00000000000000The 
numpytesting directory contains a copy of all the files from numpy/testing for numpy version 1.3. This provides all the test integration with the nose test framework we need to run the nipype tests. By including these files, nipype can now run on systems that only have numpy 1.1, like Debian Lenny. This feature was added by Yaroslav Halchenko. nipype-0.9.2/nipype/testing/__init__.py000066400000000000000000000026501227300005300201130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The testing directory contains a small set of imaging files to be used for doctests only. More thorough tests and example data will be stored in a nipy data packages that you can download separately. .. note: We use the ``nose`` testing framework for tests. Nose is a dependency for the tests, but should not be a dependency for running the algorithms in the NIPY library. This file should import without nose being present on the python path. """ import os # Discover directory path filepath = os.path.abspath(__file__) basedir = os.path.dirname(filepath) funcfile = os.path.join(basedir, 'data', 'functional.nii') anatfile = os.path.join(basedir, 'data', 'structural.nii') template = funcfile transfm = funcfile from nose.tools import * from numpy.testing import * from . import decorators as dec from .utils import skip_if_no_package, package_check skipif = dec.skipif def example_data(infile='functional.nii'): """returns path to empty example data files for doc tests it will raise an exception if filename is not in the directory""" filepath = os.path.abspath(__file__) basedir = os.path.dirname(filepath) outfile = os.path.join(basedir, 'data', infile) if not os.path.exists(outfile): raise IOError('%s empty data file does NOT exist'%(outfile)) return outfile nipype-0.9.2/nipype/testing/data/000077500000000000000000000000001227300005300167105ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/4d_dwi.nii000066400000000000000000000000001227300005300205510ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/A.scheme000066400000000000000000000000001227300005300202440ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/A_qmat.Bdouble000066400000000000000000000000001227300005300213760ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/A_recon_params.Bdouble000066400000000000000000000000001227300005300231050ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/FLASH1.mgz000066400000000000000000000000001227300005300203330ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/FLASH2.mgz000066400000000000000000000000001227300005300203340ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/FLASH3.mgz000066400000000000000000000000001227300005300203350ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/FreeSurferColorLUT_adapted_aparc+aseg_out.pck000066400000000000000000000000001227300005300274630ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/MASK_average_thal_right.nii000066400000000000000000000000001227300005300240310ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/PD.mgz000066400000000000000000000000001227300005300177200ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/QSH_peaks.Bdouble000066400000000000000000000000001227300005300220120ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/README000066400000000000000000000006211227300005300175670ustar00rootroot00000000000000This directory contains empty, dummy files which 
are meant to be used in the doctests of nipype. For verion 0.3 of nipype, we're using Traits and for input files, the code checks to confirm the assigned files actually exist. It doesn't matter what the files are, or even if they contain "real data", only that they exist. Again, these files are only meant to serve as documentation in the doctests.nipype-0.9.2/nipype/testing/data/ROI_scale500.nii.gz000066400000000000000000000000001227300005300220430ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/SPM.mat000066400000000000000000000000001227300005300200400ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/SubjectA.Bfloat000066400000000000000000000000001227300005300215270ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/T1.mgz000066400000000000000000000000001227300005300177010ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/T1.nii000066400000000000000000000000001227300005300176630ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/TPM.nii000066400000000000000000000000001227300005300200370ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/Template_1_IXI550_MNI152.nii000066400000000000000000000000001227300005300232700ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/Template_6.nii000066400000000000000000000000001227300005300213770ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/afni_output.3D000066400000000000000000000000001227300005300214230ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/allFA.nii000066400000000000000000000000001227300005300203560ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/all_FA.nii.gz000066400000000000000000000000001227300005300211340ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/anat_coreg.mif000066400000000000000000000000001227300005300214750ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/anatomical.nii000066400000000000000000000000001227300005300215070ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/ants_Affine.txt000066400000000000000000000000001227300005300216540ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/ants_Warp.nii.gz000066400000000000000000000000001227300005300217540ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/ants_deformed.nii.gz000066400000000000000000000000001227300005300226300ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/aparc+aseg.nii000066400000000000000000000000001227300005300214000ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/aseg.mgz000066400000000000000000000000001227300005300203340ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/atlas.nii.gz000066400000000000000000000000001227300005300211220ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/b0_b0rev.nii000066400000000000000000000000001227300005300207760ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/ballstickfit_data.Bfloat000066400000000000000000000000001227300005300234730ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/bedpostxout/000077500000000000000000000000001227300005300212705ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/bedpostxout/do_not_delete.txt000066400000000000000000000000731227300005300246350ustar00rootroot00000000000000This file has to be here because git ignores empty 
folders.nipype-0.9.2/nipype/testing/data/brain_mask.nii000066400000000000000000000000001227300005300215050ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/brain_track.Bdouble000066400000000000000000000000001227300005300224530ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/bvals000066400000000000000000000000001227300005300177300ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/bvals.scheme000066400000000000000000000000001227300005300211730ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/bvecs000066400000000000000000000000001227300005300177230ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/bvecs.scheme000066400000000000000000000000001227300005300211660ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/c1s1.nii000066400000000000000000000000001227300005300201460ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/c1s3.nii000066400000000000000000000000001227300005300201500ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/clustering.mat000066400000000000000000000000001227300005300215600ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cmatrix.mat000066400000000000000000000000001227300005300210500ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/complex.nii000066400000000000000000000000001227300005300210460ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cont1.nii000066400000000000000000000000001227300005300204230ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cont1a.nii000066400000000000000000000000001227300005300205640ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cont2.nii000066400000000000000000000000001227300005300204240ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cont2a.nii000066400000000000000000000000001227300005300205650ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/converted.trk000066400000000000000000000000001227300005300214110ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cope.nii.gz000066400000000000000000000000001227300005300207440ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cope1.nii.gz000066400000000000000000000000001227300005300210250ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cope1run1.nii.gz000066400000000000000000000000001227300005300216330ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cope1run2.nii.gz000066400000000000000000000000001227300005300216340ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cope2run1.nii.gz000066400000000000000000000000001227300005300216340ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cope2run2.nii.gz000066400000000000000000000000001227300005300216350ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cortex.label000066400000000000000000000000001227300005300212030ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/cov_split.mat000066400000000000000000000000001227300005300214030ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/csd.mif000066400000000000000000000000001227300005300201440ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/data.Bfloat000066400000000000000000000000001227300005300207400ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/degree.csv000066400000000000000000000000001227300005300206460ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/degree.mat000066400000000000000000000000001227300005300206340ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/design.con00006640000000000000
0000000000001227300005300206500ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/design.mat000066400000000000000000000000001227300005300206520ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dicomdir/000077500000000000000000000000001227300005300205025ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dicomdir/123456-1-1.dcm000066400000000000000000000000001227300005300222150ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/diffusion.nii000066400000000000000000000000001227300005300213650ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/diffusion_weighted.nii000066400000000000000000000000001227300005300232450ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dirs.txt000066400000000000000000000000001227300005300204000ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dofrun1000066400000000000000000000000001227300005300201770ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dofrun2000066400000000000000000000000001227300005300202000ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dteig.Bdouble000066400000000000000000000000001227300005300212700ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dwi.mif000066400000000000000000000000001227300005300201560ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dwi2anat_InverseWarp.nii.gz000066400000000000000000000000001227300005300240540ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dwi2anat_Warp.nii.gz000066400000000000000000000000001227300005300225200ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dwi2anat_coreg_Affine.txt000066400000000000000000000000001227300005300235770ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dwi_CSD_tracked.tck000066400000000000000000000000001227300005300223520ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dwi_FA.mif000066400000000000000000000000001227300005300205240ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dwi_WMProb.mif000066400000000000000000000000001227300005300214040ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/dwi_tensor.mif000066400000000000000000000000001227300005300215500ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/encoding.txt000066400000000000000000000000001227300005300212250ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/epi.nii000066400000000000000000000000001227300005300201540ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/epi_acqp.txt000066400000000000000000000000001227300005300212200ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/epi_index.txt000066400000000000000000000000001227300005300214030ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/epi_mask.nii000066400000000000000000000000001227300005300211670ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/epi_rev.nii000066400000000000000000000000001227300005300210300ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/fa.nii.gz000066400000000000000000000000001227300005300204040ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/fitted_data1.Bfloat000066400000000000000000000000001227300005300223600ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/fitted_data2.Bfloat000066400000000000000000000000001227300005300223610ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/fixed1.nii000066400000000000000000000000001227300005300205570ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/fixed2.nii00006640000000000000000000000
0001227300005300205600ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/flash_05.mgz000066400000000000000000000000001227300005300210160ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/flash_30.mgz000066400000000000000000000000001227300005300210140ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/flirt.mat000066400000000000000000000000001227300005300205210ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/fmri_timeseries.csv000066400000000000000000002026341227300005300226220ustar00rootroot00000000000000"WM","Vent","Brain","LCau","LPut","LThal","LFpol","LAng","LSupraM","LMTG","LHip","LPostPHG","APHG","LAmy","LParaCing","LPCC","LPrec","RCau","RPut","RThal","RFpol","RAng","RSupraM","RMTG","RHip","RPostPHG","RAntPHG","RAmy","RParaCing","RPCC","RPrec" 10125.9,10112.8,9219.5,-7.39443,-8.74936,7.28395,13.7953,32.2328,32.4809,18.958,-12.2383,-6.86466,-23.0912,-16.425,-5.70842,11.2467,-1.58574,-4.53717,-17.3842,0.912601,13.0428,2.44622,2.08875,-8.74373,-9.47217,-6.87574,-8.11158,-14.54,0.414787,6.04424,0.540389 10136.8,10115.1,9222.54,-0.120582,-1.94906,6.92247,4.75197,11.0735,0.972766,10.2285,0.717545,-1.04488,-7.64424,-2.10875,-2.44368,1.52535,-1.14131,-1.72589,-1.1247,-0.993354,2.98318,1.29855,2.0688,1.00297,0.135373,-3.25325,-3.12065,0.913296,-1.7868,1.58829,-0.735248 10148,10122.2,9228.62,4.24336,-0.689111,5.12782,0.132862,-6.64526,-14.7952,5.19361,3.68198,2.77598,-0.691866,1.07559,1.71444,-1.30287,-2.75746,1.74208,4.75944,1.80799,-0.064464,2.37174,1.09905,3.5756,2.98064,-0.238711,0.822007,5.07188,-0.864496,-0.208741,-1.31367 10156.6,10132.2,9236.11,-0.047434,-1.79438,-0.767925,-3.78683,-2.46365,-12.9433,2.00586,-0.48292,1.16216,0.113706,-0.639879,-0.0445654,-2.82995,-2.22008,1.46544,3.70217,2.84476,-3.32792,6.701,0.982599,0.145487,0.0501163,-1.16747,-0.630382,-0.0550437,-0.0563951,0.0449386,-0.715988 10162.9,10141.8,9243.46,-0.3687,0.640608,-2.93969,-0.37466,-5.42813,-8.55527,-4.70566,-3.62351,-3.94857,0.847112,0.357187,1.39279,-3.07124,0.779726,5.12671,3.62277,2.86265,3.44378,5.49842,0.895482,-2.1777,0.14728,-0.491475,-0.0257423,-0.32504,2.28464,-0.610659,2.01955 10168.7,10149.5,9249.62,-0.272231,3.00751,-2.20783,-5.50238,-1.65733,-2.39574,-6.82249,-1.5591,-5.38806,-0.315138,2.41171,-0.227563,-0.306796,1.26618,4.45885,3.55662,3.14737,-0.0497907,2.76691,1.04757,-2.50276,3.25334,1.90194,3.54754,3.2308,0.393197,0.115407,1.88919 10175.3,10155.8,9253.09,0.271133,3.11725,-1.24188,-5.32432,6.94595,5.40219,2.63329,1.77742,-0.434798,3.20784,3.1926,-2.12653,1.4207,-0.162939,1.57116,1.20026,2.14004,-4.36978,-0.074248,0.344989,-2.79157,3.57441,2.795,6.81971,4.61981,-3.15395,-0.556388,-0.951462 10181,10160.9,9253.62,-1.52186,-1.02665,-1.31765,-8.89055,1.45638,-6.40533,-8.20284,3.42071,6.34151,7.32703,2.81444,-5.56924,-2.07761,-2.82472,1.75969,1.56549,2.59032,-4.99642,-0.861721,0.661704,1.27294,4.24609,5.72265,7.93181,6.46356,-4.54558,-2.93302,-2.55741 10182,10163.1,9253.53,-4.12759,-5.01517,-1.383,-11.7032,7.03273,-0.354258,-4.14846,2.56836,5.49077,2.70724,-0.00938943,-7.91268,-3.33257,-3.77932,-2.70035,-1.95288,1.51899,-10.5021,0.604386,1.13765,2.8031,0.719838,5.10986,5.4321,3.01561,-5.05514,-2.51591,-2.29453 10178.9,10161.7,9255.33,-2.09727,-3.23639,-0.971464,-6.47564,-1.86208,1.47429,-8.69004,2.23012,2.64935,4.20852,-0.00802028,-4.11236,-1.54808,-1.73414,-2.21966,-2.31888,0.521142,-4.49634,-1.66003,1.37105,1.47741,-1.17943,3.52554,2.31201,0.381259,-1.24137,-0.930002,-0.860505 
10176.3,10158.2,9258.8,-2.87976,-1.16821,-1.15587,-7.36873,-2.70663,3.69409,-6.23946,3.17083,3.67683,5.95472,2.6739,-2.5798,1.61294,2.31642,-4.31408,-1.6647,-0.422612,-6.13843,-0.39141,1.92345,-2.82275,-0.742784,1.68164,-0.706688,-1.87652,0.172975,1.51911,1.04727 10176.2,10155.4,9261.93,-1.79655,0.511159,-2.91648,-1.19976,-6.01265,2.43062,-4.91165,1.64787,2.485,6.04132,2.79139,1.36683,2.36631,4.70105,-3.09068,-0.875835,-2.73203,-1.04036,0.0279962,0.57264,-4.70596,0.399049,0.109101,0.540718,-2.52779,1.90878,1.47212,2.48712 10177,10154.3,9263.36,-2.06935,1.47151,-1.59814,1.1621,-8.21806,2.74994,-4.8666,1.6535,2.86737,3.56179,1.87379,3.98852,2.20191,7.00018,-2.12026,-0.322149,-0.459427,1.99009,-0.386875,-1.65524,-2.88602,2.5405,3.09752,5.52644,1.72241,3.28467,2.06659,4.48929 10176.7,10153.6,9262.97,-2.47996,0.0736981,-1.18826,-1.40068,-2.38119,-1.33094,-3.87199,0.498621,1.31667,-0.952908,0.481976,0.0885501,1.11339,4.67043,-2.37383,-2.32579,0.991108,-0.25346,2.41941,-1.44295,0.0394728,1.67752,2.73018,4.10445,2.29859,0.993454,2.7469,3.39394 10174.9,10153,9261.77,-0.957748,-0.455644,0.885525,1.7746,0.0437147,0.878291,0.0855234,-0.572903,1.39546,0.00119098,1.69176,-1.96049,0.156938,2.84845,-1.18488,-2.65197,1.35428,1.98606,1.65427,-0.643756,-1.03602,-0.0406435,-0.236011,-0.961959,1.28125,-0.464305,1.75539,1.84618 10173.4,10153.5,9261.3,-0.583682,-0.792331,1.36077,0.644185,-3.55594,-0.618864,-4.88099,-0.136266,1.51362,2.73872,3.65897,-2.63062,0.416981,0.735765,0.533665,-0.326252,1.0146,2.83848,2.16063,2.30307,-2.01136,0.638055,-0.22921,-3.19692,0.947596,-0.379132,0.678065,0.747812 10174.5,10155.7,9262.24,-0.685336,0.856591,-2.63545,-0.959601,3.25442,0.791955,-2.20612,0.263046,-1.34292,4.47114,2.99912,-2.56858,-0.21931,-1.56389,-0.808263,0.311028,-2.34261,-0.965718,1.98615,3.50723,-1.41951,-0.258476,-1.16227,-1.73014,0.372641,-0.118946,-0.422557,-1.3986 10179.6,10157.8,9264.01,2.59538,3.68921,-1.9033,3.99249,0.109215,-1.86778,-4.51336,0.591929,-1.29086,1.52475,1.01934,0.773735,0.0652847,-3.00075,1.79923,2.1369,-2.11635,3.17035,-1.87907,2.19309,0.880052,-0.480886,-1.94369,-0.204693,1.63785,1.43004,-2.081,-3.24652 10186.9,10157.6,9265.4,2.10402,4.02633,0.884264,0.1708,-3.27208,-4.9215,-1.0364,1.60796,1.70888,-1.43476,1.10519,1.26841,0.0627916,-2.97727,1.13683,2.82663,-0.301705,-0.592683,-3.81587,-0.70989,1.60855,0.103857,-2.48043,-1.22737,-0.312858,1.31617,-1.91269,-3.98886 10192.2,10155.4,9265.29,1.6824,4.26755,1.57687,1.43194,-5.98808,-2.25097,0.153789,0.168572,0.879003,1.68604,0.75956,3.65922,-0.869793,-2.49312,0.497574,2.41553,-1.34226,-0.127659,-3.59295,-1.56547,0.88849,-0.785242,-4.24845,-5.15572,-4.81836,2.77035,-1.44493,-3.44434 10193.6,10153.7,9263.38,1.6491,4.80854,1.08823,5.10222,-5.26833,5.52263,-0.997094,-0.959485,-1.52356,6.15147,0.897033,7.60472,-1.50848,-0.576994,0.845199,3.25263,-2.21353,2.36454,-2.11918,-0.480371,1.405,-1.24949,-1.88424,-5.50221,-4.39822,4.6832,-0.575266,-0.350337 10193.7,10153.5,9260.14,0.371243,3.4575,-0.922956,2.86612,3.70316,4.4652,-2.35097,-2.08567,-4.55866,2.05406,0.20181,5.48777,-0.851734,-0.932792,0.852325,2.66059,-2.76402,-0.836483,3.32512,2.58318,3.54953,-1.82575,1.03107,-3.58566,-4.1055,2.71087,0.64122,1.16036 10193.4,10154.1,9256.45,0.655998,2.95689,-0.961572,2.95967,6.90968,-0.0847335,-1.13659,-2.64581,-3.78971,-2.43015,-0.722449,3.08777,-0.234356,-0.603156,1.30068,1.14368,-2.23215,0.241084,3.91588,3.38796,4.07024,-1.08082,1.15617,-0.375163,-2.54369,1.29418,0.795869,1.31402 
10190.3,10152.8,9253.2,2.59279,1.93007,1.93861,4.82647,-1.84288,-5.84018,-7.03235,-2.16958,-0.8999,-4.4747,-1.99497,2.40008,0.0349671,-0.825783,2.00993,-0.184404,-0.576706,6.30193,1.43455,3.63536,2.34484,0.148851,-1.22127,-0.718508,-0.716753,1.50537,0.412978,0.73252 10185.2,10148.2,9250.73,1.88291,-0.127643,2.41457,0.38457,3.28565,2.40364,1.07674,-0.352091,-0.192694,-2.80281,-2.45121,-0.746935,0.454781,-0.345492,-2.38393,-2.35152,-0.468918,-0.28004,0.207449,2.6636,-1.39254,-2.09536,-4.44811,-4.48824,-2.93117,-0.770421,1.19,0.219788 10183,10142.2,9248.93,3.78484,0.701338,-0.71552,3.48407,0.454755,4.3743,3.68099,-0.668556,-3.42636,5.52772,-1.23863,-0.405148,0.665698,1.06479,-0.0251586,-0.48849,-0.847741,1.4814,-5.36764,-0.405219,-1.51485,-3.88226,-5.12764,-5.33767,-4.3365,-1.173,0.417418,0.415356 10185.4,10138.4,9247.93,3.11727,0.196163,-2.018,0.721283,-2.5075,-1.06349,0.331823,-1.2182,-4.01712,4.78444,0.452166,-2.16432,0.55673,1.61447,1.16718,1.44415,0.569846,-0.812131,-8.14324,-2.91296,2.43154,-1.45218,-0.730675,-1.0947,-2.25658,-3.52675,-0.361214,1.09266 10188,10139,9248.05,1.52249,-1.16117,-2.4591,-2.41492,-0.35832,-7.48161,-0.0490082,-2.1421,-3.52013,0.903896,-0.958215,-5.8036,-2.36788,-0.368615,-1.88998,-1.40466,-1.28791,-4.79995,-5.58563,-3.57656,4.13739,-0.274441,1.53352,2.93946,-1.96753,-6.76034,-1.87752,-0.324793 10186.8,10142.9,9249.23,2.29541,-0.414867,0.263844,-2.42527,-9.23597,-12.7958,-5.40665,-1.3296,-0.255947,1.05195,-3.09731,-3.83996,-4.40177,-0.0123634,-1.79533,-2.22933,-1.59891,-1.58539,-4.29444,-3.24283,2.73497,0.939395,2.25632,3.98042,0.672842,-4.87272,-3.0871,0.140664 10183.8,10146.3,9250.93,1.04007,-0.107056,-0.719832,-5.17314,-6.41206,-13.4527,-3.51115,-1.82372,-1.0661,0.164654,-4.87432,-3.16371,-3.16216,0.547311,-2.31938,-3.32366,-2.59406,-3.07878,1.07584,0.135595,-0.15385,-0.198986,-1.76614,-0.364142,-1.44816,-3.17832,-0.666637,0.539005 10182.5,10148.1,9252.57,1.58315,0.552138,-2.38854,1.84879,-2.25441,-6.8381,0.208721,-2.73312,-3.19332,-2.49192,-4.21087,0.445019,0.0651566,2.67403,-0.780414,-2.43461,-3.10543,1.48742,-0.123359,0.0321366,-2.00728,-1.30717,-5.02137,-5.05394,-3.39985,-0.233706,2.10556,1.51466 10182.7,10149.6,9253.33,0.671616,-1.8801,-5.19861,1.6691,-0.386439,-6.73637,0.390118,-1.36276,-2.8229,-3.74619,-1.53148,0.15594,0.934737,1.96014,-1.35363,-0.924511,-3.00858,0.653744,-1.84706,-3.59509,-0.247233,0.962108,-1.40552,-3.28119,-2.22432,0.0626129,2.48273,0.969888 10182.9,10150.9,9252.01,0.0166707,-2.52456,-5.48285,2.26653,-2.03587,-6.50283,-1.00325,0.264499,-1.46362,-0.822672,-1.11829,0.403605,-0.734484,-0.382999,-0.186567,1.24812,-2.13095,1.80897,-2.82131,-6.15356,2.54337,2.39696,2.51379,2.41699,0.307725,-0.195503,-0.252349,-0.890546 10182.1,10151,9248.33,-1.21698,-1.52567,-2.334,0.102378,3.74418,-1.36756,3.51501,1.50357,-1.80774,-0.855037,-2.71284,0.0746735,-1.2904,-2.37263,-0.326812,1.37779,0.0811662,-2.04277,0.452769,-4.37491,4.60025,0.785458,0.944597,2.57121,-0.443829,-1.9031,-1.78376,-2.25217 10180.2,10149.4,9243.85,-0.498632,0.815261,-1.05027,1.32586,2.65892,-5.17029,-0.588453,1.63481,-3.33979,4.4087,-1.26981,2.01576,-3.03953,-3.66687,1.33091,1.62961,0.568999,0.53543,0.477935,-1.78405,3.91722,-1.12653,-3.07327,-2.27103,-2.21119,-0.0469714,-3.05949,-3.83303 10176.1,10146.3,9240.54,-0.464849,1.25223,-1.14736,-0.645201,4.96922,-0.805424,1.85313,1.43677,-1.45072,6.22509,1.54511,2.89442,-3.56094,-4.35854,-0.476689,0.39343,-0.929162,-1.07774,0.941846,-0.57756,0.363373,-1.13491,-1.30865,-3.06369,-1.8739,2.47973,-3.19611,-5.38414 
10169.3,10142.4,9238.91,2.28739,1.91951,-0.759834,1.17008,-1.10807,0.137649,-1.76481,-0.427729,-0.592675,2.50623,0.607717,4.10404,-2.20382,-5.11375,1.80008,0.383348,-3.40396,4.33491,0.605228,-0.0871236,0.185566,0.480246,2.74078,1.48145,2.07534,4.96863,-2.65852,-5.78272 10162.1,10139,9238.14,2.03262,2.32633,0.46709,-2.26524,5.80967,5.85587,5.67759,0.185696,-0.246666,-0.787877,-0.201738,0.61348,-0.542043,-3.51173,0.345287,-0.426571,-4.01566,0.315299,2.10005,-0.391753,2.39343,1.28396,3,4.99164,5.3145,2.31592,0.0224444,-4.14279 10158.4,10136.9,9237.31,2.77556,2.83113,1.37245,1.19159,2.19923,-2.0116,3.1913,1.03754,-0.929092,0.870894,1.00256,-0.624392,-0.561338,-2.99529,2.23674,0.823539,-1.63024,3.75817,0.298891,-1.18515,4.54738,1.25951,1.91277,3.57793,5.44217,0.785618,0.025315,-3.27161 10158.5,10135.5,9236.37,0.0672571,0.761886,2.35427,-0.889999,6.73976,-1.98269,8.45302,1.1398,0.0604089,-1.15193,1.32222,-2.47069,0.131408,-3.48238,-0.669944,0.753279,3.07189,-2.04262,0.174304,-2.32107,2.83224,0.708328,3.23848,0.984911,2.384,-1.28385,-0.548071,-3.32946 10160.6,10134.8,9236.46,-0.783525,0.239203,0.00548465,1.88108,6.83171,-2.89703,7.27976,-2.71585,-1.47417,2.12383,-1.04536,-1.14095,0.145875,-4.3962,-0.139564,0.781551,3.40043,-0.28834,-0.343608,-2.36391,0.0938093,-0.36295,1.0276,-0.578692,-0.619797,-0.489157,-1.92106,-4.163 10166.1,10135,9239.02,0.124276,1.29463,-1.44975,3.21172,2.53479,-3.38317,-0.20102,-4.72755,-2.14129,5.53743,-1.24849,0.994366,0.436372,-3.09635,2.19121,1.13794,1.52365,3.0586,0.622146,-0.699363,0.103461,0.316277,-1.73095,-0.195395,0.490618,1.44514,-2.50878,-3.62472 10175.6,10136.9,9243.9,1.67228,1.70099,-0.125799,2.04051,6.74509,2.05118,7.82124,-3.08565,-1.70842,3.37127,-0.160655,1.32998,0.57087,-1.46351,1.80831,-0.585194,-0.267853,0.719624,2.12333,-0.931791,2.61407,0.519467,-1.78038,1.70819,2.66646,1.47407,-2.48388,-2.6294 10184.4,10140.5,9249.09,4.05746,1.49391,3.1491,4.74869,1.42089,-7.65297,4.6083,-1.50292,-0.681543,0.792377,-1.54194,2.19467,-1.449,-2.54459,5.38937,-0.0662613,0.683022,6.46847,-1.151,-2.09676,5.40097,0.0884146,-0.584039,0.411805,2.87021,2.70096,-3.69024,-2.72328 10185.2,10143.8,9252.71,2.20708,-1.9117,6.2705,-1.38994,9.88462,0.984595,14.8745,1.09177,3.01497,-6.59006,-3.06879,0.864155,-0.352553,-2.42934,1.6214,-0.899998,2.90809,-2.62154,-0.748965,-1.78716,3.1828,-0.76616,1.51574,-1.80336,0.759499,1.08543,-1.48814,-0.830864 10176.5,10145.2,9254.8,3.08758,-1.24415,2.30133,1.5123,4.9996,-2.25743,5.71269,0.326257,0.862459,-5.32366,-2.15784,1.98295,-0.769376,-3.24456,1.73394,-1.18022,0.303592,1.19388,-1.18318,1.1848,-0.484859,-3.12715,-2.31674,-4.16244,-1.41399,2.32149,-1.0187,-1.70219 10164.6,10145.4,9256.92,1.59078,-1.06701,-0.557541,-2.88977,3.22953,-0.245042,-0.474481,0.0498212,-1.16809,-8.33134,-0.306573,0.38113,0.242976,-2.39828,-1.29092,-1.68013,-0.127576,-1.94114,1.03024,1.7825,-1.44807,-2.86352,-4.13379,-1.78466,1.5241,1.16147,-0.513496,-2.30027 10156.4,10145.9,9260.21,0.0333157,-1.40254,-1.63643,-2.63202,2.15792,2.8366,-1.32406,-2.25364,-4.61227,-7.74587,-1.005,0.107792,-0.131513,-2.0428,-1.28031,-1.65736,-0.0589992,-0.767749,0.0451012,-1.23948,0.334266,-2.05544,-5.74107,1.40617,2.47259,0.129519,-1.22605,-3.50154 10152.5,10145.2,9264.25,-2.23854,-3.34598,0.871046,-4.48776,-5.12246,-0.367558,-7.49548,-3.04105,-2.99035,-3.84367,-2.67766,1.19195,0.695189,-1.99211,2.38266,0.800284,2.92667,1.82052,-0.796218,-1.82753,3.43662,1.60186,-2.49788,2.02216,2.59346,0.975508,-0.397427,-2.78437 
10148.6,10141.1,9267.56,-4.64613,-5.4569,3.80281,-6.22039,0.554038,5.00519,-0.395733,-3.04225,0.570141,-6.95862,-4.49105,-0.00732036,3.78285,-2.09066,1.46914,-0.873643,3.95228,-2.08532,2.8568,0.749314,1.78963,1.02579,-0.808831,-1.60113,-1.17483,0.544949,1.95805,-1.27827 10142.4,10134.6,9268.73,-4.02228,-5.3818,4.39201,-6.57399,-2.68308,-0.146626,-0.297909,-1.28233,3.72363,-10.5635,-3.46562,-0.498293,3.92457,-1.10422,0.725311,-0.888612,3.1725,-1.82837,4.64182,1.32637,-0.56378,0.781271,3.29557,-0.557202,-0.712584,0.587691,2.76212,1.05325 10137.8,10128,9266.83,-2.98689,-3.62614,2.49614,-3.78405,5.33483,-3.24499,-1.4797,-1.49474,0.75769,-13.0722,-3.57543,-1.73535,1.13307,-2.81826,-2.67056,-2.75063,-0.407379,-1.38965,7.67619,2.2374,-2.93415,-2.1994,0.956463,-2.25511,-4.42128,-0.889014,2.30781,-0.144069 10139.6,10121.2,9261.84,-1.19244,-2.09691,-1.17019,-2.92359,1.84257,-9.64131,-8.2266,-2.48032,-2.29368,-7.41116,-3.60172,0.404837,-2.31741,-3.52505,-1.14341,-1.1367,-2.22469,2.93998,5.91064,0.841518,-1.68308,-1.06298,-0.398387,-1.68239,-3.53445,0.38234,1.02165,-0.403129 10146.2,10113.8,9255.3,-3.35595,-3.34535,-1.74811,-10.4556,3.60927,-0.776329,-3.08604,-1.29687,0.835023,-5.76979,-1.7646,-2.22816,-1.31439,-0.382083,-1.73312,-0.792276,0.206848,-4.1992,4.29806,-0.830575,-1.71405,1.40452,2.00247,0.106559,-0.768805,-1.08451,1.11784,1.22578 10152.4,10107.8,9249.87,-2.49869,-3.87311,-1.98238,-6.90342,-1.23671,2.90852,2.97754,-0.581043,2.81778,-2.71728,-1.21684,-5.07044,0.497485,2.01224,-0.365556,-1.64542,1.17956,-3.76085,-0.573467,-2.58111,-2.12663,0.378165,4.18795,1.24581,-1.36196,-2.87649,0.482267,1.63454 10154.8,10107.2,9247.27,-4.01788,-5.39388,-1.72161,-10.3153,-0.251037,-1.57831,1.61553,1.18147,5.7765,-0.599766,-1.22598,-10.0294,0.895145,2.02015,-4.45992,-2.58818,2.98391,-9.45103,-1.41902,-1.29446,-0.55725,-0.180421,6.94249,-0.594659,-3.53394,-6.50742,1.38112,1.51458 10153,10112.2,9246.76,-3.24249,-5.01072,-2.02956,-7.46567,0.0264794,-1.5224,-3.31193,1.53111,5.32332,2.5335,0.40251,-7.05633,-0.711568,2.89381,-5.39998,-1.36446,2.04786,-7.02942,-4.53297,-0.88262,-0.357391,0.595822,6.5409,-2.84395,-2.64994,-5.7378,1.39939,2.97985 10148.7,10119,9246.16,-3.96002,-4.42756,-3.26432,-8.69557,4.03628,0.616301,-3.92147,2.76458,1.652,2.17356,4.22927,-4.5247,-2.33417,3.89508,-5.29918,-0.309883,-0.288513,-8.36711,-3.09529,-0.126421,-1.8539,2.38545,3.61409,-1.26649,0.429596,-4.19612,1.45711,3.95651 10145,10125.2,9244.17,-1.75695,-0.511195,-1.73883,-3.34742,-1.26592,5.24499,-3.03549,2.78645,-2.1334,0.220919,5.88292,0.160927,-1.7455,5.37331,-1.59599,1.91312,-0.631146,-3.16886,-2.94994,0.34822,-3.01289,2.84951,0.356135,3.47859,4.18276,-0.12287,0.984563,3.64398 10143.1,10130.2,9241.27,-1.71615,1.12867,1.04805,-6.57347,2.41341,16.2593,7.00371,0.924589,-2.71609,-6.2656,3.57183,0.37743,1.96421,5.66573,-2.3041,2.26799,0.668846,-8.32571,2.30148,2.66333,-1.75615,2.71555,1.44408,6.00224,4.85886,0.685304,3.03234,2.82015 10140.7,10134.4,9239.05,-1.25992,2.46902,-0.556969,-2.76672,5.45596,12.4649,8.36959,-2.49709,-3.8708,-1.40646,1.38854,1.37064,2.12007,3.84209,0.459629,2.15086,-1.24194,-4.15365,4.52043,5.4809,0.876317,0.656659,-1.01116,2.09458,1.65028,2.77599,3.21635,0.381243 10133.6,10137.8,9238.32,-2.22442,1.37094,-0.787327,-1.05469,3.55443,5.14715,-0.0509983,-0.0905216,0.72894,3.96149,2.38061,1.75467,3.09083,4.18358,2.79613,3.29833,0.325666,-0.671704,6.07566,7.72379,3.13564,0.655668,-2.59152,-1.76199,1.58102,4.45884,3.34631,0.480564 
10121.1,10140.7,9238.2,-2.17367,-0.866588,-2.79273,0.692199,10.1863,9.97874,6.04483,2.66482,1.76948,2.61332,1.9281,-1.1243,5.03132,3.85731,-0.443337,0.284932,-0.868815,-3.31091,8.51065,6.49177,2.23459,-1.67042,-3.77735,-2.781,-0.902713,1.50205,4.04064,0.197185 10110.8,10144,9237.47,0.303664,0.966366,-2.65365,4.69141,3.98147,5.09796,4.57488,3.26927,0.562439,5.41174,1.92471,-1.15766,3.6349,2.42314,-0.0874924,-0.0560302,-1.22366,1.9914,3.44357,1.69106,1.98031,-1.32375,-0.576816,-1.03349,0.269332,-0.300454,3.28264,-0.458562 10110.3,10147.7,9235.48,1.28867,0.940385,2.1165,-0.581377,-0.643187,-2.16313,1.69237,2.47912,1.37859,3.32286,1.26412,-0.720553,2.36863,-1.25903,0.0706914,0.944374,2.2859,0.229574,1.5842,-0.12766,4.43122,1.34327,3.34673,-0.404948,2.87655,-1.67866,3.04869,-0.25307 10116.7,10150.7,9232.33,0.394714,-0.833445,4.94793,-6.11826,9.22151,2.99358,11.1041,1.15853,2.93899,0.397365,0.0221406,-0.0976144,-1.13452,-3.42557,-3.72862,0.476803,3.69054,-8.12164,2.48493,0.363106,3.87676,0.504363,0.972674,-1.44388,2.15926,-0.828986,1.75931,-0.549928 10121.4,10152.8,9229.14,1.29508,-0.757006,3.12597,-1.6729,7.62364,-0.936804,6.48918,-1.03742,1.86227,-0.262351,-0.75051,2.31301,-4.8422,-4.5034,-2.66476,0.578808,1.27532,-2.04282,3.45288,3.01897,0.564668,-1.21876,-3.06331,-2.70583,0.257935,3.52846,-1.56111,-1.5308 10121.6,10152.4,9226.86,0.677648,0.378414,1.31475,-2.61018,4.91454,0.37514,2.86121,-0.193973,1.93324,-4.63591,1.10695,3.14457,-2.96694,-2.19304,-2.99025,0.50097,0.165722,-0.200595,6.85438,4.63234,-2.47705,0.342532,-1.30419,-0.141339,1.63084,4.32707,-1.19328,0.76139 10120.5,10149.2,9225.49,0.499478,1.88224,-2.14427,-2.77288,10.6927,1.71766,6.49787,0.43981,0.0705592,-5.13201,2.57263,1.48076,-1.20267,-0.591255,-4.74193,-1.79266,-1.46188,-3.42451,8.04316,3.54243,-2.30088,0.0710442,-2.83238,0.653942,0.240506,0.904871,0.430945,1.6283 10121.2,10144.8,9224.89,1.35965,2.80608,-1.94166,1.75583,0.26227,-8.26437,0.567312,1.6259,1.60009,0.0627174,2.62631,2.65738,-1.31444,1.36503,-0.138702,-0.303116,1.07964,0.805711,0.6712,-0.0379901,0.596301,1.49046,-2.9437,-0.0854658,1.7116,1.14138,0.19577,2.11315 10121.7,10140,9224.64,-0.625981,1.46152,0.571473,-0.708952,-3.97306,-7.60183,3.54876,2.52756,3.43643,-3.37318,1.25185,1.95327,-0.430742,1.99167,1.38528,0.439469,3.35733,-3.21518,-3.33649,-3.33716,1.63613,2.87364,0.216347,-1.19264,2.34646,1.38095,0.250252,2.26893 10117.5,10135.7,9223.59,-0.644241,3.50756,1.18011,1.32346,-4.09529,-1.15572,8.91836,0.864807,0.810206,-4.21922,0.85698,1.54667,-0.984211,1.49262,0.424346,0.272079,0.55043,-3.11065,-4.92549,-5.21789,0.616593,0.933381,0.453042,-0.907799,0.816878,0.888407,-1.07882,0.897744 10109,10134,9221.44,1.24811,3.97674,3.11247,-1.16572,-9.20759,1.26864,10.07,0.861166,0.629341,-5.07074,1.84156,0.554677,0.501606,2.3508,-1.99158,1.42546,-0.0624237,-4.75601,-4.11731,-5.27973,3.12042,0.927954,2.01431,1.91643,2.26937,-2.42322,-1.85499,2.11246 10103,10135.6,9219.87,2.2046,4.10281,1.87105,-2.44462,-1.81059,2.73657,16.517,1.49188,0.862687,-1.50652,2.91423,-2.27191,-0.311967,3.16828,-6.05317,-0.647296,-0.600809,-9.86797,-3.317,-4.05579,3.51099,-1.77799,-1.17227,0.17711,-2.12588,-5.86398,-2.08211,1.43944 10103.9,10138.7,9220.3,3.77174,5.49059,1.2637,1.03751,-12.6254,-6.24364,0.90728,3.65224,3.71822,2.59825,4.31988,1.86088,-2.62582,4.43061,-1.00461,2.10803,1.47555,-3.28777,-8.18549,-4.31695,2.95113,-1.34785,0.676274,-1.38936,-3.04336,-1.37001,-2.35773,2.00922 
10108.6,10140.8,9221.82,-0.70593,3.90046,-1.14247,-3.0764,-1.47295,-1.10809,-0.510284,3.79285,2.60078,-1.28697,3.77566,2.32766,-3.54475,2.99719,-1.20306,1.33262,-0.719923,-9.06449,-7.33119,-4.80493,-0.721145,-2.4024,1.79362,-1.97223,-5.04385,0.0875954,-1.73778,0.950888 10113.1,10142.1,9223.55,-1.06377,0.843971,-1.44889,-5.32939,2.69029,-3.83385,-5.63119,0.535717,-1.61039,-5.59267,1.26514,2.05707,-3.31026,-0.958826,1.33732,1.46551,-3.13585,-9.66605,-6.00234,-4.35532,-0.26599,-0.831562,2.98878,0.128679,-2.54674,-0.278737,-3.58409,-1.324 10120.7,10142.9,9227.01,3.56995,1.04759,3.75113,-1.7421,5.12807,3.1454,2.38504,-1.62768,-2.93793,-5.71266,-0.530001,2.84448,-2.04436,-1.31251,2.17243,2.11298,-0.867238,-7.66197,-6.87331,-3.32769,-0.373459,-0.116178,2.03689,0.379397,-0.00605166,-0.182103,-4.1657,-1.22794 10135.1,10142.1,9232.63,4.13322,3.14571,5.42112,-9.50857,6.61076,-1.5265,-1.3563,-0.229734,-0.953633,-2.39287,0.0907423,-2.25912,-2.95494,-0.622513,-0.878638,3.11006,2.20909,-12.7591,-4.65267,-0.652931,-0.508727,-0.484787,-1.43884,-3.89903,-1.68783,-1.20607,-1.47415,-0.30987 10150.6,10139.9,9237.26,7.08686,7.1115,3.05908,-7.31514,-2.75139,-6.15754,-6.75994,1.34201,0.583247,1.72791,0.0586144,-1.05549,-2.23348,1.35232,0.957745,3.9225,0.27845,-7.28043,-8.71747,-3.21629,1.12263,-1.08286,-3.72117,-4.10901,-0.817087,-0.319549,-0.171801,1.86899 10161.3,10137.9,9238.2,5.45348,5.872,0.0360833,-8.71486,1.68904,-1.57501,-9.84544,2.70784,2.39605,-1.45535,-0.548901,-2.93743,2.31592,2.21738,-0.0678836,1.75621,-1.90485,-7.83172,-5.34721,-0.902631,2.89369,0.938874,1.08004,0.946796,3.39736,-3.2386,1.23533,3.43628 10168.7,10135,9236.89,1.9988,3.16081,-0.959961,-1.65775,15.8147,12.2058,-6.43511,1.69639,2.59198,-2.06327,-0.47323,-4.35241,3.77438,3.79233,-2.16153,-2.08622,-2.56136,-3.89096,-0.736348,5.49778,-0.475583,0.770127,3.05002,3.17719,3.81221,-4.99556,1.59718,3.01185 10178.3,10131.2,9237.28,0.818385,-0.233269,1.46873,6.63122,10.9706,17.5879,-3.54675,0.677416,3.72244,0.655626,-0.201865,-1.16835,1.57109,5.42876,-0.444523,-1.12764,-0.256929,5.62565,-1.99386,6.4084,-2.47406,1.18593,3.2834,3.0293,3.51573,-2.53776,0.959038,3.23253 10193.3,10130.2,9242.16,-2.48525,-2.35837,2.98987,5.98816,11.4719,15.9039,-4.84232,-0.825315,2.54659,1.43064,-0.659643,-2.96556,0.571285,2.41784,-2.00371,-0.757574,1.41844,6.37057,1.42823,7.71148,-4.93994,-1.54988,-0.232174,-1.34349,-1.26249,-2.05601,1.26179,0.464125 10210.2,10133.3,9250.5,-0.302459,-1.69801,0.843368,2.30597,6.15326,11.0157,-5.9274,-1.05244,-1.68469,-0.278629,-0.694935,-0.891837,1.23651,-0.21345,-0.305015,-0.0987808,0.160233,4.91775,0.166271,3.92353,-3.88399,-2.55526,0.198425,-0.923912,-1.86728,-0.552523,1.22445,1.15572 10221,10137.3,9258.6,-1.56339,-0.256664,0.840544,-1.61826,11.0061,14.4706,-2.59098,0.449882,-1.65171,-1.89163,-1.35949,-1.40198,3.60618,0.270121,-1.02351,-1.1912,0.778059,-0.110922,0.867721,2.27546,-5.20223,-2.14642,1.17716,-1.36266,-2.51971,-1.10085,2.42789,2.32548 10222.9,10141.6,9264.61,-4.74868,-0.212232,1.05283,-1.29221,10.744,4.75459,-2.81401,0.644295,0.850172,0.179994,-3.01777,-4.30435,2.71079,-1.12735,-1.29174,-2.07496,1.34575,1.0376,2.5823,1.95702,-4.5778,-1.28586,-0.494008,-4.39926,-5.46478,-2.40477,1.70545,-0.546783 10222.5,10148.7,9269.02,-3.49502,-0.678579,-0.213247,8.06515,8.4472,0.736921,12.8231,-0.680516,1.09355,1.44143,-3.62765,-2.08929,0.194595,-2.35671,-0.392866,-2.86869,-0.655593,6.76095,0.52286,-1.94996,-0.69629,-1.94695,-3.05311,-3.36287,-5.8798,-2.04553,-0.962602,-2.08692 
10226.3,10155.2,9271.48,-1.96969,-0.131236,-7.34816,10.3469,1.43629,-18.1274,6.28789,-1.94889,-4.21799,9.10578,-0.96868,-0.513386,-5.07894,-4.75252,3.07715,-1.21549,-4.62974,12.6049,-2.11208,-4.5134,4.07597,-2.26695,-5.31607,-0.080814,-4.75562,0.0499323,-2.60796,-2.05158 10230.1,10151.7,9270.27,-0.441668,1.99564,-2.24149,10.4542,-4.09391,-6.45561,-1.77752,0.712394,-1.02642,8.25875,2.54249,4.31177,-1.67116,1.28898,3.90167,2.27301,-0.292013,13.1856,-3.31394,-4.23242,0.509949,-0.582218,-1.55254,1.54596,0.383257,3.15094,0.659781,3.83919 10224.9,10138.7,9266.49,4.67287,5.1299,-1.26323,13.4301,-10.2745,-9.49416,-12.2719,-1.18436,-2.87586,6.16837,2.83569,6.07774,-2.8315,2.00898,6.40272,2.01559,-1.86315,15.8694,-4.72684,-3.25468,-2.65905,-3.311,-6.24296,-4.21139,-3.70695,4.80612,0.395122,1.76566 10212.8,10131.4,9265.67,3.01888,4.86272,2.80549,9.41976,5.08199,16.7307,3.01517,-1.39232,-0.901598,-3.17761,2.70511,2.89126,0.206015,2.09237,1.79821,0.427067,-0.286912,4.97158,1.88506,1.52106,-4.78901,-3.10639,-5.19696,-1.88352,-1.17405,1.76068,1.66502,-0.462334 10205.3,10137.3,9271.29,5.0191,6.44861,-1.029,10.2232,1.46143,6.79866,-7.1328,-3.52906,-8.32347,-3.93806,2.03961,4.301,-3.73195,-3.92217,6.44854,2.90593,-2.49697,11.4551,-0.562561,1.57056,0.711111,-0.350636,-4.25263,3.76126,3.75639,3.70316,-1.79131,-3.47622 10205.7,10147.7,9278.59,5.83546,6.36501,-0.202118,7.16455,-12.9828,-12.4607,-27.3389,-3.33415,-9.60681,-6.26496,-0.539386,6.78879,-3.91681,-6.10831,9.8609,6.12423,0.502419,17.71,-2.72276,0.90307,5.89102,4.35576,1.47131,6.87862,9.08531,6.44279,-3.45175,-1.92878 10205.4,10153.7,9279.43,2.61204,3.79426,2.8599,4.2373,-6.30104,-6.55433,-17.9117,-2.30217,-4.33352,-8.56342,-2.54108,4.06241,-0.221565,-2.25183,3.87958,2.42384,1.7425,10.0636,-0.274803,1.38918,2.9688,2.49859,1.85002,3.57782,5.56749,4.25356,-1.57246,0.769565 10198.3,10155.2,9271.53,1.79363,-0.436721,3.46418,1.17919,-6.21503,-12.0337,-14.7144,-0.753172,-0.422946,-10.0673,-1.05729,0.16841,0.00393219,0.329848,3.06417,0.641188,1.13987,4.50086,-1.96838,-0.158451,2.22687,1.01485,-0.617827,-1.82684,0.837829,1.35672,-0.969077,2.83866 10187,10154.7,9258.9,0.357944,-3.85399,-0.403587,-0.905802,-6.94279,-16.6984,-17.7781,-0.22625,-1.87358,-4.80273,-0.208291,-3.41762,-1.38116,-0.435891,4.56144,1.47257,0.881539,4.31043,-2.35524,-0.63135,2.49929,2.73787,-0.3439,-0.967951,0.479767,-1.25236,-0.198644,2.70849 10175.5,10150.8,9245.55,-2.22289,-4.64417,-1.57873,-3.37822,-3.35046,-9.88201,-14.3071,0.168661,-0.756661,-2.69992,-1.57269,-4.61371,-0.741804,-0.794809,1.95045,1.34471,1.90438,0.670421,-1.36383,-0.0207592,1.95603,4.44548,1.70081,0.896225,1.96219,-2.68814,1.37985,1.21966 10163.9,10144.5,9233.39,-1.0609,-3.6573,-1.22008,-1.66234,-8.72059,-9.8591,-9.71449,-0.237702,2.4907,-0.383432,-2.45784,-2.52105,-0.451308,-0.95008,0.101755,0.998499,0.0147502,0.763548,-2.08901,-0.286814,2.08671,3.24587,1.98374,-1.03823,1.41551,-1.64013,0.866956,-0.452541 10152.5,10140.9,9224.11,1.58528,-1.3177,-2.21666,-0.770113,-12.1162,-14.2306,-0.877621,-0.372338,1.62768,2.76293,-0.69447,0.389726,-2.24466,-0.492948,-1.07534,1.2119,-2.84085,1.62365,-4.58137,-3.47859,2.38127,-0.58689,-1.20067,-5.12188,-1.38938,0.191315,-1.00868,-0.231626 10144.9,10141,9218.45,2.9188,-0.174985,-4.58083,-6.94645,-12.0718,-23.1781,-6.27315,-0.364715,-3.24703,1.70145,0.993811,-0.598274,-3.56103,-0.759525,0.496704,2.46032,-1.89983,0.597576,-2.01394,-2.93857,4.73883,-0.682548,-1.34504,-3.70636,-1.23983,0.0550942,-2.01066,1.58053 
10141.8,10139.7,9215.32,1.06474,0.421951,-5.29652,-9.2234,8.36446,-5.7284,0.960531,-0.909556,-4.90704,0.770291,1.54135,-5.62095,-2.20122,-1.09503,-2.35206,-0.974175,-1.0101,-7.23319,3.01594,0.768168,2.39478,-1.32615,-1.6404,1.53725,-1.51813,-3.97654,-1.7665,0.833795 10141.4,10134.3,9214.23,0.86273,1.35397,-0.657898,-4.72598,2.71892,1.93911,-8.71178,0.127278,0.812447,5.14689,3.34014,-5.47575,-0.124804,-2.70815,-0.541837,-0.600256,1.53834,-3.53843,0.0605411,2.43643,0.689316,0.936364,1.45495,3.58725,0.917646,-4.12549,-2.16127,-1.91164 10145.6,10128.8,9217.09,0.035273,1.26692,3.11502,-4.96307,-6.78084,1.02172,-8.79811,2.69846,4.94751,11.3598,6.51275,-2.0705,0.657905,-2.59061,-0.35795,1.18908,3.42851,-3.05799,-3.41004,0.806424,0.399374,2.92706,4.4301,0.273598,0.553543,-1.76552,-0.755718,-3.46001 10157.5,10128.8,9225.31,0.248702,0.312336,2.57768,-4.36878,-7.1619,-0.049009,-3.2758,2.7151,1.99544,11.1247,7.80862,3.2311,1.05086,1.13953,0.117826,1.5885,2.6575,-2.74279,-2.82058,-0.206648,1.25493,1.71967,2.81266,-4.13773,-2.45207,2.50385,0.789243,-0.268176 10170.7,10133.1,9236.11,-2.23675,-0.885477,2.34602,-6.30375,3.19378,12.3402,5.26964,2.51006,1.86666,4.33237,6.63528,4.85198,3.48519,8.46812,-2.52066,-0.634166,3.57125,-6.40349,1.46869,0.818123,-1.68738,1.2743,1.91738,-0.951766,-0.403311,4.63843,3.18061,7.04436 10176.7,10136.2,9243.78,0.782244,0.338989,-0.179665,0.677035,-11.8864,-9.98092,-16.6014,-0.0876104,-1.39338,0.511794,2.05749,5.37285,2.64871,7.7119,4.8232,-1.23349,2.56586,8.98335,0.643413,1.73431,-0.63479,2.49537,-0.600719,2.26345,1.69812,6.71431,2.31721,8.10433 10176.8,10136.6,9245.84,-3.20567,1.13405,3.92668,-1.78597,-0.236073,-2.19382,-11.4115,3.08973,1.33702,-3.27145,0.727769,-0.100717,5.38921,8.19297,0.492232,-2.20151,5.25989,3.6589,4.08819,2.21554,-1.32513,3.54291,0.119275,3.23854,3.862,2.19948,5.28701,6.25834 10178.4,10137.4,9245.74,-5.53585,0.420645,5.85295,-4.47724,14.54,12.4497,8.36972,4.99424,2.57479,-4.3639,0.677018,-2.6813,6.67898,7.5884,-5.54187,-1.3688,4.05586,-6.15054,4.2909,-0.899213,-1.24567,1.90686,-0.469126,1.72139,5.00978,-1.65339,6.96518,3.71489 10184.8,10141.1,9247.89,-4.95644,-1.91401,3.7243,-7.95873,7.49028,6.40526,5.31843,3.53676,4.4376,-3.95261,0.746514,-2.92295,5.17495,5.09822,-5.56387,2.13589,1.74219,-7.51099,1.13636,-2.24892,-0.712168,1.40767,0.401594,-0.663717,6.22808,-1.51586,5.59537,1.86444 10195.1,10147.9,9253.27,-3.98,-3.06823,-2.05534,-6.10099,3.83685,4.55708,3.92119,0.928846,2.49159,0.0763172,1.14792,-2.88509,3.3624,3.14131,-4.76678,1.53759,-2.49281,-5.00974,0.3227,-1.57677,-2.36177,0.558465,1.76223,-0.153596,3.21585,-0.248642,3.44061,1.09292 10206.6,10155.3,9259.98,-4.64998,-1.64546,-4.6585,-6.92405,-1.23826,-1.4651,-7.80907,2.03872,0.322905,5.35637,2.9557,-1.90346,0.941137,2.90995,-2.25745,1.6362,-2.73525,-3.06893,0.361893,-0.410406,-1.95298,3.18373,4.96997,3.18307,2.09522,2.29277,1.29516,1.46329 10215.1,10159.8,9265.65,-5.64262,-2.22323,-2.32616,-8.62966,1.24852,3.53986,-7.11813,2.5704,-0.221435,0.41167,0.765415,-1.44792,2.10023,1.14341,-1.90736,0.761342,-0.0657556,-6.90094,4.60419,2.00852,-1.1143,4.44335,7.23913,4.6059,2.18355,1.92624,1.0442,1.06642 10218.9,10161,9269.98,-5.54728,-2.69742,0.623383,-4.54971,5.62832,12.115,1.60837,0.527375,0.225195,-4.35554,-1.09064,-1.69716,2.68584,-2.42078,-3.28377,-0.48855,1.46337,-7.59929,7.41232,3.78152,-1.52786,1.12019,5.14455,0.902689,0.791392,0.171231,1.01653,-2.1951 
10225.1,10161.4,9274.87,-4.18459,-1.40959,4.0543,-3.78563,4.56469,13.1486,7.4468,1.32559,4.01602,-4.26528,2.47676,-0.706977,1.49841,-2.44619,-4.48237,0.314642,3.21848,-7.78537,6.45365,2.67192,-0.518631,-0.579868,3.1551,-3.30298,0.42352,0.385421,1.09082,-3.38628 10238.6,10163.7,9281.72,0.163978,0.29531,1.39945,-1.88245,0.770367,3.01996,6.47156,0.843119,3.05229,-2.89342,3.69162,1.01002,0.156961,-1.63668,-1.88068,0.459627,0.572044,-3.8789,6.07964,1.73877,1.04155,-0.952277,-0.352698,-3.89818,-1.13337,1.63306,0.655322,-3.05775 10252.3,10168.8,9289.58,1.69242,0.803041,0.969081,-1.57571,10.1963,10.1486,9.01137,-0.23779,2.45598,-11.8335,0.764195,0.347471,0.63322,0.818036,-2.67947,-0.48707,-0.0121974,-5.92175,4.75178,1.31186,-0.59319,-0.865273,-2.13114,-0.629395,-0.22624,0.187864,0.687159,-1.38416 10258.4,10175.1,9296.44,0.693656,-1.47018,1.57507,-4.07861,13.9151,7.913,3.87705,-2.41045,1.40643,-18.8401,-3.38044,-3.78137,0.444306,-0.142111,-3.19856,-0.633983,1.26609,-6.96487,4.03731,1.86282,-0.255938,0.885239,0.576534,4.16798,1.48633,-2.91027,0.44246,-1.26861 10259.2,10179.7,9301.13,-1.11281,-2.9356,3.48279,-4.07376,14.5961,4.75668,2.95063,-2.50321,1.99968,-15.2573,-3.94817,-6.19421,0.994523,-0.409685,-3.36826,-1.30752,2.89435,-7.11783,2.3961,1.75016,-0.287404,0.839505,2.32354,3.16514,0.431073,-4.23834,0.224613,-1.13459 10258.9,10180.8,9303.2,-3.70956,-2.93593,3.76222,-6.98265,14.1006,4.36509,3.13521,0.524873,3.4745,-8.19672,-0.812591,-7.54285,2.87285,0.165482,-4.34303,-3.00502,3.10194,-11.8146,3.48326,1.87454,-2.39007,-1.71717,-0.0308325,-3.00344,-3.10099,-5.07511,0.999296,-0.291248 10259.7,10178.9,9302.61,-2.50722,-0.863499,1.6361,-7.29671,5.65875,7.35687,6.74534,2.86707,2.5541,-4.10002,1.92641,-4.21325,3.79643,1.11564,-2.85299,-3.384,0.718232,-13.5344,2.15514,-0.378278,-3.09826,-4.48668,-4.09564,-6.07121,-4.62941,-4.63714,1.35609,1.33932 10264.3,10176.2,9300.58,-1.50986,-0.476834,0.153861,-9.03392,2.34462,9.76008,11.2624,0.958254,-0.70443,-6.3101,0.886002,-3.04957,4.20237,0.687347,-2.59931,-4.30057,-0.344332,-15.3463,3.30618,0.212706,-1.83037,-5.39362,-6.37009,-5.79293,-5.6463,-5.17005,1.45394,1.2199 10270.2,10175.5,9299.06,-1.8193,-1.62584,1.49621,-15.2891,-0.19176,0.694336,7.97111,-0.906134,-1.88497,-6.47048,-0.900237,-3.70282,1.23614,0.322582,-3.93212,-3.45866,1.71962,-16.8955,0.58688,-0.409914,-0.259588,-2.68512,-3.64588,-3.35838,-4.51583,-4.19392,0.240148,0.159851 10270.2,10179.6,9298.63,-1.90388,-3.42457,3.36972,-15.5947,6.83754,-2.72512,7.96959,-1.26132,-2.35887,-7.13988,-3.00989,-4.84946,-1.32472,-2.90407,-7.21556,-3.99747,1.63284,-18.121,1.49353,-0.486008,-0.289734,-2.44221,-2.61409,-4.74746,-6.81336,-4.22186,-0.397997,-3.01155 10263.1,10186.3,9296.94,0.1046,-2.95923,0.55802,-3.53552,11.956,6.06043,20.0157,-0.175478,-1.81809,-1.77528,-2.10279,-0.283075,-3.48288,-4.09089,-6.41457,-3.4926,-1.98205,-11.2644,1.51324,-2.56718,2.01317,-3.17178,-3.03644,-4.28621,-6.82533,-2.57386,-0.732198,-4.52782 10250.3,10186.7,9289.82,0.787893,-2.63004,-4.83671,4.59987,9.90165,5.11396,20.1712,-1.49013,-0.900383,3.2704,-1.38302,1.01612,-3.51797,-3.65748,-2.01906,-2.31487,-4.58178,-0.663723,4.99631,0.0846666,6.20019,-1.32911,-0.366123,-0.708005,-3.05462,-1.4169,-1.33549,-4.03837 10229.6,10174.2,9276.51,2.92922,1.43172,-8.45959,7.92191,9.82817,0.906035,15.1761,-5.66535,-4.80598,8.92318,-1.50732,0.863702,-4.19618,-1.72605,1.43049,-1.60336,-7.78679,7.9456,2.20311,0.976306,4.6808,-2.0774,-1.41618,1.52784,-1.00485,0.251303,-2.51818,-3.24837 
10203.9,10154.8,9263.01,1.97737,4.88419,1.86761,-1.89071,16.8831,21.8027,18.6752,-2.85592,-0.407409,1.1857,1.57668,2.90834,1.42619,5.01683,-2.88862,1.13125,-1.02838,-3.77013,-1.83294,-0.874118,-1.82318,-1.06152,0.617181,1.34269,3.38069,1.15764,1.12216,1.38647 10184.5,10141.2,9256.68,5.24597,7.64832,2.18557,1.58328,4.92602,9.28816,-0.0172234,-2.70209,-2.36954,2.63625,2.45988,6.65341,1.30855,2.45772,0.884071,4.15289,-0.306199,0.501745,-3.91598,-0.843063,-3.78083,-0.751671,-0.908618,-0.353576,1.46737,4.59599,1.10914,-1.05414 10178.9,10140.4,9258.57,8.5511,8.38576,-0.704081,10.0442,3.87995,9.53107,4.06474,-2.33977,-3.33414,3.45052,0.769206,8.44243,0.151836,-0.110094,2.50423,3.89258,-1.86971,4.86933,-2.34618,0.208276,-3.54318,-0.382483,-0.444637,3.17545,1.86638,6.31308,-0.0788599,-2.11239 10182.7,10148,9263.52,7.664,6.75263,-0.540997,5.42972,-5.04193,-7.98425,-8.29464,-0.166299,-0.588527,3.31557,0.500806,4.72146,-2.51571,-1.43305,5.52369,5.671,1.03703,8.03067,0.0463032,4.16527,0.993743,2.27,2.01907,5.48701,6.28587,6.50446,-0.915646,-0.555951 10185.6,10156.6,9266.64,4.26252,2.60407,3.65205,1.35764,1.93964,-1.71464,3.62386,0.664968,2.07164,-1.84774,-1.41728,2.03742,-1.93901,-0.955849,2.55509,2.24827,3.4143,2.08534,1.52467,4.36357,2.40504,-0.149419,1.87333,2.56701,3.76988,3.58853,-0.290298,1.53656 10182.8,10164.1,9266.99,3.44774,1.00051,3.58435,5.06036,-3.20427,-1.32409,2.16178,-1.24869,0.986594,2.68824,-3.10496,3.75494,-3.03899,-1.36189,2.85639,-0.797041,2.25309,6.84226,-1.01807,1.45026,1.64915,-1.77668,1.47461,1.32051,0.0174875,3.15498,-1.91103,0.915561 10177.6,10169.5,9265.47,2.97062,0.742454,2.19308,3.39405,-10.2555,-6.11354,-8.35604,-2.29312,-0.492631,4.2024,-2.46282,2.85236,-2.05854,-1.07623,3.34902,-1.67951,1.43015,9.72371,1.0556,1.2093,0.0329592,0.933345,2.62882,4.14907,1.43657,2.25242,-2.21302,0.424466 10175.1,10171.1,9262.53,2.78573,0.66686,2.0545,2.76769,-2.38316,1.38611,1.33538,-1.98843,-1.22362,0.719734,-1.48276,0.571928,-0.303568,1.13172,0.533248,-2.57485,0.218063,4.75694,4.12677,1.25451,-2.29974,1.77459,2.18864,5.66448,2.31972,-0.197648,-0.423422,1.24127 10176.1,10170.7,9258.49,5.31438,0.737423,2.23937,7.15555,-6.03862,-6.93885,2.59027,-2.08985,-1.82474,1.76361,-1.51506,2.40133,-2.94977,1.13326,2.34185,-1.4691,-0.319475,6.55378,0.151184,-0.820336,-1.03183,0.737373,1.0173,1.60097,0.120988,0.706961,-1.06361,1.61191 10177.1,10171.1,9253.43,5.27989,0.124242,0.594136,6.40228,-14.4792,-17.9873,-7.83873,-2.70593,-2.84279,6.19952,-1.02819,4.22035,-3.89328,-0.655654,4.6427,-0.543649,-0.312946,7.67303,-3.34568,-2.99026,0.892734,0.193866,0.437901,-1.37172,-2.06494,3.10779,-2.09072,0.969194 10175,10171.9,9247.28,2.27598,-1.11333,-0.371999,2.70022,-5.44405,-1.24932,2.95574,-2.54561,-3.07604,2.81372,-0.48024,4.11824,2.04907,-0.370621,1.24343,-2.71039,-1.27809,-0.906837,-1.29061,-4.80376,-0.177684,-0.68347,-0.0356975,0.976652,-2.58184,2.60538,-0.53245,1.0079 10170.6,10171.1,9240.98,0.484599,0.0646839,-1.51326,2.89899,-3.4319,-0.213982,2.47953,-0.834731,-2.00581,5.72898,0.227883,2.67222,2.27602,0.0505934,1.31844,-2.26552,-2.6972,-0.975391,-0.869576,-3.70984,-1.26158,-0.292123,-0.590846,2.58737,-1.84822,1.62378,-0.526111,-0.491878 10166.9,10167.6,9236.09,0.964725,-0.0392702,-0.079079,4.19696,-8.77705,-7.3393,-5.33084,1.7816,1.00552,6.00308,-0.645333,1.80016,-0.345783,0.537513,3.29513,-0.258503,-1.94323,3.02276,-2.07851,-0.708951,-0.985472,0.42465,-0.0047685,-0.0149723,-1.37113,0.550535,-0.779034,-0.484969 
10166.1,10161.5,9233.6,-0.598547,-1.76595,-1.06041,-0.952044,-3.22733,-6.25839,-1.71002,3.5389,3.14678,2.52469,-0.94774,-0.697306,-1.82073,1.8162,-0.398189,-0.0962201,-1.17773,-3.11075,-1.86249,-0.148137,-0.912351,0.0729367,0.372787,-1.52491,-1.99794,-1.67208,0.753712,1.02245 10167.9,10154.5,9233.85,1.32924,-0.579085,-4.09528,3.27081,-6.78357,-9.38603,-3.06915,1.95927,0.70163,2.46784,-0.635142,0.854662,-1.03664,2.44479,0.381434,0.976493,-2.1874,1.35415,-3.25712,-1.85514,0.202589,0.286026,0.720155,0.627719,-0.687001,-0.872865,1.21871,2.25385 10170.4,10147.3,9236.23,1.55419,0.655793,-3.90119,3.65032,-6.92144,-3.81534,-0.829364,1.59907,-0.150104,0.588015,0.212751,1.04803,3.09472,3.79829,-0.218751,1.11779,-1.55055,0.933332,-1.25266,-2.59487,0.647035,1.39731,2.58953,2.8589,1.80309,-1.43261,2.52993,2.79953 10171.9,10139.7,9239.22,2.16966,0.513128,-2.93705,2.73804,-10.8601,-4.50483,3.76187,1.03924,-0.676839,-1.4866,-1.19577,1.6866,5.98311,3.12642,0.0885709,0.9896,-0.594518,0.533618,0.379411,-3.82145,2.32664,2.22298,3.60721,3.05218,2.2889,-1.98702,2.79897,1.35025 10172.4,10133.5,9242.05,0.627291,0.905709,1.39363,2.99372,-15.425,-9.09382,2.11414,1.04226,2.10526,-4.39506,-2.77953,2.15891,6.66724,1.70369,-0.372333,1.40462,2.59187,2.26874,-0.378224,-3.69675,3.0335,2.25396,3.10192,0.0429504,0.10951,-0.799702,2.66794,-0.282681 10173.8,10130.2,9245.36,-1.33644,1.42161,3.11004,3.93858,-17.0646,-12.116,1.67239,1.94826,5.54306,-3.85205,-1.5475,2.52019,4.33814,1.15019,-0.541069,1.99129,3.05378,4.25369,-2.76731,-2.80645,1.85733,0.988299,2.88783,-1.97077,-2.83768,1.85125,2.84766,0.389147 10176.4,10130.9,9250,-3.53503,0.391503,-0.270572,1.95882,-15.1875,-18.5758,-1.42497,2.28845,5.40786,-2.12974,1.20821,0.911564,0.2788,0.0689856,-0.00271805,2.01928,-0.20812,3.23848,-1.98612,0.0245125,0.488358,-1.18054,1.47019,-3.47437,-4.6287,2.11498,2.20934,0.993318 10178.8,10135.9,9255.56,-3.20255,-0.268054,-3.48033,2.47099,-11.3536,-16.9308,2.01776,1.40976,1.56328,0.853625,1.89586,1.47109,-1.50849,0.167668,0.627511,1.41809,-4.21425,2.05546,-2.39209,-0.416193,0.276633,-1.50971,-0.820011,-1.25927,-1.76,0.153711,0.431209,1.48315 10181.2,10144.1,9260.31,-2.49125,-0.613263,-3.86482,0.287362,-9.17309,-14.1157,3.48478,0.196793,-1.25386,2.83848,0.198147,-0.0165582,0.471677,-0.139327,-0.216901,-0.966032,-5.2193,-1.40546,-0.977273,-1.2574,1.78779,0.134179,-1.72164,0.653388,0.313432,-3.37716,-0.587605,0.861387 10186.6,10151.1,9263.12,-0.0358474,0.714951,-5.47328,-0.875177,-17.5089,-13.8361,0.471247,0.643912,-2.41975,9.9458,0.993041,0.803296,-0.226386,0.0668295,2.19176,-1.16819,-4.40868,0.69383,-3.38706,-3.58218,3.07732,2.10253,1.79789,2.06744,1.83904,-2.15516,-1.67344,0.661882 10193.4,10152.2,9264.85,-2.78688,1.85556,-1.96216,-7.27433,-5.61022,0.625161,3.91544,2.78407,0.13042,8.01854,3.573,-2.43853,-1.07905,0.148792,-1.48277,-2.3792,0.378784,-7.05144,-1.06108,-1.76148,0.135824,1.71393,3.80312,-1.43656,0.702495,-1.95731,-0.703674,-0.33177 10196.9,10148.7,9267.46,1.41437,4.41491,0.0330121,-0.96198,-19.7539,-11.561,-5.49424,1.03618,-0.588315,13.1158,4.11913,1.82776,-4.02743,-1.24038,4.49417,2.16391,1.61464,5.33203,-6.2827,-3.22771,2.42673,4.53812,5.27571,1.95384,4.83592,2.15944,-2.23414,-0.0179182 10195.1,10146.6,9271.67,-0.599083,4.08109,5.56207,-0.651956,-1.899,4.41751,8.64946,-0.00765143,1.65381,7.40697,3.13743,0.528221,-1.17274,-0.333192,-1.34405,0.810869,3.04978,-1.96585,-3.00608,-1.02587,-0.427114,2.63482,2.33223,1.44749,2.70602,-0.508442,-0.782524,0.838544 
10190.6,10149.1,9275.95,0.560997,3.32623,0.00253245,1.6273,-9.62681,-9.32197,-7.13248,-1.74244,-2.26773,10.279,2.01853,1.79006,-2.32577,-1.861,2.70102,2.63733,-0.668516,4.89049,-2.56801,1.67809,-0.682542,1.07859,-0.730879,1.04436,0.219305,1.04839,-1.30085,-0.204558 10188,10153.1,9277.72,-1.05102,1.4439,-1.2902,0.37219,3.61058,7.8905,-0.13638,-0.797121,-3.203,3.7144,-0.467361,1.43319,1.01941,-0.964803,1.27849,1.32106,-0.71757,-0.281666,1.82319,4.43107,-2.93419,-0.102775,-2.79816,1.60946,-0.350934,0.837113,0.975085,-0.206216 10189.3,10155.8,9275.17,1.71247,1.79065,-0.806826,4.2591,-1.07113,5.08033,-3.80833,-1.05846,-3.93516,4.86697,-2.48519,4.41458,1.0147,-2.04319,5.76698,3.04901,0.621182,6.18537,-0.471514,3.74338,0.0954557,1.78055,-2.23478,4.29533,3.28968,4.08665,-0.45381,-1.12752 10190.8,10155.9,9267.91,0.0885688,1.62773,3.97676,0.475719,6.50171,12.0036,4.17355,0.0800788,0.877184,4.13283,-1.66529,2.3731,1.22312,-1.52431,1.32333,1.30085,4.02821,0.00402446,-0.278254,3.83144,-0.00616006,1.70507,0.14686,2.05675,3.75234,3.42709,-1.13997,-2.28219 10186.5,10152.6,9257.34,-0.152071,1.1051,2.98089,-3.26014,-3.23874,0.545145,-3.74253,0.650653,4.32612,4.55661,-0.349067,0.443991,-1.54712,-2.37082,1.08068,1.11666,3.19332,0.114235,-4.77887,1.03262,0.526047,1.57427,1.96416,-1.21359,2.2522,2.81775,-2.19914,-3.20958 10175.9,10146,9246.33,-2.37365,-0.801223,1.8448,-4.49245,2.73452,3.45587,0.665856,0.804743,7.15539,-1.25789,-1.25952,-2.70716,-1.07845,-2.04441,-1.93328,-1.35806,1.5978,-5.1161,-5.79834,-0.925826,-2.80177,-1.15512,-1.39234,-4.88988,-2.71874,-0.727928,-1.17586,-2.55528 10163.6,10137.3,9237.87,-0.803469,-2.78044,-0.895544,-1.96323,-0.541223,-3.95959,-1.23923,0.0489646,5.82687,-0.842944,-2.20839,-1.37161,-0.868195,-0.366623,-0.326653,-0.542204,-0.442138,-3.06811,-5.05951,-1.77693,-2.56412,-2.0747,-5.18551,-5.90628,-3.59607,-1.51359,-1.0358,-0.0442413 10154.4,10129.1,9233.99,1.23915,-3.76005,-2.64612,0.723829,-3.148,-4.96491,0.57486,-0.202117,2.21428,-0.386009,-2.61213,0.591537,-0.420445,2.51457,0.848114,0.0155665,-2.8099,-0.688955,-1.65728,-1.68576,-0.314736,-2.37588,-7.30164,-5.93878,-1.09582,-1.08092,-1.23666,3.04974 10147.7,10124.3,9234.84,0.130569,-3.33534,-5.30783,0.228073,-1.79103,-2.90284,1.72325,0.336059,-1.67646,0.805152,-2.51359,-1.68843,-1.08056,2.79024,0.667811,-0.918425,-5.25023,-0.613583,-1.21144,-3.86108,1.12026,-2.87087,-6.96217,-3.74878,-0.871173,-1.99148,-1.4983,3.13726 10141.9,10125,9238.34,-2.3342,-3.74514,-6.28736,0.247636,2.71253,3.12847,7.57994,-0.0401623,-2.07147,0.481455,-3.97685,-4.46362,-0.415913,1.42821,-0.575486,-2.68041,-4.57327,-2.24353,-2.60028,-5.84863,0.625916,-3.42977,-3.6369,-0.844099,-3.5874,-4.64335,-0.985747,1.2717 10139.9,10130.2,9242.19,-1.31024,-4.72475,-7.14762,0.73153,1.45053,-5.53508,5.90136,-2.31863,0.194991,0.488804,-6.97821,-4.41928,-2.29074,-1.35009,0.919216,-2.89533,-3.25509,-0.799203,-1.99553,-4.14064,2.04707,-1.98553,-0.137078,-0.0166083,-4.9352,-5.40326,-1.67739,-1.42035 10146.2,10135.6,9246.04,1.48702,-3.36982,-6.22071,1.74719,2.56435,-13.0074,1.99705,-3.21561,2.91416,0.844878,-6.7988,-2.16439,-5.4962,-1.85975,2.13575,-1.59383,-2.91884,1.52462,-1.3314,-1.85117,3.6544,-0.430522,0.692754,-0.840642,-3.31251,-2.33908,-3.05762,-2.1983 10158.1,10136.1,9250.8,0.841737,-2.49661,-1.39476,-1.47649,15.6927,0.965199,10.869,-0.546861,4.02682,-3.15137,-2.65822,-1.05518,-4.77058,0.229656,-2.58261,-1.60934,-0.689737,-5.44364,-0.234473,-1.95479,2.60062,-0.769404,0.484685,-2.21476,-2.21659,-0.527818,-2.3356,-0.631119 
10167.2,10131.4,9256.17,1.43756,-1.64599,0.0828565,1.10643,1.09851,-8.71597,-1.14743,1.16785,1.24835,1.69522,0.678389,1.91657,-5.73395,-1.26925,0.618759,0.671225,0.99422,2.5392,-3.14056,-3.00047,3.39733,-0.267724,0.865602,-1.72338,-1.28093,1.59131,-3.58079,-1.60917 10168.5,10125.9,9259.95,0.111755,-1.49369,1.18289,-0.284048,-1.52165,-7.82514,1.91577,2.83987,1.30957,4.34859,2.31828,0.547347,-5.35341,-2.95714,0.120479,-0.07344,1.25038,0.863374,-1.97606,-2.63292,2.99367,-1.51317,-0.192761,-1.94301,-2.34527,-0.816782,-4.15688,-3.69083 10164.7,10123.5,9260.03,2.54631,0.123647,1.85441,0.291179,-2.26534,-5.622,0.403256,2.75151,1.92159,5.45502,4.02912,0.277333,-3.49437,-2.59529,1.68451,1.03176,0.611114,1.05444,-1.37086,-0.762577,2.09659,-3.15435,-1.66892,-4.18628,-2.03484,-0.59484,-4.5361,-4.06338 10160.7,10123.9,9256.02,4.16394,1.15842,1.00215,-1.41089,3.00077,3.69915,2.12147,1.50602,1.11373,3.7783,5.12886,1.27055,-1.0735,0.163066,0.715848,1.75274,0.248762,-1.87449,-2.70607,-0.0821427,-0.982237,-3.91753,-0.603176,-5.15131,-1.55797,1.9122,-2.63806,-2.45448 10157.6,10124.8,9249.1,1.13904,0.752742,1.28292,-3.44794,5.87463,13.5955,-3.90547,0.053564,0.392376,-2.17549,4.02652,0.800942,2.14933,0.991305,-1.00534,1.93346,1.74799,-4.3887,-2.62983,2.12002,-3.97726,-2.37985,1.92724,-3.91126,-1.80145,3.29901,0.515867,-2.07875 10155.9,10125.9,9241.01,-1.21278,1.24353,0.0902419,-1.38693,3.90257,17.0687,-1.7671,-0.621263,-0.743581,-3.56603,3.19768,0.515647,2.83626,-0.394058,-0.965446,2.53295,1.02968,-3.73706,-0.646373,4.19926,-3.90665,0.100245,2.07717,0.65145,-0.4389,3.45695,1.30478,-2.26372 10156.9,10129,9233.19,-0.519545,3.45514,-0.128203,0.470911,-4.34917,11.6069,-5.37302,-0.249794,0.0908138,-1.64961,3.7305,0.887725,1.28233,-0.50548,0.651175,4.68216,0.481759,0.131141,2.83721,7.4517,-1.51906,2.02591,0.478488,2.8447,3.96564,4.21205,0.0189546,-1.26083 10160.2,10134.9,9226.61,0.334619,3.63902,-1.33005,0.500933,-0.0390483,15.3466,3.49804,-1.22599,-0.443012,-1.29729,1.85728,0.83413,0.663791,1.08815,-1.61332,2.35978,-1.91003,-1.54128,7.06018,8.52392,-0.0931056,-0.631766,-1.8937,1.21041,3.92464,3.0125,0.582016,-0.0552563 10165.1,10142,9222.12,-0.0501124,2.72845,-2.35233,0.461804,-3.24106,3.89637,-4.4752,-1.7395,-0.658087,1.46568,0.74815,1.9358,-1.37579,1.26993,0.248403,2.1501,-1.97865,2.84403,4.93078,6.34449,2.55208,-1.66616,-1.28941,-0.85475,2.44335,3.28626,0.575625,0.0867697 10169,10147.2,9219.92,-2.57524,1.55278,1.64717,-0.408592,2.78686,3.93608,-3.35557,-1.05071,0.358949,-1.71793,1.23509,0.730307,-0.807758,0.469476,-0.799756,2.26666,1.42763,2.57756,3.31921,4.24278,2.32673,-1.92157,-0.625841,-1.7385,0.55312,2.469,0.416022,0.102824 10167.7,10149.8,9219.39,-2.61236,0.265041,4.14099,-1.10443,5.68968,5.75872,0.437178,-1.27371,-1.44794,-5.50529,0.962099,-1.7594,-0.014506,-1.47838,-2.10998,2.88166,2.32266,2.31558,3.04189,2.76494,1.13588,-2.76241,-2.5749,-1.37983,-0.132212,1.62609,0.00182996,-0.567092 10161.2,10151.5,9219.88,-1.00231,0.225002,2.94421,2.03312,-0.355979,4.16591,-0.636307,-0.980578,-3.17075,-4.4683,-0.0413473,-0.96548,-0.194949,-0.798368,-1.08568,3.94015,1.20872,6.21739,0.493017,0.663456,-1.20346,-2.76074,-4.99576,-0.484664,1.27829,1.87168,-0.0347963,-0.649195 10155.5,10153.9,9220.83,-0.939771,0.647249,0.0634509,3.2582,-1.62031,4.0693,-0.997477,-0.169163,-4.01209,-4.20755,-1.14083,-0.040949,0.676499,1.0769,-0.637069,2.85891,0.53402,4.18699,0.666861,0.369829,-2.63692,-0.336214,-3.73798,1.47577,2.81105,-0.292838,0.0270106,-0.151526 
10154.1,10157.5,9221.67,-1.65802,1.59847,-3.57612,1.52401,6.37221,4.48866,-1.46299,-0.915699,-6.98915,-0.340048,-0.952717,-2.18866,-0.811792,-0.642645,-0.622625,-0.300884,-1.00057,-1.15759,2.44751,2.6773,-1.823,1.29837,-1.91591,2.49204,1.93197,-3.59974,-1.91245,-2.4109 10154.4,10160.7,9221.98,-0.583463,-0.108757,-4.6507,-0.0693877,5.35637,4.425,-6.56889,-1.82597,-8.57191,2.85503,-1.05825,-2.33955,-3.22781,-4.76081,2.05753,-0.861931,-1.83229,-0.124382,0.503483,2.18131,1.30665,2.42826,0.824233,3.84653,2.09007,-3.3925,-4.31649,-3.96112 10153.4,10159.2,9221.68,-2.76485,-4.09131,-2.87698,-1.10712,12.5336,12.9839,-4.34652,-1.87041,-6.50663,-1.43881,-2.78497,-4.09349,-3.27711,-7.58611,-0.918956,-2.43732,-1.68029,-2.93885,1.37614,1.00354,-0.202025,0.252735,-1.35224,2.14941,-1.22668,-3.85694,-3.91196,-5.39514 10153.1,10150.6,9221.82,-3.95579,-6.11602,-1.95691,-0.571033,7.36799,2.23424,-8.23593,-1.15065,-2.89936,-3.34966,-3.42278,-4.92737,-4.22729,-7.57776,-1.53936,-2.4826,-0.485854,-2.05301,1.35048,0.235875,-0.851581,0.299046,-3.65228,0.452501,-2.53126,-4.14097,-3.0318,-6.032 10156.5,10138.1,9224.22,-1.72219,-4.81284,-2.04034,3.64429,-3.40667,-8.21149,-2.06758,-0.247629,0.240041,0.844032,-2.55693,-2.29071,-5.62686,-4.10255,0.955484,-2.58578,-0.573095,1.96046,-2.89531,-2.47853,1.00662,1.59082,-2.31097,1.60096,-0.355857,-3.59741,-2.54995,-3.16362 10162.5,10126.5,9229.66,-1.48624,-2.31864,-1.19917,5.07688,-2.15075,-4.48733,6.81643,1.19375,3.4529,3.66948,-1.49639,-1.71619,-5.51437,-1.29231,-0.407537,-4.604,-2.54282,0.0824236,-5.27449,-4.81883,0.767691,-1.39492,-2.55861,-0.325428,-1.75464,-3.59903,-1.89829,-0.732932 10167.7,10118.7,9237.56,-1.06333,-0.880843,-0.709075,2.8371,-10.0447,-10.4348,-2.5904,3.18465,5.97115,6.33779,-0.55058,-1.01646,-4.14332,-1.6247,-0.0193591,-4.01402,-3.73144,0.38443,-5.50468,-6.41294,-0.295721,-3.62009,-2.70822,-3.1355,-4.45086,-2.10376,-1.79258,-1.22716 10172.5,10116.9,9247.18,1.551,0.130326,-0.490568,5.87654,-14.5436,-8.35183,-0.790109,3.39107,4.7174,8.28156,-0.0057788,2.6686,-1.84943,-1.48071,1.03911,-4.0934,-3.48936,2.7605,-6.22541,-8.72046,-2.487,-3.9855,-3.15508,-4.85806,-6.30628,-0.1826,-2.22861,-1.91313 10179.7,10122.6,9257.78,1.5355,1.00586,-2.46594,5.55739,-10.6179,-9.89219,1.01847,2.02002,1.55047,10.3651,1.59035,2.3257,-3.02423,-0.681756,0.379055,-4.13859,-2.86252,2.65539,-7.09955,-8.4785,-1.80811,-2.44766,-3.84586,-6.08215,-4.18234,0.309597,-3.66089,-1.78168 10188.9,10134.4,9267.84,0.423127,-1.44673,-6.16369,2.54558,-3.2605,-10.2788,1.93481,-0.460125,-1.55478,7.53447,1.04311,-2.037,-5.33297,-0.715827,-0.912315,-4.00679,-5.27357,1.32517,-7.02947,-5.6844,2.49,-1.1701,-4.14164,-4.46692,0.160721,-1.23591,-5.46575,-0.678645 10196.3,10145.5,9275.21,0.204833,-4.851,-9.24744,3.38063,-3.90706,-1.89916,-0.318999,-3.05687,-4.83175,3.88926,-1.68472,-4.52857,-6.76493,0.053409,0.356074,-2.44354,-9.25902,3.95243,-8.99635,-3.68403,4.07743,-1.41439,-4.06526,0.784286,2.50666,-1.59161,-6.31937,0.0761621 10200.4,10148.5,9278.92,-3.06966,-5.752,-6.27773,-0.452092,4.18213,13.2473,-12.0757,-4.47092,-6.49884,-5.96616,-4.08975,-9.08064,-3.65565,-1.03612,-1.9757,-2.79369,-8.22081,-3.13926,-2.68074,1.98539,-1.47914,-4.27865,-6.82097,-0.0420558,-2.72616,-3.80964,-3.69263,-2.81706 10202.3,10144.3,9279.66,1.7621,-1.2767,-1.87182,1.61337,-6.80859,14.4514,-16.815,-2.07514,-4.63562,0.0307544,-1.49074,-2.29138,-1.18636,-1.08621,1.86862,0.689509,-4.2555,-0.913166,-4.04706,-1.13903,-2.95495,-1.4359,-3.45987,4.36607,0.619825,-1.53464,-2.06409,-2.58631 
10201.6,10141.5,9277.89,2.73427,2.11183,3.79277,1.71546,-5.8859,13.3557,-11.3022,2.79327,2.37116,13.2011,3.98285,0.966107,0.039656,-0.715821,2.85166,2.34242,2.77476,-0.0888099,-4.98538,-3.4432,-1.83877,3.57211,2.68075,7.05565,6.45616,-1.54302,-1.24469,-1.49869 10196,10143.8,9273.55,-2.52737,0.202188,7.08167,-4.89952,6.71679,10.6699,0.756855,5.54471,7.25909,13.9583,6.39787,-2.37566,0.745793,-1.45474,-1.09404,0.910205,7.21143,-6.92492,-3.24203,-2.89701,-0.543452,6.07649,7.33376,6.57894,6.15484,-4.40884,0.0587056,-1.11052 10186.2,10147.8,9267.63,-4.31786,0.145523,8.74123,-1.12372,3.61382,5.90919,-2.20636,4.87121,7.93339,10.8223,5.77747,-1.02016,1.70524,-1.23974,-1.99873,1.22043,7.18349,-2.02393,-4.52471,-1.19367,-1.87015,5.60664,6.92162,5.30532,3.03549,-3.16865,1.33872,-1.3693 10178.3,10151.3,9262.07,-1.01371,-0.36759,7.07326,3.03463,-3.67644,6.41668,1.01659,3.32806,5.69645,6.11989,4.17302,3.13986,4.40199,0.31144,-2.58094,-0.0539033,4.16067,1.49299,-3.2753,-1.39228,-2.172,3.33149,4.19598,3.46064,0.616277,-0.818505,3.98959,0.698301 10177.2,10154.3,9257.94,2.09186,0.0766925,2.17884,5.08344,-13.9717,-0.882929,-3.84368,2.86526,4.57806,7.77504,4.75117,6.29349,4.58116,4.04706,1.06485,0.914494,1.84175,7.12093,-3.92066,-3.04038,-1.76589,1.29071,2.74094,1.46176,1.98937,3.12251,5.09485,3.84087 10179.4,10155.4,9254.74,0.187596,-0.882072,-0.665652,4.15319,-3.56212,6.25634,3.46947,2.99756,3.30879,0.859046,5.1349,3.91232,5.90056,6.60019,0.839946,-0.162343,-0.484405,2.65509,-1.8674,-3.50916,-5.10299,-1.60522,1.28388,-0.0295086,1.05,2.81748,5.21994,5.53563 10178.8,10153.1,9251.26,-1.91139,-0.154839,-0.832651,7.32065,-8.14661,3.20829,-4.61065,3.9011,1.20806,1.29028,6.11631,4.24084,4.66918,7.38927,3.1094,1.72009,-0.436683,6.06925,-3.83738,-3.64103,-8.35166,-0.222316,1.74303,3.43329,2.82215,3.91599,3.2218,6.05878 10175,10149.2,9246.46,-3.00223,-0.829219,2.18951,8.12634,-8.29635,3.98254,-2.55022,3.58933,0.0476173,2.00734,2.85452,5.13863,4.39434,5.86178,1.57419,0.321093,2.11151,4.62819,-0.677836,-1.98205,-7.44972,1.36379,2.52895,5.12261,2.10196,3.15929,2.77152,6.16477 10170.8,10147.7,9240.32,-2.09934,-1.33891,3.77143,6.49402,-6.43302,-0.0826344,0.87837,1.12061,0.421557,1.06025,-1.52903,5.64507,3.68263,3.49536,1.25096,-1.4957,2.92854,4.60413,2.40658,-0.645265,-3.32217,0.987715,2.60908,1.94117,-0.424246,2.85508,2.71473,4.88469 10167.3,10148.7,9234.04,-1.71112,-2.89318,3.67043,1.66277,3.35424,4.57631,10.1924,-0.35173,1.35064,-5.80931,-1.82085,3.64176,4.57117,2.2882,0.924739,-2.41648,2.22467,2.19365,5.80375,-0.426137,-2.32705,-0.919332,2.09081,-2.34116,-2.25007,1.71251,3.40172,3.5108 10165.7,10149.1,9229.23,-1.45001,-3.05548,2.45599,-0.349391,3.71978,4.53119,5.144,-0.0754888,2.20722,-6.90377,0.948441,2.13514,3.08117,1.83942,2.86791,-0.010419,2.66035,5.23219,5.6626,-0.804354,-2.37724,-1.67323,0.673861,-3.53649,-1.59081,1.76997,2.75549,2.29186 10167.4,10147.1,9226.8,-1.49928,-2.70714,1.88393,-0.842721,-0.225431,3.25531,1.41947,0.140255,3.21042,-3.88608,1.41104,1.86088,-0.091131,0.642157,1.94581,0.307133,3.18746,6.22574,4.30938,-1.01513,-1.1936,-1.8575,-0.588364,-1.42784,-2.08205,1.85519,1.46316,1.06047 10171.1,10143.9,9226.48,-2.01672,-2.40053,3.06391,-0.0599903,-8.34303,2.94718,-5.04409,-0.199276,4.0892,-3.68083,-0.226057,2.75547,-0.686676,-0.843757,0.670264,-0.458086,3.08212,7.11729,2.84836,0.933537,-1.50789,-1.59001,0.179663,0.0589795,-2.55704,3.42709,0.775783,0.360096 
10175,10140.6,9227.89,-1.34782,-2.60865,2.14445,1.39294,-10.3608,4.5868,-8.2559,-1.78039,0.356678,-10.0047,-3.28868,2.87133,1.85333,-3.67234,1.53223,-1.27653,0.113475,6.97877,4.49731,3.38158,-3.24882,-2.09817,-0.213742,-0.816136,-3.92766,4.36792,1.46638,-0.25462 10179,10139.5,9231.01,-0.683001,-1.14693,0.835389,1.45465,-4.93888,6.92044,-3.2459,-1.76518,-2.11784,-11.5638,-3.99539,3.25477,2.97649,-3.54233,2.62301,-0.286071,-1.99677,5.44349,5.35012,2.55683,-3.04093,-1.82791,-1.42661,0.583625,-2.6178,3.43693,2.29735,-0.308687 10185.5,10142.2,9235.77,-0.0852919,0.0218383,0.522022,1.091,-4.00515,-0.71681,-2.72016,-1.24891,-1.4593,-5.53454,-2.81228,2.98724,1.40275,-1.35994,4.37674,1.00841,-2.02092,6.34309,4.01241,0.223476,0.719167,-0.617158,-1.79277,2.19906,-0.00915837,1.60933,1.1106,-0.276707 10194.7,10147.7,9242.28,-0.507821,-1.45713,1.82236,1.06383,0.990703,1.16431,3.40878,-1.35424,0.436421,-3.7364,-2.82733,0.844561,2.18188,1.42103,2.14788,-1.48658,-0.956157,3.31294,2.03859,-1.09837,2.11718,-0.147919,0.113767,0.665977,1.0134,-0.758268,0.662046,1.48327 10202.3,10153,9250.68,-0.953894,-1.28733,1.09826,0.183582,-2.63676,-4.1377,-2.89907,-0.851983,3.07691,-0.452803,-2.18838,0.00930997,2.87142,4.0314,0.911046,-1.55443,1.18147,4.24956,-2.48362,-1.23019,1.72571,2.11001,5.29268,-0.281886,3.31927,-0.100871,1.85826,4.09941 10205.4,10156.4,9259.89,-1.27754,0.134823,0.181405,0.430733,3.94306,1.54036,2.99815,-1.16285,4.70226,-4.24342,-1.81256,1.00154,4.93307,6.24027,-1.59843,-1.48742,2.34844,2.10305,-2.00905,-0.662325,0.626241,1.17997,6.74123,-1.67701,1.35772,0.491316,4.32271,6.53414 10204.9,10157.9,9267.94,0.0906612,2.16352,-0.379486,5.42194,2.73054,2.84047,-1.4914,-1.83181,4.02307,-5.15449,-0.262248,3.79351,5.21678,7.80905,0.384689,1.27337,2.9796,6.90988,1.28339,2.20996,-0.91791,-0.163496,3.78903,-1.75168,-0.655347,2.9127,4.88667,7.66747 10203.5,10159,9273.39,2.81598,1.22437,-0.368556,7.79675,3.42922,7.94279,4.57077,-0.708312,0.0968463,-6.10539,0.906129,5.55489,5.11842,8.21484,-0.0671665,1.22889,2.37144,6.24544,4.97372,3.9233,-2.49967,0.267274,-0.310124,1.09266,-0.410233,4.04567,4.74621,8.0612 10203.2,10162.2,9275.77,5.91857,0.355765,0.897437,11.4606,-3.5509,6.21936,2.57301,-0.0103725,-3.12789,-4.93913,0.601331,6.94209,5.77388,6.93334,1.15761,0.716978,2.28439,10.4648,4.58557,4.39511,-2.76356,2.73426,-1.51427,4.03252,2.99548,5.47757,3.66414,6.66569 10203.5,10167.2,9275.21,3.60261,-0.370029,0.212296,6.53742,-1.17501,1.39057,4.60494,-1.59955,-3.36286,-6.83681,-0.619753,2.05525,7.21718,4.0699,-0.311278,-1.80144,1.07578,6.02142,4.81799,3.05296,-1.94492,1.84126,-1.66326,1.40391,1.77364,2.95825,3.1993,3.61198 10203.2,10169.7,9272.52,1.94895,1.27875,-0.411546,7.45768,-3.75161,0.551798,7.13428,-3.82068,-2.61405,-4.51085,-0.839975,-0.654388,7.59238,3.63367,1.11679,-0.895324,0.0589114,6.72608,0.605615,-0.28023,-1.84675,-0.134175,-0.468956,-1.06577,2.10307,1.19208,2.14254,2.35948 10201,10166,9269.14,-0.454618,0.774031,2.06017,2.8462,-0.622985,0.18548,5.53147,-2.50822,-2.46147,-4.96779,0.0109421,-5.95039,4.88549,1.45711,-1.36876,0.21175,1.58667,0.959389,-1.72767,-0.999701,-1.91612,-0.271218,-0.271307,-3.60937,2.2528,-2.81471,1.29832,0.342989 10196.9,10158.5,9266.51,1.16537,-1.9421,4.60098,6.66208,-8.91079,-4.05041,0.977918,-0.375912,-2.52562,-2.44083,-1.83608,-5.04574,0.870179,-2.88837,0.903319,2.45464,2.77487,7.13809,-7.32993,-2.29902,0.410437,1.61472,1.76486,-2.68616,2.88565,-3.79142,-0.830458,-1.20118 
10194.1,10152.5,9265.18,-4.11534,-5.864,4.81522,5.05616,0.145339,-4.93641,2.59855,0.656712,1.10696,-4.83177,-6.68192,-7.2593,-1.01756,-6.50992,-0.623669,0.165413,3.83811,5.84041,-5.84841,-0.103661,1.98729,0.416145,1.34348,-6.16515,-2.67871,-5.57128,-1.65554,-3.26762 10194.1,10148.4,9264.07,-6.59722,-4.92656,-2.01588,3.7417,0.726794,-18.2936,5.15057,-0.276157,1.50739,-0.538248,-8.52874,-4.00362,-4.55022,-5.27015,0.604573,-0.930054,-0.109161,8.19838,-8.17669,-2.1092,4.17484,-1.56197,-1.02102,-5.8341,-5.50376,-1.7134,-2.50895,-3.06608 10193.9,10142,9261.25,-7.62788,-2.98611,1.9356,-1.40885,17.3716,4.06957,22.1809,1.39972,5.64224,-7.94302,-5.59134,-1.45901,0.439725,1.11211,-6.73411,-3.11746,1.4598,-4.78344,-2.09513,-0.404037,0.473396,-4.22587,-2.43839,-5.70551,-5.26427,-0.515338,1.20082,0.113119 10190.4,10132.9,9256.55,-0.061965,0.47587,-3.01478,1.28661,-2.15014,-14.2047,7.89898,0.463674,0.911903,2.0883,-1.64338,3.11185,-2.21723,0.781415,-1.37312,0.396228,-1.38267,3.09944,-1.8496,-1.29836,2.6087,-3.15966,-2.03297,-3.33185,-3.23065,2.92606,0.328003,-0.0324179 10185,10126,9252.36,-0.460313,1.71643,-3.7396,-2.47922,-1.49725,-15.3645,-1.80975,0.715758,-0.981069,-0.691494,-0.794101,-0.106849,-2.08179,-0.30971,-1.53311,0.428815,-0.320026,-0.221114,2.28648,0.175576,3.04606,-1.33911,-0.290353,-5.37868,-3.63253,0.919151,0.306196,-0.421839 10178.6,10124.8,9251.04,-1.00256,1.33259,-4.2472,-1.03971,2.95821,-4.55752,1.84476,0.117356,-4.36831,-4.27268,-1.02576,-0.886254,0.661063,-0.0446314,-0.718596,-0.508343,-2.00182,-0.337999,2.57329,-0.613947,2.18595,0.685998,2.2221,-1.4549,-2.89677,-0.0111036,1.2411,0.83044 10170.8,10127.6,9252.97,-1.71108,0.0714348,-2.91875,-0.0818013,10.0027,5.28964,4.84662,0.115636,-5.97389,-2.97492,0.466922,-1.16018,3.14319,-0.484977,-0.73996,-1.40938,-2.86898,-1.18229,2.85098,1.59393,-0.709864,0.769892,0.0526875,0.667581,-4.09633,-0.130706,2.87503,0.28772 10163.4,10130.8,9256.69,-0.0482655,-0.561906,-4.41924,-1.93638,1.00001,-3.80859,-6.74655,-0.693966,-6.90741,3.83606,-0.443929,0.133173,1.32042,-4.12952,2.21239,-0.401666,-2.83084,1.48444,3.60821,4.7162,0.0479322,1.57325,-2.9423,0.781086,-3.57562,1.01359,1.5974,-1.03302 10159.1,10132.9,9259.9,0.830676,1.38376,-3.59798,1.88876,1.90766,6.33722,1.16568,-1.88109,-5.49532,7.56995,-3.97276,2.47056,-1.10217,-4.02745,0.530141,-1.80729,-2.44923,1.11112,6.04583,5.79514,-1.61378,0.146823,-4.31812,1.65679,-0.82556,0.385538,-1.6035,-0.921055 10159.8,10135.2,9260.63,-0.16576,1.00018,-5.12473,0.442361,0.505831,-5.64864,-2.63413,-2.52592,-5.46478,4.95174,-4.3147,0.782684,-5.73615,-4.82371,0.266276,-1.86669,-4.0481,-1.31822,9.03428,5.18538,0.835431,-1.04748,-4.21294,1.0615,-0.105573,-1.22812,-5.24566,-3.63422 10165.2,10138.1,9258.46,0.205477,-0.680098,-4.46762,5.26891,1.18115,-1.68502,7.13137,-1.22722,-4.01706,-1.7858,-0.511666,3.55446,-3.85553,-2.43205,1.3525,-0.694302,-4.16672,-0.729833,7.26617,2.38627,0.742375,-2.04911,-3.24066,2.72775,2.10783,0.115275,-4.78462,-4.34396 10171.6,10139.6,9254.61,-1.51268,-2.23477,-5.13237,-3.29461,-0.317239,-10.5071,-7.94002,1.87205,-2.15615,-2.57627,4.52526,1.46446,-2.39092,-3.68309,1.44927,1.27351,-2.10555,-3.67494,7.0263,3.64847,0.370668,0.612656,-2.452,4.76347,5.31087,1.21101,-2.18927,-4.86589 10174.6,10139.6,9250.85,-0.380976,0.430706,-4.77251,1.24603,3.57465,-3.14504,-10.8805,1.4131,-3.82203,6.1265,4.05681,1.86576,-2.69539,-3.84931,0.571097,0.0445532,-3.61574,1.0929,5.45496,4.67637,-2.69117,0.376736,-3.44843,8.26613,5.44059,2.39248,-1.35143,-3.43895 
10173.2,10141.8,9247.9,-0.967231,0.660605,-0.333774,0.682442,10.1733,9.80472,-4.02844,0.296976,-2.0856,1.70749,0.105393,-0.302007,-2.02762,-1.68176,-2.57321,-1.85542,-2.20576,-3.56605,7.81712,4.57148,-0.717533,0.00661063,0.070936,7.88567,3.00205,-0.188925,-1.30646,-0.417109 10169.8,10147.8,9245.05,1.57911,1.89614,-1.23894,5.44327,1.1255,2.7455,0.888702,-2.69789,-2.29535,1.37374,-2.16695,0.277041,-2.61632,-0.168021,1.19527,-0.966804,-1.39634,2.02717,6.13068,1.74285,2.61838,-0.673957,2.42798,5.71141,1.0237,-0.190537,-2.48355,-0.424022 10166.9,10152.4,9241.4,1.48812,1.56883,0.00439658,-1.99079,-5.3945,-7.45076,-2.79497,-1.09824,0.438405,1.08335,0.567998,-2.12211,0.537132,0.235065,2.13962,0.850241,2.33283,0.11668,5.71046,0.316621,2.37782,1.5783,4.38674,4.44102,2.85837,-0.867284,0.197126,-0.632035 10166,10149.9,9237.21,3.10346,3.20745,-0.0787972,3.26164,-1.99167,1.15174,7.73898,0.388067,-1.3872,7.93093,2.89628,-0.846609,2.95243,1.10786,0.0356645,-0.191303,-1.48335,3.06518,0.833731,-2.48298,-2.62814,-0.329278,-0.0454046,4.84244,1.50962,-0.571214,2.28968,0.0896905 10169.4,10141.9,9233.72,1.54047,2.79665,0.872984,0.435893,0.341067,4.50191,6.31086,2.24353,0.0763229,5.33021,2.30696,-1.94916,2.28551,1.6759,-3.55737,-0.57595,-3.31446,-1.28349,0.109544,-0.911539,-3.08755,0.149125,-2.57658,2.65457,-0.759677,-1.72314,1.73795,1.22082 10175.5,10134.5,9231.85,3.08721,1.31195,-0.463831,-2.78365,-16.0641,-12.4959,-7.90321,1.44639,2.2521,2.09953,-0.628689,0.674957,-0.991746,0.999703,0.501374,1.08647,-1.9555,-0.457535,-1.969,0.140249,0.679574,4.05153,-1.26929,2.9472,1.23177,0.0460567,-1.18548,1.19414 10178.5,10132.3,9231.94,4.8578,-0.156201,-1.83619,3.45539,-10.5983,-4.40534,-3.25278,-1.48511,1.7839,1.07398,-3.79721,3.44697,-0.661031,-0.19397,1.51898,-2.78611,-1.58924,-1.02247,-4.03291,-0.779814,-2.72459,1.42865,-4.44874,1.96164,0.024013,0.769821,-1.68183,-1.09525 10176,10135.5,9234.24,3.98434,-2.9881,-1.82932,-3.45496,-4.37718,-1.32479,-6.81161,0.242295,3.63988,0.773917,-2.92089,1.50769,1.03257,-1.29175,0.607123,-3.32519,0.794345,-7.2134,-4.18473,-2.11878,-3.48641,2.04926,-1.83971,2.5711,1.8547,-0.444122,0.204744,-0.633906 10170.3,10141.1,9238.24,4.5574,-1.21766,-1.92884,-3.3891,-4.53289,-3.61119,-11.1428,0.87067,2.52674,6.28098,-0.916225,0.833349,-0.285056,-2.02874,2.83162,-0.822357,0.836116,-2.02452,-4.36166,-2.46534,-2.40599,3.53798,0.439996,2.8824,2.66576,-0.190266,-0.411649,-0.335746 10164.8,10146.9,9241.73,1.14271,0.21175,2.54403,-5.97996,8.86795,9.92082,0.583279,0.92891,3.1377,1.52082,0.653327,-2.04189,-0.909795,-1.88382,-1.45444,-1.72465,2.94817,-6.9659,0.661566,-0.779148,-2.33549,3.61435,1.90115,-0.709103,0.572663,-2.44443,-1.61985,-1.24632 10161.8,10151.9,9242.42,0.429305,-0.24402,1.54324,-0.758714,1.99988,2.30697,-0.150645,-1.67843,-0.372931,2.68223,0.974669,-2.18675,-3.69726,-3.84373,0.315076,-1.61503,2.02219,-0.439987,1.5067,0.347441,-0.468043,1.85512,2.51346,-3.61534,-1.61311,-1.68631,-4.32277,-3.31289 10160.6,10154.5,9240.5,-1.6783,-2.7916,3.79283,-1.46484,1.8842,7.0456,3.61276,-2.08564,-1.14902,-3.90469,1.00738,-2.71903,-1.12392,-2.56102,-0.564502,-1.26929,2.87817,-3.80446,2.16188,1.69189,-0.17359,-0.806729,4.45158,-4.99401,-1.9224,-2.1335,-3.41399,-1.5215 10158.8,10152.9,9238.94,-1.26294,-1.55708,2.47997,-0.37092,-5.35681,-1.99801,-4.61673,-3.19995,-3.63982,-3.59422,0.268397,-1.15304,1.21312,-1.94008,2.37467,0.463918,1.03699,-0.249188,1.94821,3.1095,0.656428,-1.26258,5.17342,-2.5293,-0.911564,-0.727538,-1.60047,-0.657086 
10157.1,10148.4,9241.47,-0.729297,1.90628,1.50273,8.02209,4.5029,7.25435,-0.943104,-3.87229,-5.15977,-0.605295,-0.786266,-0.00624273,3.2036,-0.99694,1.83674,-0.424322,-0.759934,4.69506,3.12589,4.93905,-1.14094,-2.37706,0.896838,-1.15642,-2.07425,-0.341439,0.651623,-1.90525 10159.3,10145.1,9249.53,-3.61489,-0.368775,4.8318,0.654323,13.8953,20.2332,9.01061,0.740005,1.06482,-1.98312,1.43178,-2.39481,5.44965,2.23927,-2.07082,1.84445,3.36316,-2.3874,5.82791,5.13504,0.331121,1.17574,4.11636,2.46863,2.53744,-2.31289,3.73605,1.261 10166.4,10146.2,9260.39,-0.690065,-0.196533,2.57149,3.28245,1.26863,3.07282,2.3288,0.343504,0.7493,7.7189,2.47287,-2.19401,1.83016,1.49389,2.04941,5.57015,1.68587,7.37325,4.33035,3.86901,3.21355,1.31074,4.30838,4.34097,4.14204,-0.792683,1.91579,1.4487 10174.6,10153.3,9268.63,0.973864,0.288282,4.67663,-0.604468,1.35396,1.77193,6.1612,0.928573,3.56181,0.301872,1.61496,-1.94891,1.37811,1.784,-0.829802,4.5252,2.98522,2.05165,3.03006,0.33278,4.9167,0.692046,4.78248,3.89965,4.1223,-1.28055,0.902128,2.44014 10179.4,10165.9,9270.91,0.383028,0.372248,2.91142,5.26445,-4.52355,-0.481389,-1.47582,-0.0802922,4.09074,-3.4789,-1.84054,-0.641665,1.60157,2.15213,-0.406849,1.24052,1.05589,7.69175,-4.79723,-3.42058,1.48542,-2.69221,-0.604027,-2.8823,-1.41943,-0.386671,1.59434,1.71786 10180.9,10180.3,9268.76,-7.39108,-4.07938,1.96913,5.84801,-1.99672,13.1344,-8.45676,2.45664,8.74322,0.00440195,-3.70354,-4.02376,5.09873,7.07674,-2.94009,-6.27334,-2.18896,9.06615,-15.5002,-6.518,-12.659,-9.2251,-8.78964,-16.0646,-15.2285,-1.36974,7.28841,2.96689 nipype-0.9.2/nipype/testing/data/fmri_timeseries_nolabels.csv000066400000000000000000002022421227300005300244740ustar00rootroot0000000000000010125.9,10112.8,9219.5,-7.39443,-8.74936,7.28395,13.7953,32.2328,32.4809,18.958,-12.2383,-6.86466,-23.0912,-16.425,-5.70842,11.2467,-1.58574,-4.53717,-17.3842,0.912601,13.0428,2.44622,2.08875,-8.74373,-9.47217,-6.87574,-8.11158,-14.54,0.414787,6.04424,0.540389 10136.8,10115.1,9222.54,-0.120582,-1.94906,6.92247,4.75197,11.0735,0.972766,10.2285,0.717545,-1.04488,-7.64424,-2.10875,-2.44368,1.52535,-1.14131,-1.72589,-1.1247,-0.993354,2.98318,1.29855,2.0688,1.00297,0.135373,-3.25325,-3.12065,0.913296,-1.7868,1.58829,-0.735248 10148,10122.2,9228.62,4.24336,-0.689111,5.12782,0.132862,-6.64526,-14.7952,5.19361,3.68198,2.77598,-0.691866,1.07559,1.71444,-1.30287,-2.75746,1.74208,4.75944,1.80799,-0.064464,2.37174,1.09905,3.5756,2.98064,-0.238711,0.822007,5.07188,-0.864496,-0.208741,-1.31367 10156.6,10132.2,9236.11,-0.047434,-1.79438,-0.767925,-3.78683,-2.46365,-12.9433,2.00586,-0.48292,1.16216,0.113706,-0.639879,-0.0445654,-2.82995,-2.22008,1.46544,3.70217,2.84476,-3.32792,6.701,0.982599,0.145487,0.0501163,-1.16747,-0.630382,-0.0550437,-0.0563951,0.0449386,-0.715988 10162.9,10141.8,9243.46,-0.3687,0.640608,-2.93969,-0.37466,-5.42813,-8.55527,-4.70566,-3.62351,-3.94857,0.847112,0.357187,1.39279,-3.07124,0.779726,5.12671,3.62277,2.86265,3.44378,5.49842,0.895482,-2.1777,0.14728,-0.491475,-0.0257423,-0.32504,2.28464,-0.610659,2.01955 10168.7,10149.5,9249.62,-0.272231,3.00751,-2.20783,-5.50238,-1.65733,-2.39574,-6.82249,-1.5591,-5.38806,-0.315138,2.41171,-0.227563,-0.306796,1.26618,4.45885,3.55662,3.14737,-0.0497907,2.76691,1.04757,-2.50276,3.25334,1.90194,3.54754,3.2308,0.393197,0.115407,1.88919 
10175.3,10155.8,9253.09,0.271133,3.11725,-1.24188,-5.32432,6.94595,5.40219,2.63329,1.77742,-0.434798,3.20784,3.1926,-2.12653,1.4207,-0.162939,1.57116,1.20026,2.14004,-4.36978,-0.074248,0.344989,-2.79157,3.57441,2.795,6.81971,4.61981,-3.15395,-0.556388,-0.951462 10181,10160.9,9253.62,-1.52186,-1.02665,-1.31765,-8.89055,1.45638,-6.40533,-8.20284,3.42071,6.34151,7.32703,2.81444,-5.56924,-2.07761,-2.82472,1.75969,1.56549,2.59032,-4.99642,-0.861721,0.661704,1.27294,4.24609,5.72265,7.93181,6.46356,-4.54558,-2.93302,-2.55741 10182,10163.1,9253.53,-4.12759,-5.01517,-1.383,-11.7032,7.03273,-0.354258,-4.14846,2.56836,5.49077,2.70724,-0.00938943,-7.91268,-3.33257,-3.77932,-2.70035,-1.95288,1.51899,-10.5021,0.604386,1.13765,2.8031,0.719838,5.10986,5.4321,3.01561,-5.05514,-2.51591,-2.29453 10178.9,10161.7,9255.33,-2.09727,-3.23639,-0.971464,-6.47564,-1.86208,1.47429,-8.69004,2.23012,2.64935,4.20852,-0.00802028,-4.11236,-1.54808,-1.73414,-2.21966,-2.31888,0.521142,-4.49634,-1.66003,1.37105,1.47741,-1.17943,3.52554,2.31201,0.381259,-1.24137,-0.930002,-0.860505 10176.3,10158.2,9258.8,-2.87976,-1.16821,-1.15587,-7.36873,-2.70663,3.69409,-6.23946,3.17083,3.67683,5.95472,2.6739,-2.5798,1.61294,2.31642,-4.31408,-1.6647,-0.422612,-6.13843,-0.39141,1.92345,-2.82275,-0.742784,1.68164,-0.706688,-1.87652,0.172975,1.51911,1.04727 10176.2,10155.4,9261.93,-1.79655,0.511159,-2.91648,-1.19976,-6.01265,2.43062,-4.91165,1.64787,2.485,6.04132,2.79139,1.36683,2.36631,4.70105,-3.09068,-0.875835,-2.73203,-1.04036,0.0279962,0.57264,-4.70596,0.399049,0.109101,0.540718,-2.52779,1.90878,1.47212,2.48712 10177,10154.3,9263.36,-2.06935,1.47151,-1.59814,1.1621,-8.21806,2.74994,-4.8666,1.6535,2.86737,3.56179,1.87379,3.98852,2.20191,7.00018,-2.12026,-0.322149,-0.459427,1.99009,-0.386875,-1.65524,-2.88602,2.5405,3.09752,5.52644,1.72241,3.28467,2.06659,4.48929 10176.7,10153.6,9262.97,-2.47996,0.0736981,-1.18826,-1.40068,-2.38119,-1.33094,-3.87199,0.498621,1.31667,-0.952908,0.481976,0.0885501,1.11339,4.67043,-2.37383,-2.32579,0.991108,-0.25346,2.41941,-1.44295,0.0394728,1.67752,2.73018,4.10445,2.29859,0.993454,2.7469,3.39394 10174.9,10153,9261.77,-0.957748,-0.455644,0.885525,1.7746,0.0437147,0.878291,0.0855234,-0.572903,1.39546,0.00119098,1.69176,-1.96049,0.156938,2.84845,-1.18488,-2.65197,1.35428,1.98606,1.65427,-0.643756,-1.03602,-0.0406435,-0.236011,-0.961959,1.28125,-0.464305,1.75539,1.84618 10173.4,10153.5,9261.3,-0.583682,-0.792331,1.36077,0.644185,-3.55594,-0.618864,-4.88099,-0.136266,1.51362,2.73872,3.65897,-2.63062,0.416981,0.735765,0.533665,-0.326252,1.0146,2.83848,2.16063,2.30307,-2.01136,0.638055,-0.22921,-3.19692,0.947596,-0.379132,0.678065,0.747812 10174.5,10155.7,9262.24,-0.685336,0.856591,-2.63545,-0.959601,3.25442,0.791955,-2.20612,0.263046,-1.34292,4.47114,2.99912,-2.56858,-0.21931,-1.56389,-0.808263,0.311028,-2.34261,-0.965718,1.98615,3.50723,-1.41951,-0.258476,-1.16227,-1.73014,0.372641,-0.118946,-0.422557,-1.3986 10179.6,10157.8,9264.01,2.59538,3.68921,-1.9033,3.99249,0.109215,-1.86778,-4.51336,0.591929,-1.29086,1.52475,1.01934,0.773735,0.0652847,-3.00075,1.79923,2.1369,-2.11635,3.17035,-1.87907,2.19309,0.880052,-0.480886,-1.94369,-0.204693,1.63785,1.43004,-2.081,-3.24652 10186.9,10157.6,9265.4,2.10402,4.02633,0.884264,0.1708,-3.27208,-4.9215,-1.0364,1.60796,1.70888,-1.43476,1.10519,1.26841,0.0627916,-2.97727,1.13683,2.82663,-0.301705,-0.592683,-3.81587,-0.70989,1.60855,0.103857,-2.48043,-1.22737,-0.312858,1.31617,-1.91269,-3.98886 
10192.2,10155.4,9265.29,1.6824,4.26755,1.57687,1.43194,-5.98808,-2.25097,0.153789,0.168572,0.879003,1.68604,0.75956,3.65922,-0.869793,-2.49312,0.497574,2.41553,-1.34226,-0.127659,-3.59295,-1.56547,0.88849,-0.785242,-4.24845,-5.15572,-4.81836,2.77035,-1.44493,-3.44434 10193.6,10153.7,9263.38,1.6491,4.80854,1.08823,5.10222,-5.26833,5.52263,-0.997094,-0.959485,-1.52356,6.15147,0.897033,7.60472,-1.50848,-0.576994,0.845199,3.25263,-2.21353,2.36454,-2.11918,-0.480371,1.405,-1.24949,-1.88424,-5.50221,-4.39822,4.6832,-0.575266,-0.350337 10193.7,10153.5,9260.14,0.371243,3.4575,-0.922956,2.86612,3.70316,4.4652,-2.35097,-2.08567,-4.55866,2.05406,0.20181,5.48777,-0.851734,-0.932792,0.852325,2.66059,-2.76402,-0.836483,3.32512,2.58318,3.54953,-1.82575,1.03107,-3.58566,-4.1055,2.71087,0.64122,1.16036 10193.4,10154.1,9256.45,0.655998,2.95689,-0.961572,2.95967,6.90968,-0.0847335,-1.13659,-2.64581,-3.78971,-2.43015,-0.722449,3.08777,-0.234356,-0.603156,1.30068,1.14368,-2.23215,0.241084,3.91588,3.38796,4.07024,-1.08082,1.15617,-0.375163,-2.54369,1.29418,0.795869,1.31402 10190.3,10152.8,9253.2,2.59279,1.93007,1.93861,4.82647,-1.84288,-5.84018,-7.03235,-2.16958,-0.8999,-4.4747,-1.99497,2.40008,0.0349671,-0.825783,2.00993,-0.184404,-0.576706,6.30193,1.43455,3.63536,2.34484,0.148851,-1.22127,-0.718508,-0.716753,1.50537,0.412978,0.73252 10185.2,10148.2,9250.73,1.88291,-0.127643,2.41457,0.38457,3.28565,2.40364,1.07674,-0.352091,-0.192694,-2.80281,-2.45121,-0.746935,0.454781,-0.345492,-2.38393,-2.35152,-0.468918,-0.28004,0.207449,2.6636,-1.39254,-2.09536,-4.44811,-4.48824,-2.93117,-0.770421,1.19,0.219788 10183,10142.2,9248.93,3.78484,0.701338,-0.71552,3.48407,0.454755,4.3743,3.68099,-0.668556,-3.42636,5.52772,-1.23863,-0.405148,0.665698,1.06479,-0.0251586,-0.48849,-0.847741,1.4814,-5.36764,-0.405219,-1.51485,-3.88226,-5.12764,-5.33767,-4.3365,-1.173,0.417418,0.415356 10185.4,10138.4,9247.93,3.11727,0.196163,-2.018,0.721283,-2.5075,-1.06349,0.331823,-1.2182,-4.01712,4.78444,0.452166,-2.16432,0.55673,1.61447,1.16718,1.44415,0.569846,-0.812131,-8.14324,-2.91296,2.43154,-1.45218,-0.730675,-1.0947,-2.25658,-3.52675,-0.361214,1.09266 10188,10139,9248.05,1.52249,-1.16117,-2.4591,-2.41492,-0.35832,-7.48161,-0.0490082,-2.1421,-3.52013,0.903896,-0.958215,-5.8036,-2.36788,-0.368615,-1.88998,-1.40466,-1.28791,-4.79995,-5.58563,-3.57656,4.13739,-0.274441,1.53352,2.93946,-1.96753,-6.76034,-1.87752,-0.324793 10186.8,10142.9,9249.23,2.29541,-0.414867,0.263844,-2.42527,-9.23597,-12.7958,-5.40665,-1.3296,-0.255947,1.05195,-3.09731,-3.83996,-4.40177,-0.0123634,-1.79533,-2.22933,-1.59891,-1.58539,-4.29444,-3.24283,2.73497,0.939395,2.25632,3.98042,0.672842,-4.87272,-3.0871,0.140664 10183.8,10146.3,9250.93,1.04007,-0.107056,-0.719832,-5.17314,-6.41206,-13.4527,-3.51115,-1.82372,-1.0661,0.164654,-4.87432,-3.16371,-3.16216,0.547311,-2.31938,-3.32366,-2.59406,-3.07878,1.07584,0.135595,-0.15385,-0.198986,-1.76614,-0.364142,-1.44816,-3.17832,-0.666637,0.539005 10182.5,10148.1,9252.57,1.58315,0.552138,-2.38854,1.84879,-2.25441,-6.8381,0.208721,-2.73312,-3.19332,-2.49192,-4.21087,0.445019,0.0651566,2.67403,-0.780414,-2.43461,-3.10543,1.48742,-0.123359,0.0321366,-2.00728,-1.30717,-5.02137,-5.05394,-3.39985,-0.233706,2.10556,1.51466 10182.7,10149.6,9253.33,0.671616,-1.8801,-5.19861,1.6691,-0.386439,-6.73637,0.390118,-1.36276,-2.8229,-3.74619,-1.53148,0.15594,0.934737,1.96014,-1.35363,-0.924511,-3.00858,0.653744,-1.84706,-3.59509,-0.247233,0.962108,-1.40552,-3.28119,-2.22432,0.0626129,2.48273,0.969888 
10182.9,10150.9,9252.01,0.0166707,-2.52456,-5.48285,2.26653,-2.03587,-6.50283,-1.00325,0.264499,-1.46362,-0.822672,-1.11829,0.403605,-0.734484,-0.382999,-0.186567,1.24812,-2.13095,1.80897,-2.82131,-6.15356,2.54337,2.39696,2.51379,2.41699,0.307725,-0.195503,-0.252349,-0.890546 10182.1,10151,9248.33,-1.21698,-1.52567,-2.334,0.102378,3.74418,-1.36756,3.51501,1.50357,-1.80774,-0.855037,-2.71284,0.0746735,-1.2904,-2.37263,-0.326812,1.37779,0.0811662,-2.04277,0.452769,-4.37491,4.60025,0.785458,0.944597,2.57121,-0.443829,-1.9031,-1.78376,-2.25217 10180.2,10149.4,9243.85,-0.498632,0.815261,-1.05027,1.32586,2.65892,-5.17029,-0.588453,1.63481,-3.33979,4.4087,-1.26981,2.01576,-3.03953,-3.66687,1.33091,1.62961,0.568999,0.53543,0.477935,-1.78405,3.91722,-1.12653,-3.07327,-2.27103,-2.21119,-0.0469714,-3.05949,-3.83303 10176.1,10146.3,9240.54,-0.464849,1.25223,-1.14736,-0.645201,4.96922,-0.805424,1.85313,1.43677,-1.45072,6.22509,1.54511,2.89442,-3.56094,-4.35854,-0.476689,0.39343,-0.929162,-1.07774,0.941846,-0.57756,0.363373,-1.13491,-1.30865,-3.06369,-1.8739,2.47973,-3.19611,-5.38414 10169.3,10142.4,9238.91,2.28739,1.91951,-0.759834,1.17008,-1.10807,0.137649,-1.76481,-0.427729,-0.592675,2.50623,0.607717,4.10404,-2.20382,-5.11375,1.80008,0.383348,-3.40396,4.33491,0.605228,-0.0871236,0.185566,0.480246,2.74078,1.48145,2.07534,4.96863,-2.65852,-5.78272 10162.1,10139,9238.14,2.03262,2.32633,0.46709,-2.26524,5.80967,5.85587,5.67759,0.185696,-0.246666,-0.787877,-0.201738,0.61348,-0.542043,-3.51173,0.345287,-0.426571,-4.01566,0.315299,2.10005,-0.391753,2.39343,1.28396,3,4.99164,5.3145,2.31592,0.0224444,-4.14279 10158.4,10136.9,9237.31,2.77556,2.83113,1.37245,1.19159,2.19923,-2.0116,3.1913,1.03754,-0.929092,0.870894,1.00256,-0.624392,-0.561338,-2.99529,2.23674,0.823539,-1.63024,3.75817,0.298891,-1.18515,4.54738,1.25951,1.91277,3.57793,5.44217,0.785618,0.025315,-3.27161 10158.5,10135.5,9236.37,0.0672571,0.761886,2.35427,-0.889999,6.73976,-1.98269,8.45302,1.1398,0.0604089,-1.15193,1.32222,-2.47069,0.131408,-3.48238,-0.669944,0.753279,3.07189,-2.04262,0.174304,-2.32107,2.83224,0.708328,3.23848,0.984911,2.384,-1.28385,-0.548071,-3.32946 10160.6,10134.8,9236.46,-0.783525,0.239203,0.00548465,1.88108,6.83171,-2.89703,7.27976,-2.71585,-1.47417,2.12383,-1.04536,-1.14095,0.145875,-4.3962,-0.139564,0.781551,3.40043,-0.28834,-0.343608,-2.36391,0.0938093,-0.36295,1.0276,-0.578692,-0.619797,-0.489157,-1.92106,-4.163 10166.1,10135,9239.02,0.124276,1.29463,-1.44975,3.21172,2.53479,-3.38317,-0.20102,-4.72755,-2.14129,5.53743,-1.24849,0.994366,0.436372,-3.09635,2.19121,1.13794,1.52365,3.0586,0.622146,-0.699363,0.103461,0.316277,-1.73095,-0.195395,0.490618,1.44514,-2.50878,-3.62472 10175.6,10136.9,9243.9,1.67228,1.70099,-0.125799,2.04051,6.74509,2.05118,7.82124,-3.08565,-1.70842,3.37127,-0.160655,1.32998,0.57087,-1.46351,1.80831,-0.585194,-0.267853,0.719624,2.12333,-0.931791,2.61407,0.519467,-1.78038,1.70819,2.66646,1.47407,-2.48388,-2.6294 10184.4,10140.5,9249.09,4.05746,1.49391,3.1491,4.74869,1.42089,-7.65297,4.6083,-1.50292,-0.681543,0.792377,-1.54194,2.19467,-1.449,-2.54459,5.38937,-0.0662613,0.683022,6.46847,-1.151,-2.09676,5.40097,0.0884146,-0.584039,0.411805,2.87021,2.70096,-3.69024,-2.72328 10185.2,10143.8,9252.71,2.20708,-1.9117,6.2705,-1.38994,9.88462,0.984595,14.8745,1.09177,3.01497,-6.59006,-3.06879,0.864155,-0.352553,-2.42934,1.6214,-0.899998,2.90809,-2.62154,-0.748965,-1.78716,3.1828,-0.76616,1.51574,-1.80336,0.759499,1.08543,-1.48814,-0.830864 
10176.5,10145.2,9254.8,3.08758,-1.24415,2.30133,1.5123,4.9996,-2.25743,5.71269,0.326257,0.862459,-5.32366,-2.15784,1.98295,-0.769376,-3.24456,1.73394,-1.18022,0.303592,1.19388,-1.18318,1.1848,-0.484859,-3.12715,-2.31674,-4.16244,-1.41399,2.32149,-1.0187,-1.70219 10164.6,10145.4,9256.92,1.59078,-1.06701,-0.557541,-2.88977,3.22953,-0.245042,-0.474481,0.0498212,-1.16809,-8.33134,-0.306573,0.38113,0.242976,-2.39828,-1.29092,-1.68013,-0.127576,-1.94114,1.03024,1.7825,-1.44807,-2.86352,-4.13379,-1.78466,1.5241,1.16147,-0.513496,-2.30027 10156.4,10145.9,9260.21,0.0333157,-1.40254,-1.63643,-2.63202,2.15792,2.8366,-1.32406,-2.25364,-4.61227,-7.74587,-1.005,0.107792,-0.131513,-2.0428,-1.28031,-1.65736,-0.0589992,-0.767749,0.0451012,-1.23948,0.334266,-2.05544,-5.74107,1.40617,2.47259,0.129519,-1.22605,-3.50154 10152.5,10145.2,9264.25,-2.23854,-3.34598,0.871046,-4.48776,-5.12246,-0.367558,-7.49548,-3.04105,-2.99035,-3.84367,-2.67766,1.19195,0.695189,-1.99211,2.38266,0.800284,2.92667,1.82052,-0.796218,-1.82753,3.43662,1.60186,-2.49788,2.02216,2.59346,0.975508,-0.397427,-2.78437 10148.6,10141.1,9267.56,-4.64613,-5.4569,3.80281,-6.22039,0.554038,5.00519,-0.395733,-3.04225,0.570141,-6.95862,-4.49105,-0.00732036,3.78285,-2.09066,1.46914,-0.873643,3.95228,-2.08532,2.8568,0.749314,1.78963,1.02579,-0.808831,-1.60113,-1.17483,0.544949,1.95805,-1.27827 10142.4,10134.6,9268.73,-4.02228,-5.3818,4.39201,-6.57399,-2.68308,-0.146626,-0.297909,-1.28233,3.72363,-10.5635,-3.46562,-0.498293,3.92457,-1.10422,0.725311,-0.888612,3.1725,-1.82837,4.64182,1.32637,-0.56378,0.781271,3.29557,-0.557202,-0.712584,0.587691,2.76212,1.05325 10137.8,10128,9266.83,-2.98689,-3.62614,2.49614,-3.78405,5.33483,-3.24499,-1.4797,-1.49474,0.75769,-13.0722,-3.57543,-1.73535,1.13307,-2.81826,-2.67056,-2.75063,-0.407379,-1.38965,7.67619,2.2374,-2.93415,-2.1994,0.956463,-2.25511,-4.42128,-0.889014,2.30781,-0.144069 10139.6,10121.2,9261.84,-1.19244,-2.09691,-1.17019,-2.92359,1.84257,-9.64131,-8.2266,-2.48032,-2.29368,-7.41116,-3.60172,0.404837,-2.31741,-3.52505,-1.14341,-1.1367,-2.22469,2.93998,5.91064,0.841518,-1.68308,-1.06298,-0.398387,-1.68239,-3.53445,0.38234,1.02165,-0.403129 10146.2,10113.8,9255.3,-3.35595,-3.34535,-1.74811,-10.4556,3.60927,-0.776329,-3.08604,-1.29687,0.835023,-5.76979,-1.7646,-2.22816,-1.31439,-0.382083,-1.73312,-0.792276,0.206848,-4.1992,4.29806,-0.830575,-1.71405,1.40452,2.00247,0.106559,-0.768805,-1.08451,1.11784,1.22578 10152.4,10107.8,9249.87,-2.49869,-3.87311,-1.98238,-6.90342,-1.23671,2.90852,2.97754,-0.581043,2.81778,-2.71728,-1.21684,-5.07044,0.497485,2.01224,-0.365556,-1.64542,1.17956,-3.76085,-0.573467,-2.58111,-2.12663,0.378165,4.18795,1.24581,-1.36196,-2.87649,0.482267,1.63454 10154.8,10107.2,9247.27,-4.01788,-5.39388,-1.72161,-10.3153,-0.251037,-1.57831,1.61553,1.18147,5.7765,-0.599766,-1.22598,-10.0294,0.895145,2.02015,-4.45992,-2.58818,2.98391,-9.45103,-1.41902,-1.29446,-0.55725,-0.180421,6.94249,-0.594659,-3.53394,-6.50742,1.38112,1.51458 10153,10112.2,9246.76,-3.24249,-5.01072,-2.02956,-7.46567,0.0264794,-1.5224,-3.31193,1.53111,5.32332,2.5335,0.40251,-7.05633,-0.711568,2.89381,-5.39998,-1.36446,2.04786,-7.02942,-4.53297,-0.88262,-0.357391,0.595822,6.5409,-2.84395,-2.64994,-5.7378,1.39939,2.97985 10148.7,10119,9246.16,-3.96002,-4.42756,-3.26432,-8.69557,4.03628,0.616301,-3.92147,2.76458,1.652,2.17356,4.22927,-4.5247,-2.33417,3.89508,-5.29918,-0.309883,-0.288513,-8.36711,-3.09529,-0.126421,-1.8539,2.38545,3.61409,-1.26649,0.429596,-4.19612,1.45711,3.95651 
10145,10125.2,9244.17,-1.75695,-0.511195,-1.73883,-3.34742,-1.26592,5.24499,-3.03549,2.78645,-2.1334,0.220919,5.88292,0.160927,-1.7455,5.37331,-1.59599,1.91312,-0.631146,-3.16886,-2.94994,0.34822,-3.01289,2.84951,0.356135,3.47859,4.18276,-0.12287,0.984563,3.64398 10143.1,10130.2,9241.27,-1.71615,1.12867,1.04805,-6.57347,2.41341,16.2593,7.00371,0.924589,-2.71609,-6.2656,3.57183,0.37743,1.96421,5.66573,-2.3041,2.26799,0.668846,-8.32571,2.30148,2.66333,-1.75615,2.71555,1.44408,6.00224,4.85886,0.685304,3.03234,2.82015 10140.7,10134.4,9239.05,-1.25992,2.46902,-0.556969,-2.76672,5.45596,12.4649,8.36959,-2.49709,-3.8708,-1.40646,1.38854,1.37064,2.12007,3.84209,0.459629,2.15086,-1.24194,-4.15365,4.52043,5.4809,0.876317,0.656659,-1.01116,2.09458,1.65028,2.77599,3.21635,0.381243 10133.6,10137.8,9238.32,-2.22442,1.37094,-0.787327,-1.05469,3.55443,5.14715,-0.0509983,-0.0905216,0.72894,3.96149,2.38061,1.75467,3.09083,4.18358,2.79613,3.29833,0.325666,-0.671704,6.07566,7.72379,3.13564,0.655668,-2.59152,-1.76199,1.58102,4.45884,3.34631,0.480564 10121.1,10140.7,9238.2,-2.17367,-0.866588,-2.79273,0.692199,10.1863,9.97874,6.04483,2.66482,1.76948,2.61332,1.9281,-1.1243,5.03132,3.85731,-0.443337,0.284932,-0.868815,-3.31091,8.51065,6.49177,2.23459,-1.67042,-3.77735,-2.781,-0.902713,1.50205,4.04064,0.197185 10110.8,10144,9237.47,0.303664,0.966366,-2.65365,4.69141,3.98147,5.09796,4.57488,3.26927,0.562439,5.41174,1.92471,-1.15766,3.6349,2.42314,-0.0874924,-0.0560302,-1.22366,1.9914,3.44357,1.69106,1.98031,-1.32375,-0.576816,-1.03349,0.269332,-0.300454,3.28264,-0.458562 10110.3,10147.7,9235.48,1.28867,0.940385,2.1165,-0.581377,-0.643187,-2.16313,1.69237,2.47912,1.37859,3.32286,1.26412,-0.720553,2.36863,-1.25903,0.0706914,0.944374,2.2859,0.229574,1.5842,-0.12766,4.43122,1.34327,3.34673,-0.404948,2.87655,-1.67866,3.04869,-0.25307 10116.7,10150.7,9232.33,0.394714,-0.833445,4.94793,-6.11826,9.22151,2.99358,11.1041,1.15853,2.93899,0.397365,0.0221406,-0.0976144,-1.13452,-3.42557,-3.72862,0.476803,3.69054,-8.12164,2.48493,0.363106,3.87676,0.504363,0.972674,-1.44388,2.15926,-0.828986,1.75931,-0.549928 10121.4,10152.8,9229.14,1.29508,-0.757006,3.12597,-1.6729,7.62364,-0.936804,6.48918,-1.03742,1.86227,-0.262351,-0.75051,2.31301,-4.8422,-4.5034,-2.66476,0.578808,1.27532,-2.04282,3.45288,3.01897,0.564668,-1.21876,-3.06331,-2.70583,0.257935,3.52846,-1.56111,-1.5308 10121.6,10152.4,9226.86,0.677648,0.378414,1.31475,-2.61018,4.91454,0.37514,2.86121,-0.193973,1.93324,-4.63591,1.10695,3.14457,-2.96694,-2.19304,-2.99025,0.50097,0.165722,-0.200595,6.85438,4.63234,-2.47705,0.342532,-1.30419,-0.141339,1.63084,4.32707,-1.19328,0.76139 10120.5,10149.2,9225.49,0.499478,1.88224,-2.14427,-2.77288,10.6927,1.71766,6.49787,0.43981,0.0705592,-5.13201,2.57263,1.48076,-1.20267,-0.591255,-4.74193,-1.79266,-1.46188,-3.42451,8.04316,3.54243,-2.30088,0.0710442,-2.83238,0.653942,0.240506,0.904871,0.430945,1.6283 10121.2,10144.8,9224.89,1.35965,2.80608,-1.94166,1.75583,0.26227,-8.26437,0.567312,1.6259,1.60009,0.0627174,2.62631,2.65738,-1.31444,1.36503,-0.138702,-0.303116,1.07964,0.805711,0.6712,-0.0379901,0.596301,1.49046,-2.9437,-0.0854658,1.7116,1.14138,0.19577,2.11315 10121.7,10140,9224.64,-0.625981,1.46152,0.571473,-0.708952,-3.97306,-7.60183,3.54876,2.52756,3.43643,-3.37318,1.25185,1.95327,-0.430742,1.99167,1.38528,0.439469,3.35733,-3.21518,-3.33649,-3.33716,1.63613,2.87364,0.216347,-1.19264,2.34646,1.38095,0.250252,2.26893 
10117.5,10135.7,9223.59,-0.644241,3.50756,1.18011,1.32346,-4.09529,-1.15572,8.91836,0.864807,0.810206,-4.21922,0.85698,1.54667,-0.984211,1.49262,0.424346,0.272079,0.55043,-3.11065,-4.92549,-5.21789,0.616593,0.933381,0.453042,-0.907799,0.816878,0.888407,-1.07882,0.897744 10109,10134,9221.44,1.24811,3.97674,3.11247,-1.16572,-9.20759,1.26864,10.07,0.861166,0.629341,-5.07074,1.84156,0.554677,0.501606,2.3508,-1.99158,1.42546,-0.0624237,-4.75601,-4.11731,-5.27973,3.12042,0.927954,2.01431,1.91643,2.26937,-2.42322,-1.85499,2.11246 10103,10135.6,9219.87,2.2046,4.10281,1.87105,-2.44462,-1.81059,2.73657,16.517,1.49188,0.862687,-1.50652,2.91423,-2.27191,-0.311967,3.16828,-6.05317,-0.647296,-0.600809,-9.86797,-3.317,-4.05579,3.51099,-1.77799,-1.17227,0.17711,-2.12588,-5.86398,-2.08211,1.43944 10103.9,10138.7,9220.3,3.77174,5.49059,1.2637,1.03751,-12.6254,-6.24364,0.90728,3.65224,3.71822,2.59825,4.31988,1.86088,-2.62582,4.43061,-1.00461,2.10803,1.47555,-3.28777,-8.18549,-4.31695,2.95113,-1.34785,0.676274,-1.38936,-3.04336,-1.37001,-2.35773,2.00922 10108.6,10140.8,9221.82,-0.70593,3.90046,-1.14247,-3.0764,-1.47295,-1.10809,-0.510284,3.79285,2.60078,-1.28697,3.77566,2.32766,-3.54475,2.99719,-1.20306,1.33262,-0.719923,-9.06449,-7.33119,-4.80493,-0.721145,-2.4024,1.79362,-1.97223,-5.04385,0.0875954,-1.73778,0.950888 10113.1,10142.1,9223.55,-1.06377,0.843971,-1.44889,-5.32939,2.69029,-3.83385,-5.63119,0.535717,-1.61039,-5.59267,1.26514,2.05707,-3.31026,-0.958826,1.33732,1.46551,-3.13585,-9.66605,-6.00234,-4.35532,-0.26599,-0.831562,2.98878,0.128679,-2.54674,-0.278737,-3.58409,-1.324 10120.7,10142.9,9227.01,3.56995,1.04759,3.75113,-1.7421,5.12807,3.1454,2.38504,-1.62768,-2.93793,-5.71266,-0.530001,2.84448,-2.04436,-1.31251,2.17243,2.11298,-0.867238,-7.66197,-6.87331,-3.32769,-0.373459,-0.116178,2.03689,0.379397,-0.00605166,-0.182103,-4.1657,-1.22794 10135.1,10142.1,9232.63,4.13322,3.14571,5.42112,-9.50857,6.61076,-1.5265,-1.3563,-0.229734,-0.953633,-2.39287,0.0907423,-2.25912,-2.95494,-0.622513,-0.878638,3.11006,2.20909,-12.7591,-4.65267,-0.652931,-0.508727,-0.484787,-1.43884,-3.89903,-1.68783,-1.20607,-1.47415,-0.30987 10150.6,10139.9,9237.26,7.08686,7.1115,3.05908,-7.31514,-2.75139,-6.15754,-6.75994,1.34201,0.583247,1.72791,0.0586144,-1.05549,-2.23348,1.35232,0.957745,3.9225,0.27845,-7.28043,-8.71747,-3.21629,1.12263,-1.08286,-3.72117,-4.10901,-0.817087,-0.319549,-0.171801,1.86899 10161.3,10137.9,9238.2,5.45348,5.872,0.0360833,-8.71486,1.68904,-1.57501,-9.84544,2.70784,2.39605,-1.45535,-0.548901,-2.93743,2.31592,2.21738,-0.0678836,1.75621,-1.90485,-7.83172,-5.34721,-0.902631,2.89369,0.938874,1.08004,0.946796,3.39736,-3.2386,1.23533,3.43628 10168.7,10135,9236.89,1.9988,3.16081,-0.959961,-1.65775,15.8147,12.2058,-6.43511,1.69639,2.59198,-2.06327,-0.47323,-4.35241,3.77438,3.79233,-2.16153,-2.08622,-2.56136,-3.89096,-0.736348,5.49778,-0.475583,0.770127,3.05002,3.17719,3.81221,-4.99556,1.59718,3.01185 10178.3,10131.2,9237.28,0.818385,-0.233269,1.46873,6.63122,10.9706,17.5879,-3.54675,0.677416,3.72244,0.655626,-0.201865,-1.16835,1.57109,5.42876,-0.444523,-1.12764,-0.256929,5.62565,-1.99386,6.4084,-2.47406,1.18593,3.2834,3.0293,3.51573,-2.53776,0.959038,3.23253 10193.3,10130.2,9242.16,-2.48525,-2.35837,2.98987,5.98816,11.4719,15.9039,-4.84232,-0.825315,2.54659,1.43064,-0.659643,-2.96556,0.571285,2.41784,-2.00371,-0.757574,1.41844,6.37057,1.42823,7.71148,-4.93994,-1.54988,-0.232174,-1.34349,-1.26249,-2.05601,1.26179,0.464125 
10210.2,10133.3,9250.5,-0.302459,-1.69801,0.843368,2.30597,6.15326,11.0157,-5.9274,-1.05244,-1.68469,-0.278629,-0.694935,-0.891837,1.23651,-0.21345,-0.305015,-0.0987808,0.160233,4.91775,0.166271,3.92353,-3.88399,-2.55526,0.198425,-0.923912,-1.86728,-0.552523,1.22445,1.15572 10221,10137.3,9258.6,-1.56339,-0.256664,0.840544,-1.61826,11.0061,14.4706,-2.59098,0.449882,-1.65171,-1.89163,-1.35949,-1.40198,3.60618,0.270121,-1.02351,-1.1912,0.778059,-0.110922,0.867721,2.27546,-5.20223,-2.14642,1.17716,-1.36266,-2.51971,-1.10085,2.42789,2.32548 10222.9,10141.6,9264.61,-4.74868,-0.212232,1.05283,-1.29221,10.744,4.75459,-2.81401,0.644295,0.850172,0.179994,-3.01777,-4.30435,2.71079,-1.12735,-1.29174,-2.07496,1.34575,1.0376,2.5823,1.95702,-4.5778,-1.28586,-0.494008,-4.39926,-5.46478,-2.40477,1.70545,-0.546783 10222.5,10148.7,9269.02,-3.49502,-0.678579,-0.213247,8.06515,8.4472,0.736921,12.8231,-0.680516,1.09355,1.44143,-3.62765,-2.08929,0.194595,-2.35671,-0.392866,-2.86869,-0.655593,6.76095,0.52286,-1.94996,-0.69629,-1.94695,-3.05311,-3.36287,-5.8798,-2.04553,-0.962602,-2.08692 10226.3,10155.2,9271.48,-1.96969,-0.131236,-7.34816,10.3469,1.43629,-18.1274,6.28789,-1.94889,-4.21799,9.10578,-0.96868,-0.513386,-5.07894,-4.75252,3.07715,-1.21549,-4.62974,12.6049,-2.11208,-4.5134,4.07597,-2.26695,-5.31607,-0.080814,-4.75562,0.0499323,-2.60796,-2.05158 10230.1,10151.7,9270.27,-0.441668,1.99564,-2.24149,10.4542,-4.09391,-6.45561,-1.77752,0.712394,-1.02642,8.25875,2.54249,4.31177,-1.67116,1.28898,3.90167,2.27301,-0.292013,13.1856,-3.31394,-4.23242,0.509949,-0.582218,-1.55254,1.54596,0.383257,3.15094,0.659781,3.83919 10224.9,10138.7,9266.49,4.67287,5.1299,-1.26323,13.4301,-10.2745,-9.49416,-12.2719,-1.18436,-2.87586,6.16837,2.83569,6.07774,-2.8315,2.00898,6.40272,2.01559,-1.86315,15.8694,-4.72684,-3.25468,-2.65905,-3.311,-6.24296,-4.21139,-3.70695,4.80612,0.395122,1.76566 10212.8,10131.4,9265.67,3.01888,4.86272,2.80549,9.41976,5.08199,16.7307,3.01517,-1.39232,-0.901598,-3.17761,2.70511,2.89126,0.206015,2.09237,1.79821,0.427067,-0.286912,4.97158,1.88506,1.52106,-4.78901,-3.10639,-5.19696,-1.88352,-1.17405,1.76068,1.66502,-0.462334 10205.3,10137.3,9271.29,5.0191,6.44861,-1.029,10.2232,1.46143,6.79866,-7.1328,-3.52906,-8.32347,-3.93806,2.03961,4.301,-3.73195,-3.92217,6.44854,2.90593,-2.49697,11.4551,-0.562561,1.57056,0.711111,-0.350636,-4.25263,3.76126,3.75639,3.70316,-1.79131,-3.47622 10205.7,10147.7,9278.59,5.83546,6.36501,-0.202118,7.16455,-12.9828,-12.4607,-27.3389,-3.33415,-9.60681,-6.26496,-0.539386,6.78879,-3.91681,-6.10831,9.8609,6.12423,0.502419,17.71,-2.72276,0.90307,5.89102,4.35576,1.47131,6.87862,9.08531,6.44279,-3.45175,-1.92878 10205.4,10153.7,9279.43,2.61204,3.79426,2.8599,4.2373,-6.30104,-6.55433,-17.9117,-2.30217,-4.33352,-8.56342,-2.54108,4.06241,-0.221565,-2.25183,3.87958,2.42384,1.7425,10.0636,-0.274803,1.38918,2.9688,2.49859,1.85002,3.57782,5.56749,4.25356,-1.57246,0.769565 10198.3,10155.2,9271.53,1.79363,-0.436721,3.46418,1.17919,-6.21503,-12.0337,-14.7144,-0.753172,-0.422946,-10.0673,-1.05729,0.16841,0.00393219,0.329848,3.06417,0.641188,1.13987,4.50086,-1.96838,-0.158451,2.22687,1.01485,-0.617827,-1.82684,0.837829,1.35672,-0.969077,2.83866 10187,10154.7,9258.9,0.357944,-3.85399,-0.403587,-0.905802,-6.94279,-16.6984,-17.7781,-0.22625,-1.87358,-4.80273,-0.208291,-3.41762,-1.38116,-0.435891,4.56144,1.47257,0.881539,4.31043,-2.35524,-0.63135,2.49929,2.73787,-0.3439,-0.967951,0.479767,-1.25236,-0.198644,2.70849 
10175.5,10150.8,9245.55,-2.22289,-4.64417,-1.57873,-3.37822,-3.35046,-9.88201,-14.3071,0.168661,-0.756661,-2.69992,-1.57269,-4.61371,-0.741804,-0.794809,1.95045,1.34471,1.90438,0.670421,-1.36383,-0.0207592,1.95603,4.44548,1.70081,0.896225,1.96219,-2.68814,1.37985,1.21966 10163.9,10144.5,9233.39,-1.0609,-3.6573,-1.22008,-1.66234,-8.72059,-9.8591,-9.71449,-0.237702,2.4907,-0.383432,-2.45784,-2.52105,-0.451308,-0.95008,0.101755,0.998499,0.0147502,0.763548,-2.08901,-0.286814,2.08671,3.24587,1.98374,-1.03823,1.41551,-1.64013,0.866956,-0.452541 10152.5,10140.9,9224.11,1.58528,-1.3177,-2.21666,-0.770113,-12.1162,-14.2306,-0.877621,-0.372338,1.62768,2.76293,-0.69447,0.389726,-2.24466,-0.492948,-1.07534,1.2119,-2.84085,1.62365,-4.58137,-3.47859,2.38127,-0.58689,-1.20067,-5.12188,-1.38938,0.191315,-1.00868,-0.231626 10144.9,10141,9218.45,2.9188,-0.174985,-4.58083,-6.94645,-12.0718,-23.1781,-6.27315,-0.364715,-3.24703,1.70145,0.993811,-0.598274,-3.56103,-0.759525,0.496704,2.46032,-1.89983,0.597576,-2.01394,-2.93857,4.73883,-0.682548,-1.34504,-3.70636,-1.23983,0.0550942,-2.01066,1.58053 10141.8,10139.7,9215.32,1.06474,0.421951,-5.29652,-9.2234,8.36446,-5.7284,0.960531,-0.909556,-4.90704,0.770291,1.54135,-5.62095,-2.20122,-1.09503,-2.35206,-0.974175,-1.0101,-7.23319,3.01594,0.768168,2.39478,-1.32615,-1.6404,1.53725,-1.51813,-3.97654,-1.7665,0.833795 10141.4,10134.3,9214.23,0.86273,1.35397,-0.657898,-4.72598,2.71892,1.93911,-8.71178,0.127278,0.812447,5.14689,3.34014,-5.47575,-0.124804,-2.70815,-0.541837,-0.600256,1.53834,-3.53843,0.0605411,2.43643,0.689316,0.936364,1.45495,3.58725,0.917646,-4.12549,-2.16127,-1.91164 10145.6,10128.8,9217.09,0.035273,1.26692,3.11502,-4.96307,-6.78084,1.02172,-8.79811,2.69846,4.94751,11.3598,6.51275,-2.0705,0.657905,-2.59061,-0.35795,1.18908,3.42851,-3.05799,-3.41004,0.806424,0.399374,2.92706,4.4301,0.273598,0.553543,-1.76552,-0.755718,-3.46001 10157.5,10128.8,9225.31,0.248702,0.312336,2.57768,-4.36878,-7.1619,-0.049009,-3.2758,2.7151,1.99544,11.1247,7.80862,3.2311,1.05086,1.13953,0.117826,1.5885,2.6575,-2.74279,-2.82058,-0.206648,1.25493,1.71967,2.81266,-4.13773,-2.45207,2.50385,0.789243,-0.268176 10170.7,10133.1,9236.11,-2.23675,-0.885477,2.34602,-6.30375,3.19378,12.3402,5.26964,2.51006,1.86666,4.33237,6.63528,4.85198,3.48519,8.46812,-2.52066,-0.634166,3.57125,-6.40349,1.46869,0.818123,-1.68738,1.2743,1.91738,-0.951766,-0.403311,4.63843,3.18061,7.04436 10176.7,10136.2,9243.78,0.782244,0.338989,-0.179665,0.677035,-11.8864,-9.98092,-16.6014,-0.0876104,-1.39338,0.511794,2.05749,5.37285,2.64871,7.7119,4.8232,-1.23349,2.56586,8.98335,0.643413,1.73431,-0.63479,2.49537,-0.600719,2.26345,1.69812,6.71431,2.31721,8.10433 10176.8,10136.6,9245.84,-3.20567,1.13405,3.92668,-1.78597,-0.236073,-2.19382,-11.4115,3.08973,1.33702,-3.27145,0.727769,-0.100717,5.38921,8.19297,0.492232,-2.20151,5.25989,3.6589,4.08819,2.21554,-1.32513,3.54291,0.119275,3.23854,3.862,2.19948,5.28701,6.25834 10178.4,10137.4,9245.74,-5.53585,0.420645,5.85295,-4.47724,14.54,12.4497,8.36972,4.99424,2.57479,-4.3639,0.677018,-2.6813,6.67898,7.5884,-5.54187,-1.3688,4.05586,-6.15054,4.2909,-0.899213,-1.24567,1.90686,-0.469126,1.72139,5.00978,-1.65339,6.96518,3.71489 10184.8,10141.1,9247.89,-4.95644,-1.91401,3.7243,-7.95873,7.49028,6.40526,5.31843,3.53676,4.4376,-3.95261,0.746514,-2.92295,5.17495,5.09822,-5.56387,2.13589,1.74219,-7.51099,1.13636,-2.24892,-0.712168,1.40767,0.401594,-0.663717,6.22808,-1.51586,5.59537,1.86444 
10195.1,10147.9,9253.27,-3.98,-3.06823,-2.05534,-6.10099,3.83685,4.55708,3.92119,0.928846,2.49159,0.0763172,1.14792,-2.88509,3.3624,3.14131,-4.76678,1.53759,-2.49281,-5.00974,0.3227,-1.57677,-2.36177,0.558465,1.76223,-0.153596,3.21585,-0.248642,3.44061,1.09292 10206.6,10155.3,9259.98,-4.64998,-1.64546,-4.6585,-6.92405,-1.23826,-1.4651,-7.80907,2.03872,0.322905,5.35637,2.9557,-1.90346,0.941137,2.90995,-2.25745,1.6362,-2.73525,-3.06893,0.361893,-0.410406,-1.95298,3.18373,4.96997,3.18307,2.09522,2.29277,1.29516,1.46329 10215.1,10159.8,9265.65,-5.64262,-2.22323,-2.32616,-8.62966,1.24852,3.53986,-7.11813,2.5704,-0.221435,0.41167,0.765415,-1.44792,2.10023,1.14341,-1.90736,0.761342,-0.0657556,-6.90094,4.60419,2.00852,-1.1143,4.44335,7.23913,4.6059,2.18355,1.92624,1.0442,1.06642 10218.9,10161,9269.98,-5.54728,-2.69742,0.623383,-4.54971,5.62832,12.115,1.60837,0.527375,0.225195,-4.35554,-1.09064,-1.69716,2.68584,-2.42078,-3.28377,-0.48855,1.46337,-7.59929,7.41232,3.78152,-1.52786,1.12019,5.14455,0.902689,0.791392,0.171231,1.01653,-2.1951 10225.1,10161.4,9274.87,-4.18459,-1.40959,4.0543,-3.78563,4.56469,13.1486,7.4468,1.32559,4.01602,-4.26528,2.47676,-0.706977,1.49841,-2.44619,-4.48237,0.314642,3.21848,-7.78537,6.45365,2.67192,-0.518631,-0.579868,3.1551,-3.30298,0.42352,0.385421,1.09082,-3.38628 10238.6,10163.7,9281.72,0.163978,0.29531,1.39945,-1.88245,0.770367,3.01996,6.47156,0.843119,3.05229,-2.89342,3.69162,1.01002,0.156961,-1.63668,-1.88068,0.459627,0.572044,-3.8789,6.07964,1.73877,1.04155,-0.952277,-0.352698,-3.89818,-1.13337,1.63306,0.655322,-3.05775 10252.3,10168.8,9289.58,1.69242,0.803041,0.969081,-1.57571,10.1963,10.1486,9.01137,-0.23779,2.45598,-11.8335,0.764195,0.347471,0.63322,0.818036,-2.67947,-0.48707,-0.0121974,-5.92175,4.75178,1.31186,-0.59319,-0.865273,-2.13114,-0.629395,-0.22624,0.187864,0.687159,-1.38416 10258.4,10175.1,9296.44,0.693656,-1.47018,1.57507,-4.07861,13.9151,7.913,3.87705,-2.41045,1.40643,-18.8401,-3.38044,-3.78137,0.444306,-0.142111,-3.19856,-0.633983,1.26609,-6.96487,4.03731,1.86282,-0.255938,0.885239,0.576534,4.16798,1.48633,-2.91027,0.44246,-1.26861 10259.2,10179.7,9301.13,-1.11281,-2.9356,3.48279,-4.07376,14.5961,4.75668,2.95063,-2.50321,1.99968,-15.2573,-3.94817,-6.19421,0.994523,-0.409685,-3.36826,-1.30752,2.89435,-7.11783,2.3961,1.75016,-0.287404,0.839505,2.32354,3.16514,0.431073,-4.23834,0.224613,-1.13459 10258.9,10180.8,9303.2,-3.70956,-2.93593,3.76222,-6.98265,14.1006,4.36509,3.13521,0.524873,3.4745,-8.19672,-0.812591,-7.54285,2.87285,0.165482,-4.34303,-3.00502,3.10194,-11.8146,3.48326,1.87454,-2.39007,-1.71717,-0.0308325,-3.00344,-3.10099,-5.07511,0.999296,-0.291248 10259.7,10178.9,9302.61,-2.50722,-0.863499,1.6361,-7.29671,5.65875,7.35687,6.74534,2.86707,2.5541,-4.10002,1.92641,-4.21325,3.79643,1.11564,-2.85299,-3.384,0.718232,-13.5344,2.15514,-0.378278,-3.09826,-4.48668,-4.09564,-6.07121,-4.62941,-4.63714,1.35609,1.33932 10264.3,10176.2,9300.58,-1.50986,-0.476834,0.153861,-9.03392,2.34462,9.76008,11.2624,0.958254,-0.70443,-6.3101,0.886002,-3.04957,4.20237,0.687347,-2.59931,-4.30057,-0.344332,-15.3463,3.30618,0.212706,-1.83037,-5.39362,-6.37009,-5.79293,-5.6463,-5.17005,1.45394,1.2199 10270.2,10175.5,9299.06,-1.8193,-1.62584,1.49621,-15.2891,-0.19176,0.694336,7.97111,-0.906134,-1.88497,-6.47048,-0.900237,-3.70282,1.23614,0.322582,-3.93212,-3.45866,1.71962,-16.8955,0.58688,-0.409914,-0.259588,-2.68512,-3.64588,-3.35838,-4.51583,-4.19392,0.240148,0.159851 
10270.2,10179.6,9298.63,-1.90388,-3.42457,3.36972,-15.5947,6.83754,-2.72512,7.96959,-1.26132,-2.35887,-7.13988,-3.00989,-4.84946,-1.32472,-2.90407,-7.21556,-3.99747,1.63284,-18.121,1.49353,-0.486008,-0.289734,-2.44221,-2.61409,-4.74746,-6.81336,-4.22186,-0.397997,-3.01155 10263.1,10186.3,9296.94,0.1046,-2.95923,0.55802,-3.53552,11.956,6.06043,20.0157,-0.175478,-1.81809,-1.77528,-2.10279,-0.283075,-3.48288,-4.09089,-6.41457,-3.4926,-1.98205,-11.2644,1.51324,-2.56718,2.01317,-3.17178,-3.03644,-4.28621,-6.82533,-2.57386,-0.732198,-4.52782 10250.3,10186.7,9289.82,0.787893,-2.63004,-4.83671,4.59987,9.90165,5.11396,20.1712,-1.49013,-0.900383,3.2704,-1.38302,1.01612,-3.51797,-3.65748,-2.01906,-2.31487,-4.58178,-0.663723,4.99631,0.0846666,6.20019,-1.32911,-0.366123,-0.708005,-3.05462,-1.4169,-1.33549,-4.03837 10229.6,10174.2,9276.51,2.92922,1.43172,-8.45959,7.92191,9.82817,0.906035,15.1761,-5.66535,-4.80598,8.92318,-1.50732,0.863702,-4.19618,-1.72605,1.43049,-1.60336,-7.78679,7.9456,2.20311,0.976306,4.6808,-2.0774,-1.41618,1.52784,-1.00485,0.251303,-2.51818,-3.24837 10203.9,10154.8,9263.01,1.97737,4.88419,1.86761,-1.89071,16.8831,21.8027,18.6752,-2.85592,-0.407409,1.1857,1.57668,2.90834,1.42619,5.01683,-2.88862,1.13125,-1.02838,-3.77013,-1.83294,-0.874118,-1.82318,-1.06152,0.617181,1.34269,3.38069,1.15764,1.12216,1.38647 10184.5,10141.2,9256.68,5.24597,7.64832,2.18557,1.58328,4.92602,9.28816,-0.0172234,-2.70209,-2.36954,2.63625,2.45988,6.65341,1.30855,2.45772,0.884071,4.15289,-0.306199,0.501745,-3.91598,-0.843063,-3.78083,-0.751671,-0.908618,-0.353576,1.46737,4.59599,1.10914,-1.05414 10178.9,10140.4,9258.57,8.5511,8.38576,-0.704081,10.0442,3.87995,9.53107,4.06474,-2.33977,-3.33414,3.45052,0.769206,8.44243,0.151836,-0.110094,2.50423,3.89258,-1.86971,4.86933,-2.34618,0.208276,-3.54318,-0.382483,-0.444637,3.17545,1.86638,6.31308,-0.0788599,-2.11239 10182.7,10148,9263.52,7.664,6.75263,-0.540997,5.42972,-5.04193,-7.98425,-8.29464,-0.166299,-0.588527,3.31557,0.500806,4.72146,-2.51571,-1.43305,5.52369,5.671,1.03703,8.03067,0.0463032,4.16527,0.993743,2.27,2.01907,5.48701,6.28587,6.50446,-0.915646,-0.555951 10185.6,10156.6,9266.64,4.26252,2.60407,3.65205,1.35764,1.93964,-1.71464,3.62386,0.664968,2.07164,-1.84774,-1.41728,2.03742,-1.93901,-0.955849,2.55509,2.24827,3.4143,2.08534,1.52467,4.36357,2.40504,-0.149419,1.87333,2.56701,3.76988,3.58853,-0.290298,1.53656 10182.8,10164.1,9266.99,3.44774,1.00051,3.58435,5.06036,-3.20427,-1.32409,2.16178,-1.24869,0.986594,2.68824,-3.10496,3.75494,-3.03899,-1.36189,2.85639,-0.797041,2.25309,6.84226,-1.01807,1.45026,1.64915,-1.77668,1.47461,1.32051,0.0174875,3.15498,-1.91103,0.915561 10177.6,10169.5,9265.47,2.97062,0.742454,2.19308,3.39405,-10.2555,-6.11354,-8.35604,-2.29312,-0.492631,4.2024,-2.46282,2.85236,-2.05854,-1.07623,3.34902,-1.67951,1.43015,9.72371,1.0556,1.2093,0.0329592,0.933345,2.62882,4.14907,1.43657,2.25242,-2.21302,0.424466 10175.1,10171.1,9262.53,2.78573,0.66686,2.0545,2.76769,-2.38316,1.38611,1.33538,-1.98843,-1.22362,0.719734,-1.48276,0.571928,-0.303568,1.13172,0.533248,-2.57485,0.218063,4.75694,4.12677,1.25451,-2.29974,1.77459,2.18864,5.66448,2.31972,-0.197648,-0.423422,1.24127 10176.1,10170.7,9258.49,5.31438,0.737423,2.23937,7.15555,-6.03862,-6.93885,2.59027,-2.08985,-1.82474,1.76361,-1.51506,2.40133,-2.94977,1.13326,2.34185,-1.4691,-0.319475,6.55378,0.151184,-0.820336,-1.03183,0.737373,1.0173,1.60097,0.120988,0.706961,-1.06361,1.61191 
10177.1,10171.1,9253.43,5.27989,0.124242,0.594136,6.40228,-14.4792,-17.9873,-7.83873,-2.70593,-2.84279,6.19952,-1.02819,4.22035,-3.89328,-0.655654,4.6427,-0.543649,-0.312946,7.67303,-3.34568,-2.99026,0.892734,0.193866,0.437901,-1.37172,-2.06494,3.10779,-2.09072,0.969194 10175,10171.9,9247.28,2.27598,-1.11333,-0.371999,2.70022,-5.44405,-1.24932,2.95574,-2.54561,-3.07604,2.81372,-0.48024,4.11824,2.04907,-0.370621,1.24343,-2.71039,-1.27809,-0.906837,-1.29061,-4.80376,-0.177684,-0.68347,-0.0356975,0.976652,-2.58184,2.60538,-0.53245,1.0079 10170.6,10171.1,9240.98,0.484599,0.0646839,-1.51326,2.89899,-3.4319,-0.213982,2.47953,-0.834731,-2.00581,5.72898,0.227883,2.67222,2.27602,0.0505934,1.31844,-2.26552,-2.6972,-0.975391,-0.869576,-3.70984,-1.26158,-0.292123,-0.590846,2.58737,-1.84822,1.62378,-0.526111,-0.491878 10166.9,10167.6,9236.09,0.964725,-0.0392702,-0.079079,4.19696,-8.77705,-7.3393,-5.33084,1.7816,1.00552,6.00308,-0.645333,1.80016,-0.345783,0.537513,3.29513,-0.258503,-1.94323,3.02276,-2.07851,-0.708951,-0.985472,0.42465,-0.0047685,-0.0149723,-1.37113,0.550535,-0.779034,-0.484969 10166.1,10161.5,9233.6,-0.598547,-1.76595,-1.06041,-0.952044,-3.22733,-6.25839,-1.71002,3.5389,3.14678,2.52469,-0.94774,-0.697306,-1.82073,1.8162,-0.398189,-0.0962201,-1.17773,-3.11075,-1.86249,-0.148137,-0.912351,0.0729367,0.372787,-1.52491,-1.99794,-1.67208,0.753712,1.02245 10167.9,10154.5,9233.85,1.32924,-0.579085,-4.09528,3.27081,-6.78357,-9.38603,-3.06915,1.95927,0.70163,2.46784,-0.635142,0.854662,-1.03664,2.44479,0.381434,0.976493,-2.1874,1.35415,-3.25712,-1.85514,0.202589,0.286026,0.720155,0.627719,-0.687001,-0.872865,1.21871,2.25385 10170.4,10147.3,9236.23,1.55419,0.655793,-3.90119,3.65032,-6.92144,-3.81534,-0.829364,1.59907,-0.150104,0.588015,0.212751,1.04803,3.09472,3.79829,-0.218751,1.11779,-1.55055,0.933332,-1.25266,-2.59487,0.647035,1.39731,2.58953,2.8589,1.80309,-1.43261,2.52993,2.79953 10171.9,10139.7,9239.22,2.16966,0.513128,-2.93705,2.73804,-10.8601,-4.50483,3.76187,1.03924,-0.676839,-1.4866,-1.19577,1.6866,5.98311,3.12642,0.0885709,0.9896,-0.594518,0.533618,0.379411,-3.82145,2.32664,2.22298,3.60721,3.05218,2.2889,-1.98702,2.79897,1.35025 10172.4,10133.5,9242.05,0.627291,0.905709,1.39363,2.99372,-15.425,-9.09382,2.11414,1.04226,2.10526,-4.39506,-2.77953,2.15891,6.66724,1.70369,-0.372333,1.40462,2.59187,2.26874,-0.378224,-3.69675,3.0335,2.25396,3.10192,0.0429504,0.10951,-0.799702,2.66794,-0.282681 10173.8,10130.2,9245.36,-1.33644,1.42161,3.11004,3.93858,-17.0646,-12.116,1.67239,1.94826,5.54306,-3.85205,-1.5475,2.52019,4.33814,1.15019,-0.541069,1.99129,3.05378,4.25369,-2.76731,-2.80645,1.85733,0.988299,2.88783,-1.97077,-2.83768,1.85125,2.84766,0.389147 10176.4,10130.9,9250,-3.53503,0.391503,-0.270572,1.95882,-15.1875,-18.5758,-1.42497,2.28845,5.40786,-2.12974,1.20821,0.911564,0.2788,0.0689856,-0.00271805,2.01928,-0.20812,3.23848,-1.98612,0.0245125,0.488358,-1.18054,1.47019,-3.47437,-4.6287,2.11498,2.20934,0.993318 10178.8,10135.9,9255.56,-3.20255,-0.268054,-3.48033,2.47099,-11.3536,-16.9308,2.01776,1.40976,1.56328,0.853625,1.89586,1.47109,-1.50849,0.167668,0.627511,1.41809,-4.21425,2.05546,-2.39209,-0.416193,0.276633,-1.50971,-0.820011,-1.25927,-1.76,0.153711,0.431209,1.48315 10181.2,10144.1,9260.31,-2.49125,-0.613263,-3.86482,0.287362,-9.17309,-14.1157,3.48478,0.196793,-1.25386,2.83848,0.198147,-0.0165582,0.471677,-0.139327,-0.216901,-0.966032,-5.2193,-1.40546,-0.977273,-1.2574,1.78779,0.134179,-1.72164,0.653388,0.313432,-3.37716,-0.587605,0.861387 
10186.6,10151.1,9263.12,-0.0358474,0.714951,-5.47328,-0.875177,-17.5089,-13.8361,0.471247,0.643912,-2.41975,9.9458,0.993041,0.803296,-0.226386,0.0668295,2.19176,-1.16819,-4.40868,0.69383,-3.38706,-3.58218,3.07732,2.10253,1.79789,2.06744,1.83904,-2.15516,-1.67344,0.661882 10193.4,10152.2,9264.85,-2.78688,1.85556,-1.96216,-7.27433,-5.61022,0.625161,3.91544,2.78407,0.13042,8.01854,3.573,-2.43853,-1.07905,0.148792,-1.48277,-2.3792,0.378784,-7.05144,-1.06108,-1.76148,0.135824,1.71393,3.80312,-1.43656,0.702495,-1.95731,-0.703674,-0.33177 10196.9,10148.7,9267.46,1.41437,4.41491,0.0330121,-0.96198,-19.7539,-11.561,-5.49424,1.03618,-0.588315,13.1158,4.11913,1.82776,-4.02743,-1.24038,4.49417,2.16391,1.61464,5.33203,-6.2827,-3.22771,2.42673,4.53812,5.27571,1.95384,4.83592,2.15944,-2.23414,-0.0179182 10195.1,10146.6,9271.67,-0.599083,4.08109,5.56207,-0.651956,-1.899,4.41751,8.64946,-0.00765143,1.65381,7.40697,3.13743,0.528221,-1.17274,-0.333192,-1.34405,0.810869,3.04978,-1.96585,-3.00608,-1.02587,-0.427114,2.63482,2.33223,1.44749,2.70602,-0.508442,-0.782524,0.838544 10190.6,10149.1,9275.95,0.560997,3.32623,0.00253245,1.6273,-9.62681,-9.32197,-7.13248,-1.74244,-2.26773,10.279,2.01853,1.79006,-2.32577,-1.861,2.70102,2.63733,-0.668516,4.89049,-2.56801,1.67809,-0.682542,1.07859,-0.730879,1.04436,0.219305,1.04839,-1.30085,-0.204558 10188,10153.1,9277.72,-1.05102,1.4439,-1.2902,0.37219,3.61058,7.8905,-0.13638,-0.797121,-3.203,3.7144,-0.467361,1.43319,1.01941,-0.964803,1.27849,1.32106,-0.71757,-0.281666,1.82319,4.43107,-2.93419,-0.102775,-2.79816,1.60946,-0.350934,0.837113,0.975085,-0.206216 10189.3,10155.8,9275.17,1.71247,1.79065,-0.806826,4.2591,-1.07113,5.08033,-3.80833,-1.05846,-3.93516,4.86697,-2.48519,4.41458,1.0147,-2.04319,5.76698,3.04901,0.621182,6.18537,-0.471514,3.74338,0.0954557,1.78055,-2.23478,4.29533,3.28968,4.08665,-0.45381,-1.12752 10190.8,10155.9,9267.91,0.0885688,1.62773,3.97676,0.475719,6.50171,12.0036,4.17355,0.0800788,0.877184,4.13283,-1.66529,2.3731,1.22312,-1.52431,1.32333,1.30085,4.02821,0.00402446,-0.278254,3.83144,-0.00616006,1.70507,0.14686,2.05675,3.75234,3.42709,-1.13997,-2.28219 10186.5,10152.6,9257.34,-0.152071,1.1051,2.98089,-3.26014,-3.23874,0.545145,-3.74253,0.650653,4.32612,4.55661,-0.349067,0.443991,-1.54712,-2.37082,1.08068,1.11666,3.19332,0.114235,-4.77887,1.03262,0.526047,1.57427,1.96416,-1.21359,2.2522,2.81775,-2.19914,-3.20958 10175.9,10146,9246.33,-2.37365,-0.801223,1.8448,-4.49245,2.73452,3.45587,0.665856,0.804743,7.15539,-1.25789,-1.25952,-2.70716,-1.07845,-2.04441,-1.93328,-1.35806,1.5978,-5.1161,-5.79834,-0.925826,-2.80177,-1.15512,-1.39234,-4.88988,-2.71874,-0.727928,-1.17586,-2.55528 10163.6,10137.3,9237.87,-0.803469,-2.78044,-0.895544,-1.96323,-0.541223,-3.95959,-1.23923,0.0489646,5.82687,-0.842944,-2.20839,-1.37161,-0.868195,-0.366623,-0.326653,-0.542204,-0.442138,-3.06811,-5.05951,-1.77693,-2.56412,-2.0747,-5.18551,-5.90628,-3.59607,-1.51359,-1.0358,-0.0442413 10154.4,10129.1,9233.99,1.23915,-3.76005,-2.64612,0.723829,-3.148,-4.96491,0.57486,-0.202117,2.21428,-0.386009,-2.61213,0.591537,-0.420445,2.51457,0.848114,0.0155665,-2.8099,-0.688955,-1.65728,-1.68576,-0.314736,-2.37588,-7.30164,-5.93878,-1.09582,-1.08092,-1.23666,3.04974 10147.7,10124.3,9234.84,0.130569,-3.33534,-5.30783,0.228073,-1.79103,-2.90284,1.72325,0.336059,-1.67646,0.805152,-2.51359,-1.68843,-1.08056,2.79024,0.667811,-0.918425,-5.25023,-0.613583,-1.21144,-3.86108,1.12026,-2.87087,-6.96217,-3.74878,-0.871173,-1.99148,-1.4983,3.13726 
10141.9,10125,9238.34,-2.3342,-3.74514,-6.28736,0.247636,2.71253,3.12847,7.57994,-0.0401623,-2.07147,0.481455,-3.97685,-4.46362,-0.415913,1.42821,-0.575486,-2.68041,-4.57327,-2.24353,-2.60028,-5.84863,0.625916,-3.42977,-3.6369,-0.844099,-3.5874,-4.64335,-0.985747,1.2717 10139.9,10130.2,9242.19,-1.31024,-4.72475,-7.14762,0.73153,1.45053,-5.53508,5.90136,-2.31863,0.194991,0.488804,-6.97821,-4.41928,-2.29074,-1.35009,0.919216,-2.89533,-3.25509,-0.799203,-1.99553,-4.14064,2.04707,-1.98553,-0.137078,-0.0166083,-4.9352,-5.40326,-1.67739,-1.42035 10146.2,10135.6,9246.04,1.48702,-3.36982,-6.22071,1.74719,2.56435,-13.0074,1.99705,-3.21561,2.91416,0.844878,-6.7988,-2.16439,-5.4962,-1.85975,2.13575,-1.59383,-2.91884,1.52462,-1.3314,-1.85117,3.6544,-0.430522,0.692754,-0.840642,-3.31251,-2.33908,-3.05762,-2.1983 10158.1,10136.1,9250.8,0.841737,-2.49661,-1.39476,-1.47649,15.6927,0.965199,10.869,-0.546861,4.02682,-3.15137,-2.65822,-1.05518,-4.77058,0.229656,-2.58261,-1.60934,-0.689737,-5.44364,-0.234473,-1.95479,2.60062,-0.769404,0.484685,-2.21476,-2.21659,-0.527818,-2.3356,-0.631119 10167.2,10131.4,9256.17,1.43756,-1.64599,0.0828565,1.10643,1.09851,-8.71597,-1.14743,1.16785,1.24835,1.69522,0.678389,1.91657,-5.73395,-1.26925,0.618759,0.671225,0.99422,2.5392,-3.14056,-3.00047,3.39733,-0.267724,0.865602,-1.72338,-1.28093,1.59131,-3.58079,-1.60917 10168.5,10125.9,9259.95,0.111755,-1.49369,1.18289,-0.284048,-1.52165,-7.82514,1.91577,2.83987,1.30957,4.34859,2.31828,0.547347,-5.35341,-2.95714,0.120479,-0.07344,1.25038,0.863374,-1.97606,-2.63292,2.99367,-1.51317,-0.192761,-1.94301,-2.34527,-0.816782,-4.15688,-3.69083 10164.7,10123.5,9260.03,2.54631,0.123647,1.85441,0.291179,-2.26534,-5.622,0.403256,2.75151,1.92159,5.45502,4.02912,0.277333,-3.49437,-2.59529,1.68451,1.03176,0.611114,1.05444,-1.37086,-0.762577,2.09659,-3.15435,-1.66892,-4.18628,-2.03484,-0.59484,-4.5361,-4.06338 10160.7,10123.9,9256.02,4.16394,1.15842,1.00215,-1.41089,3.00077,3.69915,2.12147,1.50602,1.11373,3.7783,5.12886,1.27055,-1.0735,0.163066,0.715848,1.75274,0.248762,-1.87449,-2.70607,-0.0821427,-0.982237,-3.91753,-0.603176,-5.15131,-1.55797,1.9122,-2.63806,-2.45448 10157.6,10124.8,9249.1,1.13904,0.752742,1.28292,-3.44794,5.87463,13.5955,-3.90547,0.053564,0.392376,-2.17549,4.02652,0.800942,2.14933,0.991305,-1.00534,1.93346,1.74799,-4.3887,-2.62983,2.12002,-3.97726,-2.37985,1.92724,-3.91126,-1.80145,3.29901,0.515867,-2.07875 10155.9,10125.9,9241.01,-1.21278,1.24353,0.0902419,-1.38693,3.90257,17.0687,-1.7671,-0.621263,-0.743581,-3.56603,3.19768,0.515647,2.83626,-0.394058,-0.965446,2.53295,1.02968,-3.73706,-0.646373,4.19926,-3.90665,0.100245,2.07717,0.65145,-0.4389,3.45695,1.30478,-2.26372 10156.9,10129,9233.19,-0.519545,3.45514,-0.128203,0.470911,-4.34917,11.6069,-5.37302,-0.249794,0.0908138,-1.64961,3.7305,0.887725,1.28233,-0.50548,0.651175,4.68216,0.481759,0.131141,2.83721,7.4517,-1.51906,2.02591,0.478488,2.8447,3.96564,4.21205,0.0189546,-1.26083 10160.2,10134.9,9226.61,0.334619,3.63902,-1.33005,0.500933,-0.0390483,15.3466,3.49804,-1.22599,-0.443012,-1.29729,1.85728,0.83413,0.663791,1.08815,-1.61332,2.35978,-1.91003,-1.54128,7.06018,8.52392,-0.0931056,-0.631766,-1.8937,1.21041,3.92464,3.0125,0.582016,-0.0552563 10165.1,10142,9222.12,-0.0501124,2.72845,-2.35233,0.461804,-3.24106,3.89637,-4.4752,-1.7395,-0.658087,1.46568,0.74815,1.9358,-1.37579,1.26993,0.248403,2.1501,-1.97865,2.84403,4.93078,6.34449,2.55208,-1.66616,-1.28941,-0.85475,2.44335,3.28626,0.575625,0.0867697 
10169,10147.2,9219.92,-2.57524,1.55278,1.64717,-0.408592,2.78686,3.93608,-3.35557,-1.05071,0.358949,-1.71793,1.23509,0.730307,-0.807758,0.469476,-0.799756,2.26666,1.42763,2.57756,3.31921,4.24278,2.32673,-1.92157,-0.625841,-1.7385,0.55312,2.469,0.416022,0.102824 10167.7,10149.8,9219.39,-2.61236,0.265041,4.14099,-1.10443,5.68968,5.75872,0.437178,-1.27371,-1.44794,-5.50529,0.962099,-1.7594,-0.014506,-1.47838,-2.10998,2.88166,2.32266,2.31558,3.04189,2.76494,1.13588,-2.76241,-2.5749,-1.37983,-0.132212,1.62609,0.00182996,-0.567092 10161.2,10151.5,9219.88,-1.00231,0.225002,2.94421,2.03312,-0.355979,4.16591,-0.636307,-0.980578,-3.17075,-4.4683,-0.0413473,-0.96548,-0.194949,-0.798368,-1.08568,3.94015,1.20872,6.21739,0.493017,0.663456,-1.20346,-2.76074,-4.99576,-0.484664,1.27829,1.87168,-0.0347963,-0.649195 10155.5,10153.9,9220.83,-0.939771,0.647249,0.0634509,3.2582,-1.62031,4.0693,-0.997477,-0.169163,-4.01209,-4.20755,-1.14083,-0.040949,0.676499,1.0769,-0.637069,2.85891,0.53402,4.18699,0.666861,0.369829,-2.63692,-0.336214,-3.73798,1.47577,2.81105,-0.292838,0.0270106,-0.151526 10154.1,10157.5,9221.67,-1.65802,1.59847,-3.57612,1.52401,6.37221,4.48866,-1.46299,-0.915699,-6.98915,-0.340048,-0.952717,-2.18866,-0.811792,-0.642645,-0.622625,-0.300884,-1.00057,-1.15759,2.44751,2.6773,-1.823,1.29837,-1.91591,2.49204,1.93197,-3.59974,-1.91245,-2.4109 10154.4,10160.7,9221.98,-0.583463,-0.108757,-4.6507,-0.0693877,5.35637,4.425,-6.56889,-1.82597,-8.57191,2.85503,-1.05825,-2.33955,-3.22781,-4.76081,2.05753,-0.861931,-1.83229,-0.124382,0.503483,2.18131,1.30665,2.42826,0.824233,3.84653,2.09007,-3.3925,-4.31649,-3.96112 10153.4,10159.2,9221.68,-2.76485,-4.09131,-2.87698,-1.10712,12.5336,12.9839,-4.34652,-1.87041,-6.50663,-1.43881,-2.78497,-4.09349,-3.27711,-7.58611,-0.918956,-2.43732,-1.68029,-2.93885,1.37614,1.00354,-0.202025,0.252735,-1.35224,2.14941,-1.22668,-3.85694,-3.91196,-5.39514 10153.1,10150.6,9221.82,-3.95579,-6.11602,-1.95691,-0.571033,7.36799,2.23424,-8.23593,-1.15065,-2.89936,-3.34966,-3.42278,-4.92737,-4.22729,-7.57776,-1.53936,-2.4826,-0.485854,-2.05301,1.35048,0.235875,-0.851581,0.299046,-3.65228,0.452501,-2.53126,-4.14097,-3.0318,-6.032 10156.5,10138.1,9224.22,-1.72219,-4.81284,-2.04034,3.64429,-3.40667,-8.21149,-2.06758,-0.247629,0.240041,0.844032,-2.55693,-2.29071,-5.62686,-4.10255,0.955484,-2.58578,-0.573095,1.96046,-2.89531,-2.47853,1.00662,1.59082,-2.31097,1.60096,-0.355857,-3.59741,-2.54995,-3.16362 10162.5,10126.5,9229.66,-1.48624,-2.31864,-1.19917,5.07688,-2.15075,-4.48733,6.81643,1.19375,3.4529,3.66948,-1.49639,-1.71619,-5.51437,-1.29231,-0.407537,-4.604,-2.54282,0.0824236,-5.27449,-4.81883,0.767691,-1.39492,-2.55861,-0.325428,-1.75464,-3.59903,-1.89829,-0.732932 10167.7,10118.7,9237.56,-1.06333,-0.880843,-0.709075,2.8371,-10.0447,-10.4348,-2.5904,3.18465,5.97115,6.33779,-0.55058,-1.01646,-4.14332,-1.6247,-0.0193591,-4.01402,-3.73144,0.38443,-5.50468,-6.41294,-0.295721,-3.62009,-2.70822,-3.1355,-4.45086,-2.10376,-1.79258,-1.22716 10172.5,10116.9,9247.18,1.551,0.130326,-0.490568,5.87654,-14.5436,-8.35183,-0.790109,3.39107,4.7174,8.28156,-0.0057788,2.6686,-1.84943,-1.48071,1.03911,-4.0934,-3.48936,2.7605,-6.22541,-8.72046,-2.487,-3.9855,-3.15508,-4.85806,-6.30628,-0.1826,-2.22861,-1.91313 10179.7,10122.6,9257.78,1.5355,1.00586,-2.46594,5.55739,-10.6179,-9.89219,1.01847,2.02002,1.55047,10.3651,1.59035,2.3257,-3.02423,-0.681756,0.379055,-4.13859,-2.86252,2.65539,-7.09955,-8.4785,-1.80811,-2.44766,-3.84586,-6.08215,-4.18234,0.309597,-3.66089,-1.78168 
10188.9,10134.4,9267.84,0.423127,-1.44673,-6.16369,2.54558,-3.2605,-10.2788,1.93481,-0.460125,-1.55478,7.53447,1.04311,-2.037,-5.33297,-0.715827,-0.912315,-4.00679,-5.27357,1.32517,-7.02947,-5.6844,2.49,-1.1701,-4.14164,-4.46692,0.160721,-1.23591,-5.46575,-0.678645 10196.3,10145.5,9275.21,0.204833,-4.851,-9.24744,3.38063,-3.90706,-1.89916,-0.318999,-3.05687,-4.83175,3.88926,-1.68472,-4.52857,-6.76493,0.053409,0.356074,-2.44354,-9.25902,3.95243,-8.99635,-3.68403,4.07743,-1.41439,-4.06526,0.784286,2.50666,-1.59161,-6.31937,0.0761621 10200.4,10148.5,9278.92,-3.06966,-5.752,-6.27773,-0.452092,4.18213,13.2473,-12.0757,-4.47092,-6.49884,-5.96616,-4.08975,-9.08064,-3.65565,-1.03612,-1.9757,-2.79369,-8.22081,-3.13926,-2.68074,1.98539,-1.47914,-4.27865,-6.82097,-0.0420558,-2.72616,-3.80964,-3.69263,-2.81706 10202.3,10144.3,9279.66,1.7621,-1.2767,-1.87182,1.61337,-6.80859,14.4514,-16.815,-2.07514,-4.63562,0.0307544,-1.49074,-2.29138,-1.18636,-1.08621,1.86862,0.689509,-4.2555,-0.913166,-4.04706,-1.13903,-2.95495,-1.4359,-3.45987,4.36607,0.619825,-1.53464,-2.06409,-2.58631 10201.6,10141.5,9277.89,2.73427,2.11183,3.79277,1.71546,-5.8859,13.3557,-11.3022,2.79327,2.37116,13.2011,3.98285,0.966107,0.039656,-0.715821,2.85166,2.34242,2.77476,-0.0888099,-4.98538,-3.4432,-1.83877,3.57211,2.68075,7.05565,6.45616,-1.54302,-1.24469,-1.49869 10196,10143.8,9273.55,-2.52737,0.202188,7.08167,-4.89952,6.71679,10.6699,0.756855,5.54471,7.25909,13.9583,6.39787,-2.37566,0.745793,-1.45474,-1.09404,0.910205,7.21143,-6.92492,-3.24203,-2.89701,-0.543452,6.07649,7.33376,6.57894,6.15484,-4.40884,0.0587056,-1.11052 10186.2,10147.8,9267.63,-4.31786,0.145523,8.74123,-1.12372,3.61382,5.90919,-2.20636,4.87121,7.93339,10.8223,5.77747,-1.02016,1.70524,-1.23974,-1.99873,1.22043,7.18349,-2.02393,-4.52471,-1.19367,-1.87015,5.60664,6.92162,5.30532,3.03549,-3.16865,1.33872,-1.3693 10178.3,10151.3,9262.07,-1.01371,-0.36759,7.07326,3.03463,-3.67644,6.41668,1.01659,3.32806,5.69645,6.11989,4.17302,3.13986,4.40199,0.31144,-2.58094,-0.0539033,4.16067,1.49299,-3.2753,-1.39228,-2.172,3.33149,4.19598,3.46064,0.616277,-0.818505,3.98959,0.698301 10177.2,10154.3,9257.94,2.09186,0.0766925,2.17884,5.08344,-13.9717,-0.882929,-3.84368,2.86526,4.57806,7.77504,4.75117,6.29349,4.58116,4.04706,1.06485,0.914494,1.84175,7.12093,-3.92066,-3.04038,-1.76589,1.29071,2.74094,1.46176,1.98937,3.12251,5.09485,3.84087 10179.4,10155.4,9254.74,0.187596,-0.882072,-0.665652,4.15319,-3.56212,6.25634,3.46947,2.99756,3.30879,0.859046,5.1349,3.91232,5.90056,6.60019,0.839946,-0.162343,-0.484405,2.65509,-1.8674,-3.50916,-5.10299,-1.60522,1.28388,-0.0295086,1.05,2.81748,5.21994,5.53563 10178.8,10153.1,9251.26,-1.91139,-0.154839,-0.832651,7.32065,-8.14661,3.20829,-4.61065,3.9011,1.20806,1.29028,6.11631,4.24084,4.66918,7.38927,3.1094,1.72009,-0.436683,6.06925,-3.83738,-3.64103,-8.35166,-0.222316,1.74303,3.43329,2.82215,3.91599,3.2218,6.05878 10175,10149.2,9246.46,-3.00223,-0.829219,2.18951,8.12634,-8.29635,3.98254,-2.55022,3.58933,0.0476173,2.00734,2.85452,5.13863,4.39434,5.86178,1.57419,0.321093,2.11151,4.62819,-0.677836,-1.98205,-7.44972,1.36379,2.52895,5.12261,2.10196,3.15929,2.77152,6.16477 10170.8,10147.7,9240.32,-2.09934,-1.33891,3.77143,6.49402,-6.43302,-0.0826344,0.87837,1.12061,0.421557,1.06025,-1.52903,5.64507,3.68263,3.49536,1.25096,-1.4957,2.92854,4.60413,2.40658,-0.645265,-3.32217,0.987715,2.60908,1.94117,-0.424246,2.85508,2.71473,4.88469 
10167.3,10148.7,9234.04,-1.71112,-2.89318,3.67043,1.66277,3.35424,4.57631,10.1924,-0.35173,1.35064,-5.80931,-1.82085,3.64176,4.57117,2.2882,0.924739,-2.41648,2.22467,2.19365,5.80375,-0.426137,-2.32705,-0.919332,2.09081,-2.34116,-2.25007,1.71251,3.40172,3.5108 10165.7,10149.1,9229.23,-1.45001,-3.05548,2.45599,-0.349391,3.71978,4.53119,5.144,-0.0754888,2.20722,-6.90377,0.948441,2.13514,3.08117,1.83942,2.86791,-0.010419,2.66035,5.23219,5.6626,-0.804354,-2.37724,-1.67323,0.673861,-3.53649,-1.59081,1.76997,2.75549,2.29186 10167.4,10147.1,9226.8,-1.49928,-2.70714,1.88393,-0.842721,-0.225431,3.25531,1.41947,0.140255,3.21042,-3.88608,1.41104,1.86088,-0.091131,0.642157,1.94581,0.307133,3.18746,6.22574,4.30938,-1.01513,-1.1936,-1.8575,-0.588364,-1.42784,-2.08205,1.85519,1.46316,1.06047 10171.1,10143.9,9226.48,-2.01672,-2.40053,3.06391,-0.0599903,-8.34303,2.94718,-5.04409,-0.199276,4.0892,-3.68083,-0.226057,2.75547,-0.686676,-0.843757,0.670264,-0.458086,3.08212,7.11729,2.84836,0.933537,-1.50789,-1.59001,0.179663,0.0589795,-2.55704,3.42709,0.775783,0.360096 10175,10140.6,9227.89,-1.34782,-2.60865,2.14445,1.39294,-10.3608,4.5868,-8.2559,-1.78039,0.356678,-10.0047,-3.28868,2.87133,1.85333,-3.67234,1.53223,-1.27653,0.113475,6.97877,4.49731,3.38158,-3.24882,-2.09817,-0.213742,-0.816136,-3.92766,4.36792,1.46638,-0.25462 10179,10139.5,9231.01,-0.683001,-1.14693,0.835389,1.45465,-4.93888,6.92044,-3.2459,-1.76518,-2.11784,-11.5638,-3.99539,3.25477,2.97649,-3.54233,2.62301,-0.286071,-1.99677,5.44349,5.35012,2.55683,-3.04093,-1.82791,-1.42661,0.583625,-2.6178,3.43693,2.29735,-0.308687 10185.5,10142.2,9235.77,-0.0852919,0.0218383,0.522022,1.091,-4.00515,-0.71681,-2.72016,-1.24891,-1.4593,-5.53454,-2.81228,2.98724,1.40275,-1.35994,4.37674,1.00841,-2.02092,6.34309,4.01241,0.223476,0.719167,-0.617158,-1.79277,2.19906,-0.00915837,1.60933,1.1106,-0.276707 10194.7,10147.7,9242.28,-0.507821,-1.45713,1.82236,1.06383,0.990703,1.16431,3.40878,-1.35424,0.436421,-3.7364,-2.82733,0.844561,2.18188,1.42103,2.14788,-1.48658,-0.956157,3.31294,2.03859,-1.09837,2.11718,-0.147919,0.113767,0.665977,1.0134,-0.758268,0.662046,1.48327 10202.3,10153,9250.68,-0.953894,-1.28733,1.09826,0.183582,-2.63676,-4.1377,-2.89907,-0.851983,3.07691,-0.452803,-2.18838,0.00930997,2.87142,4.0314,0.911046,-1.55443,1.18147,4.24956,-2.48362,-1.23019,1.72571,2.11001,5.29268,-0.281886,3.31927,-0.100871,1.85826,4.09941 10205.4,10156.4,9259.89,-1.27754,0.134823,0.181405,0.430733,3.94306,1.54036,2.99815,-1.16285,4.70226,-4.24342,-1.81256,1.00154,4.93307,6.24027,-1.59843,-1.48742,2.34844,2.10305,-2.00905,-0.662325,0.626241,1.17997,6.74123,-1.67701,1.35772,0.491316,4.32271,6.53414 10204.9,10157.9,9267.94,0.0906612,2.16352,-0.379486,5.42194,2.73054,2.84047,-1.4914,-1.83181,4.02307,-5.15449,-0.262248,3.79351,5.21678,7.80905,0.384689,1.27337,2.9796,6.90988,1.28339,2.20996,-0.91791,-0.163496,3.78903,-1.75168,-0.655347,2.9127,4.88667,7.66747 10203.5,10159,9273.39,2.81598,1.22437,-0.368556,7.79675,3.42922,7.94279,4.57077,-0.708312,0.0968463,-6.10539,0.906129,5.55489,5.11842,8.21484,-0.0671665,1.22889,2.37144,6.24544,4.97372,3.9233,-2.49967,0.267274,-0.310124,1.09266,-0.410233,4.04567,4.74621,8.0612 10203.2,10162.2,9275.77,5.91857,0.355765,0.897437,11.4606,-3.5509,6.21936,2.57301,-0.0103725,-3.12789,-4.93913,0.601331,6.94209,5.77388,6.93334,1.15761,0.716978,2.28439,10.4648,4.58557,4.39511,-2.76356,2.73426,-1.51427,4.03252,2.99548,5.47757,3.66414,6.66569 
10203.5,10167.2,9275.21,3.60261,-0.370029,0.212296,6.53742,-1.17501,1.39057,4.60494,-1.59955,-3.36286,-6.83681,-0.619753,2.05525,7.21718,4.0699,-0.311278,-1.80144,1.07578,6.02142,4.81799,3.05296,-1.94492,1.84126,-1.66326,1.40391,1.77364,2.95825,3.1993,3.61198 10203.2,10169.7,9272.52,1.94895,1.27875,-0.411546,7.45768,-3.75161,0.551798,7.13428,-3.82068,-2.61405,-4.51085,-0.839975,-0.654388,7.59238,3.63367,1.11679,-0.895324,0.0589114,6.72608,0.605615,-0.28023,-1.84675,-0.134175,-0.468956,-1.06577,2.10307,1.19208,2.14254,2.35948 10201,10166,9269.14,-0.454618,0.774031,2.06017,2.8462,-0.622985,0.18548,5.53147,-2.50822,-2.46147,-4.96779,0.0109421,-5.95039,4.88549,1.45711,-1.36876,0.21175,1.58667,0.959389,-1.72767,-0.999701,-1.91612,-0.271218,-0.271307,-3.60937,2.2528,-2.81471,1.29832,0.342989 10196.9,10158.5,9266.51,1.16537,-1.9421,4.60098,6.66208,-8.91079,-4.05041,0.977918,-0.375912,-2.52562,-2.44083,-1.83608,-5.04574,0.870179,-2.88837,0.903319,2.45464,2.77487,7.13809,-7.32993,-2.29902,0.410437,1.61472,1.76486,-2.68616,2.88565,-3.79142,-0.830458,-1.20118 10194.1,10152.5,9265.18,-4.11534,-5.864,4.81522,5.05616,0.145339,-4.93641,2.59855,0.656712,1.10696,-4.83177,-6.68192,-7.2593,-1.01756,-6.50992,-0.623669,0.165413,3.83811,5.84041,-5.84841,-0.103661,1.98729,0.416145,1.34348,-6.16515,-2.67871,-5.57128,-1.65554,-3.26762 10194.1,10148.4,9264.07,-6.59722,-4.92656,-2.01588,3.7417,0.726794,-18.2936,5.15057,-0.276157,1.50739,-0.538248,-8.52874,-4.00362,-4.55022,-5.27015,0.604573,-0.930054,-0.109161,8.19838,-8.17669,-2.1092,4.17484,-1.56197,-1.02102,-5.8341,-5.50376,-1.7134,-2.50895,-3.06608 10193.9,10142,9261.25,-7.62788,-2.98611,1.9356,-1.40885,17.3716,4.06957,22.1809,1.39972,5.64224,-7.94302,-5.59134,-1.45901,0.439725,1.11211,-6.73411,-3.11746,1.4598,-4.78344,-2.09513,-0.404037,0.473396,-4.22587,-2.43839,-5.70551,-5.26427,-0.515338,1.20082,0.113119 10190.4,10132.9,9256.55,-0.061965,0.47587,-3.01478,1.28661,-2.15014,-14.2047,7.89898,0.463674,0.911903,2.0883,-1.64338,3.11185,-2.21723,0.781415,-1.37312,0.396228,-1.38267,3.09944,-1.8496,-1.29836,2.6087,-3.15966,-2.03297,-3.33185,-3.23065,2.92606,0.328003,-0.0324179 10185,10126,9252.36,-0.460313,1.71643,-3.7396,-2.47922,-1.49725,-15.3645,-1.80975,0.715758,-0.981069,-0.691494,-0.794101,-0.106849,-2.08179,-0.30971,-1.53311,0.428815,-0.320026,-0.221114,2.28648,0.175576,3.04606,-1.33911,-0.290353,-5.37868,-3.63253,0.919151,0.306196,-0.421839 10178.6,10124.8,9251.04,-1.00256,1.33259,-4.2472,-1.03971,2.95821,-4.55752,1.84476,0.117356,-4.36831,-4.27268,-1.02576,-0.886254,0.661063,-0.0446314,-0.718596,-0.508343,-2.00182,-0.337999,2.57329,-0.613947,2.18595,0.685998,2.2221,-1.4549,-2.89677,-0.0111036,1.2411,0.83044 10170.8,10127.6,9252.97,-1.71108,0.0714348,-2.91875,-0.0818013,10.0027,5.28964,4.84662,0.115636,-5.97389,-2.97492,0.466922,-1.16018,3.14319,-0.484977,-0.73996,-1.40938,-2.86898,-1.18229,2.85098,1.59393,-0.709864,0.769892,0.0526875,0.667581,-4.09633,-0.130706,2.87503,0.28772 10163.4,10130.8,9256.69,-0.0482655,-0.561906,-4.41924,-1.93638,1.00001,-3.80859,-6.74655,-0.693966,-6.90741,3.83606,-0.443929,0.133173,1.32042,-4.12952,2.21239,-0.401666,-2.83084,1.48444,3.60821,4.7162,0.0479322,1.57325,-2.9423,0.781086,-3.57562,1.01359,1.5974,-1.03302 10159.1,10132.9,9259.9,0.830676,1.38376,-3.59798,1.88876,1.90766,6.33722,1.16568,-1.88109,-5.49532,7.56995,-3.97276,2.47056,-1.10217,-4.02745,0.530141,-1.80729,-2.44923,1.11112,6.04583,5.79514,-1.61378,0.146823,-4.31812,1.65679,-0.82556,0.385538,-1.6035,-0.921055 
10159.8,10135.2,9260.63,-0.16576,1.00018,-5.12473,0.442361,0.505831,-5.64864,-2.63413,-2.52592,-5.46478,4.95174,-4.3147,0.782684,-5.73615,-4.82371,0.266276,-1.86669,-4.0481,-1.31822,9.03428,5.18538,0.835431,-1.04748,-4.21294,1.0615,-0.105573,-1.22812,-5.24566,-3.63422 10165.2,10138.1,9258.46,0.205477,-0.680098,-4.46762,5.26891,1.18115,-1.68502,7.13137,-1.22722,-4.01706,-1.7858,-0.511666,3.55446,-3.85553,-2.43205,1.3525,-0.694302,-4.16672,-0.729833,7.26617,2.38627,0.742375,-2.04911,-3.24066,2.72775,2.10783,0.115275,-4.78462,-4.34396 10171.6,10139.6,9254.61,-1.51268,-2.23477,-5.13237,-3.29461,-0.317239,-10.5071,-7.94002,1.87205,-2.15615,-2.57627,4.52526,1.46446,-2.39092,-3.68309,1.44927,1.27351,-2.10555,-3.67494,7.0263,3.64847,0.370668,0.612656,-2.452,4.76347,5.31087,1.21101,-2.18927,-4.86589 10174.6,10139.6,9250.85,-0.380976,0.430706,-4.77251,1.24603,3.57465,-3.14504,-10.8805,1.4131,-3.82203,6.1265,4.05681,1.86576,-2.69539,-3.84931,0.571097,0.0445532,-3.61574,1.0929,5.45496,4.67637,-2.69117,0.376736,-3.44843,8.26613,5.44059,2.39248,-1.35143,-3.43895 10173.2,10141.8,9247.9,-0.967231,0.660605,-0.333774,0.682442,10.1733,9.80472,-4.02844,0.296976,-2.0856,1.70749,0.105393,-0.302007,-2.02762,-1.68176,-2.57321,-1.85542,-2.20576,-3.56605,7.81712,4.57148,-0.717533,0.00661063,0.070936,7.88567,3.00205,-0.188925,-1.30646,-0.417109 10169.8,10147.8,9245.05,1.57911,1.89614,-1.23894,5.44327,1.1255,2.7455,0.888702,-2.69789,-2.29535,1.37374,-2.16695,0.277041,-2.61632,-0.168021,1.19527,-0.966804,-1.39634,2.02717,6.13068,1.74285,2.61838,-0.673957,2.42798,5.71141,1.0237,-0.190537,-2.48355,-0.424022 10166.9,10152.4,9241.4,1.48812,1.56883,0.00439658,-1.99079,-5.3945,-7.45076,-2.79497,-1.09824,0.438405,1.08335,0.567998,-2.12211,0.537132,0.235065,2.13962,0.850241,2.33283,0.11668,5.71046,0.316621,2.37782,1.5783,4.38674,4.44102,2.85837,-0.867284,0.197126,-0.632035 10166,10149.9,9237.21,3.10346,3.20745,-0.0787972,3.26164,-1.99167,1.15174,7.73898,0.388067,-1.3872,7.93093,2.89628,-0.846609,2.95243,1.10786,0.0356645,-0.191303,-1.48335,3.06518,0.833731,-2.48298,-2.62814,-0.329278,-0.0454046,4.84244,1.50962,-0.571214,2.28968,0.0896905 10169.4,10141.9,9233.72,1.54047,2.79665,0.872984,0.435893,0.341067,4.50191,6.31086,2.24353,0.0763229,5.33021,2.30696,-1.94916,2.28551,1.6759,-3.55737,-0.57595,-3.31446,-1.28349,0.109544,-0.911539,-3.08755,0.149125,-2.57658,2.65457,-0.759677,-1.72314,1.73795,1.22082 10175.5,10134.5,9231.85,3.08721,1.31195,-0.463831,-2.78365,-16.0641,-12.4959,-7.90321,1.44639,2.2521,2.09953,-0.628689,0.674957,-0.991746,0.999703,0.501374,1.08647,-1.9555,-0.457535,-1.969,0.140249,0.679574,4.05153,-1.26929,2.9472,1.23177,0.0460567,-1.18548,1.19414 10178.5,10132.3,9231.94,4.8578,-0.156201,-1.83619,3.45539,-10.5983,-4.40534,-3.25278,-1.48511,1.7839,1.07398,-3.79721,3.44697,-0.661031,-0.19397,1.51898,-2.78611,-1.58924,-1.02247,-4.03291,-0.779814,-2.72459,1.42865,-4.44874,1.96164,0.024013,0.769821,-1.68183,-1.09525 10176,10135.5,9234.24,3.98434,-2.9881,-1.82932,-3.45496,-4.37718,-1.32479,-6.81161,0.242295,3.63988,0.773917,-2.92089,1.50769,1.03257,-1.29175,0.607123,-3.32519,0.794345,-7.2134,-4.18473,-2.11878,-3.48641,2.04926,-1.83971,2.5711,1.8547,-0.444122,0.204744,-0.633906 10170.3,10141.1,9238.24,4.5574,-1.21766,-1.92884,-3.3891,-4.53289,-3.61119,-11.1428,0.87067,2.52674,6.28098,-0.916225,0.833349,-0.285056,-2.02874,2.83162,-0.822357,0.836116,-2.02452,-4.36166,-2.46534,-2.40599,3.53798,0.439996,2.8824,2.66576,-0.190266,-0.411649,-0.335746 
10164.8,10146.9,9241.73,1.14271,0.21175,2.54403,-5.97996,8.86795,9.92082,0.583279,0.92891,3.1377,1.52082,0.653327,-2.04189,-0.909795,-1.88382,-1.45444,-1.72465,2.94817,-6.9659,0.661566,-0.779148,-2.33549,3.61435,1.90115,-0.709103,0.572663,-2.44443,-1.61985,-1.24632 10161.8,10151.9,9242.42,0.429305,-0.24402,1.54324,-0.758714,1.99988,2.30697,-0.150645,-1.67843,-0.372931,2.68223,0.974669,-2.18675,-3.69726,-3.84373,0.315076,-1.61503,2.02219,-0.439987,1.5067,0.347441,-0.468043,1.85512,2.51346,-3.61534,-1.61311,-1.68631,-4.32277,-3.31289 10160.6,10154.5,9240.5,-1.6783,-2.7916,3.79283,-1.46484,1.8842,7.0456,3.61276,-2.08564,-1.14902,-3.90469,1.00738,-2.71903,-1.12392,-2.56102,-0.564502,-1.26929,2.87817,-3.80446,2.16188,1.69189,-0.17359,-0.806729,4.45158,-4.99401,-1.9224,-2.1335,-3.41399,-1.5215 10158.8,10152.9,9238.94,-1.26294,-1.55708,2.47997,-0.37092,-5.35681,-1.99801,-4.61673,-3.19995,-3.63982,-3.59422,0.268397,-1.15304,1.21312,-1.94008,2.37467,0.463918,1.03699,-0.249188,1.94821,3.1095,0.656428,-1.26258,5.17342,-2.5293,-0.911564,-0.727538,-1.60047,-0.657086 10157.1,10148.4,9241.47,-0.729297,1.90628,1.50273,8.02209,4.5029,7.25435,-0.943104,-3.87229,-5.15977,-0.605295,-0.786266,-0.00624273,3.2036,-0.99694,1.83674,-0.424322,-0.759934,4.69506,3.12589,4.93905,-1.14094,-2.37706,0.896838,-1.15642,-2.07425,-0.341439,0.651623,-1.90525 10159.3,10145.1,9249.53,-3.61489,-0.368775,4.8318,0.654323,13.8953,20.2332,9.01061,0.740005,1.06482,-1.98312,1.43178,-2.39481,5.44965,2.23927,-2.07082,1.84445,3.36316,-2.3874,5.82791,5.13504,0.331121,1.17574,4.11636,2.46863,2.53744,-2.31289,3.73605,1.261 10166.4,10146.2,9260.39,-0.690065,-0.196533,2.57149,3.28245,1.26863,3.07282,2.3288,0.343504,0.7493,7.7189,2.47287,-2.19401,1.83016,1.49389,2.04941,5.57015,1.68587,7.37325,4.33035,3.86901,3.21355,1.31074,4.30838,4.34097,4.14204,-0.792683,1.91579,1.4487 10174.6,10153.3,9268.63,0.973864,0.288282,4.67663,-0.604468,1.35396,1.77193,6.1612,0.928573,3.56181,0.301872,1.61496,-1.94891,1.37811,1.784,-0.829802,4.5252,2.98522,2.05165,3.03006,0.33278,4.9167,0.692046,4.78248,3.89965,4.1223,-1.28055,0.902128,2.44014 10179.4,10165.9,9270.91,0.383028,0.372248,2.91142,5.26445,-4.52355,-0.481389,-1.47582,-0.0802922,4.09074,-3.4789,-1.84054,-0.641665,1.60157,2.15213,-0.406849,1.24052,1.05589,7.69175,-4.79723,-3.42058,1.48542,-2.69221,-0.604027,-2.8823,-1.41943,-0.386671,1.59434,1.71786 
10180.9,10180.3,9268.76,-7.39108,-4.07938,1.96913,5.84801,-1.99672,13.1344,-8.45676,2.45664,8.74322,0.00440195,-3.70354,-4.02376,5.09873,7.07674,-2.94009,-6.27334,-2.18896,9.06615,-15.5002,-6.518,-12.659,-9.2251,-8.78964,-16.0646,-15.2285,-1.36974,7.28841,2.96689nipype-0.9.2/nipype/testing/data/fsLUT_aparc+aseg.pck000066400000000000000000000000001227300005300224530ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/func2anat_InverseWarp.nii.gz000066400000000000000000000000001227300005300242240ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/func2anat_coreg_Affine.txt000066400000000000000000000000001227300005300237470ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/func2anat_coreg_InverseWarp.nii.gz000066400000000000000000000000001227300005300254030ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/func_epi_1_1.nii000066400000000000000000000000001227300005300216270ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/func_to_struct.mat000066400000000000000000000000001227300005300224420ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/functional.nii000066400000000000000000000000001227300005300215410ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/functional.par000066400000000000000000000000001227300005300215440ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/functional.rms000066400000000000000000000000001227300005300215630ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/functional2.nii000066400000000000000000000000001227300005300216230ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/functional3.nii000066400000000000000000000000001227300005300216240ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/functional_1.dcm000066400000000000000000000000001227300005300217450ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/functional_2.dcm000066400000000000000000000000001227300005300217460ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/image.nii000066400000000000000000000000001227300005300204610ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/image.v000066400000000000000000000000001227300005300201470ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/indices-labels.txt000066400000000000000000000000001227300005300223150ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/indices.txt000066400000000000000000000000001227300005300210550ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/label.mgz000066400000000000000000000000001227300005300204740ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/lh-pial.stl000066400000000000000000000000001227300005300207500ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/lh.cope1.mgz000066400000000000000000000000001227300005300210260ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/lh.cope1.nii.gz000066400000000000000000000000001227300005300214270ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/lh.hippocampus.stl000066400000000000000000000000001227300005300223540ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/lh.pial000066400000000000000000000000001227300005300201500ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/lh.pial_converted.gii000066400000000000000000000000001227300005300227700ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/lut_file000066400000000000000000000000001227300005300204240ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/magnitude.nii00006640000000000000000000000
0001227300005300213540ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/maps.nii000066400000000000000000000000001227300005300203370ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/mask.1D000066400000000000000000000000001227300005300200170ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/mask.mif000066400000000000000000000000001227300005300203260ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/mask.nii000066400000000000000000000000001227300005300203320ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/mean_func.nii.gz000066400000000000000000000000001227300005300217510ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/merged_fsamples.nii000066400000000000000000000000001227300005300225340ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/merged_phsamples.nii000066400000000000000000000000001227300005300227160ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/merged_thsamples.nii000066400000000000000000000000001227300005300227220ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/mni.nii000066400000000000000000000000001227300005300201620ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/mni2t1.nii000066400000000000000000000000001227300005300205110ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/moving1.nii000066400000000000000000000000001227300005300207570ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/moving2.nii000066400000000000000000000000001227300005300207600ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/my_database.db000066400000000000000000000000001227300005300214560ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/network0.aparc+aseg.nii000066400000000000000000000000001227300005300231500ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/network0.gpickle000066400000000000000000000000001227300005300220070ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/pdfs.Bfloat000066400000000000000000000000001227300005300207630ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/peak_directions.mif000066400000000000000000000000001227300005300225360ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/pet_resliced.nii000066400000000000000000000000001227300005300220410ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/phase.nii000066400000000000000000000000001227300005300204770ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/pyscript.m000066400000000000000000000004321227300005300207420ustar00rootroot00000000000000fprintf(1,'Executing %s at %s:\n',mfilename,datestr(now)); ver, try,a=1; ,catch ME, fprintf(2,'MATLAB code threw an exception:\n'); fprintf(2,'%s\n',ME.message); if length(ME.stack) ~= 0, fprintf(2,'File:%s\nName:%s\nLine:%d\n',ME.stack.file,ME.stack.name,ME.stack.line);, end; 
end;nipype-0.9.2/nipype/testing/data/rc1s1.nii000066400000000000000000000000001227300005300203300ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/rc1s2.nii000066400000000000000000000000001227300005300203310ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/rc2s1.nii000066400000000000000000000000001227300005300203310ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/rc2s2.nii000066400000000000000000000000001227300005300203320ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/ref_class0.nii000066400000000000000000000000001227300005300214200ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/ref_class1.nii000066400000000000000000000000001227300005300214210ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/register.dat000066400000000000000000000000001227300005300212140ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/register.mat000066400000000000000000000000001227300005300212250ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/resp.1D000066400000000000000000000000001227300005300200350ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/resting.nii000066400000000000000000000000001227300005300210520ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/resting2anat_Warp.nii.gz000066400000000000000000000000001227300005300234100ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/resting2anat_coreg_Affine.txt000066400000000000000000000000001227300005300244670ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/rh-pial.stl000066400000000000000000000000001227300005300207560ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/rh.pial_converted.gii000066400000000000000000000000001227300005300227760ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/seed.1D000066400000000000000000000000001227300005300200040ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/seed_mask.nii000066400000000000000000000000001227300005300213320ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/seeds_to_M1.nii000066400000000000000000000000001227300005300215410ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/seeds_to_M2.nii000066400000000000000000000000001227300005300215420ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/session_info.npz000066400000000000000000000000001227300005300221250ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/skeleton_mask.nii.gz000066400000000000000000000000001227300005300226550ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/spmT_0001.img000066400000000000000000000000001227300005300207570ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/spminfo000066400000000000000000000007421227300005300203110ustar00rootroot00000000000000fprintf(1,'Executing %s at %s:\n',mfilename,datestr(now)); ver, try, if isempty(which('spm')), throw(MException('SPMCheck:NotFound','SPM not in matlab path')); end; spm_path = spm('dir'); fprintf(1, 'NIPYPE %s', spm_path); ,catch ME, fprintf(2,'MATLAB code threw an exception:\n'); fprintf(2,'%s\n',ME.message); if length(ME.stack) ~= 0, fprintf(2,'File:%s\nName:%s\nLine:%d\n',ME.stack.file,ME.stack.name,ME.stack.line);, end; 
end;nipype-0.9.2/nipype/testing/data/streamlines.trk000066400000000000000000000000001227300005300217460ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/struct2mni.nii000066400000000000000000000000001227300005300215110ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/struct_to_func.mat000066400000000000000000000000001227300005300224420ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/structural.nii000066400000000000000000000000001227300005300216070ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/subj1.cff000066400000000000000000000000001227300005300204020ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/subj1.pck000066400000000000000000000000001227300005300204210ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/subj2.cff000066400000000000000000000000001227300005300204030ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/subj2.pck000066400000000000000000000000001227300005300204220ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/subjectDesign.con000066400000000000000000000000001227300005300221700ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/subjectDesign.mat000066400000000000000000000000001227300005300221720ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/surf1.vtk000066400000000000000000000000001227300005300204640ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/surf2.vtk000066400000000000000000000000001227300005300204650ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/targets_MASK1.nii000066400000000000000000000000001227300005300217440ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/targets_MASK2.nii000066400000000000000000000000001227300005300217450ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/tbss_dir/000077500000000000000000000000001227300005300205215ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/tbss_dir/do_not_delete.txt000066400000000000000000000000731227300005300240660ustar00rootroot00000000000000This file has to be here because git ignores empty 
folders.nipype-0.9.2/nipype/testing/data/tensor_fitted_data.Bdouble000066400000000000000000000000001227300005300240360ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/timeDesign.con000066400000000000000000000000001227300005300214670ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/timeDesign.mat000066400000000000000000000000001227300005300214710ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/timeseries.txt000066400000000000000000000000001227300005300216100ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/topup_encoding.txt000066400000000000000000000000001227300005300224540ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/tract_data.Bfloat000066400000000000000000000000001227300005300221350ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/tracts.Bdouble000066400000000000000000000000001227300005300214740ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/trans.mat000066400000000000000000000000001227300005300205300ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/tst_class0.nii000066400000000000000000000000001227300005300214560ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/tst_class1.nii000066400000000000000000000000001227300005300214570ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/u_rc1s1_Template.nii000066400000000000000000000000001227300005300225070ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/u_rc1s2_Template.nii000066400000000000000000000000001227300005300225100ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/u_rc1s3_Template.nii000066400000000000000000000000001227300005300225110ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/varcope.nii.gz000066400000000000000000000000001227300005300214550ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/varcope1run1.nii.gz000066400000000000000000000000001227300005300223440ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/varcope1run2.nii.gz000066400000000000000000000000001227300005300223450ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/varcope2run1.nii.gz000066400000000000000000000000001227300005300223450ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/varcope2run2.nii.gz000066400000000000000000000000001227300005300223460ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/voxel-order_data.Bfloat000066400000000000000000000000001227300005300232660ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/weights.txt000066400000000000000000000000001227300005300211110ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/wm_mask.mif000066400000000000000000000000001227300005300210310ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/wm_undersampled.nii000066400000000000000000000000001227300005300225650ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/data/zstat1.nii.gz000066400000000000000000000000001227300005300212440ustar00rootroot00000000000000nipype-0.9.2/nipype/testing/decorators.py000066400000000000000000000043521227300005300205220ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Extend numpy's decorators to use nipype's gui and data labels. """ from numpy.testing.decorators import * from nibabel.data import DataError def make_label_dec(label, ds=None): """Factory function to create a decorator that applies one or more labels. 
Parameters ---------- label : str or sequence One or more labels that will be applied by the decorator to the functions it decorates. Labels are attributes of the decorated function with their value set to True. ds : str An optional docstring for the resulting decorator. If not given, a default docstring is auto-generated. Returns ------- ldec : function A decorator. Examples -------- >>> slow = make_label_dec('slow') >>> print slow.__doc__ Labels a test as 'slow' >>> rare = make_label_dec(['slow','hard'], ... "Mix labels 'slow' and 'hard' for rare tests") >>> @rare ... def f(): pass ... >>> >>> f.slow True >>> f.hard True """ if isinstance(label,basestring): labels = [label] else: labels = label # Validate that the given label(s) are OK for use in setattr() by doing a # dry run on a dummy function. tmp = lambda : None for label in labels: setattr(tmp,label,True) # This is the actual decorator we'll return def decor(f): for label in labels: setattr(f,label,True) return f # Apply the user's docstring if ds is None: ds = "Labels a test as %r" % label decor.__doc__ = ds return decor # For tests that need further review def needs_review(msg): """ Skip a test that needs further review. Parameters ---------- msg : string msg regarding the review that needs to be done """ def skip_func(func): return skipif(True, msg)(func) return skip_func # Easier version of the numpy knownfailure def knownfailure(f): return knownfailureif(True)(f) def if_datasource(ds, msg): try: ds.get_filename() except DataError: return skipif(True, msg) return lambda f : f nipype-0.9.2/nipype/testing/setup.py000066400000000000000000000007131227300005300175120ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('testing', parent_package, top_path) config.add_data_dir('data') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/testing/utils.py000066400000000000000000000011651227300005300175140ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Additional handy utilities for testing """ __docformat__ = 'restructuredtext' from ..utils.misc import package_check from nose import SkipTest def skip_if_no_package(*args, **kwargs): """Raise SkipTest if package_check fails Parameters ---------- *args Positional parameters passed to `package_check` *kwargs Keyword parameters passed to `package_check` """ package_check(exc_failed_import=SkipTest, exc_failed_check=SkipTest, *args, **kwargs) nipype-0.9.2/nipype/utils/000077500000000000000000000000001227300005300154625ustar00rootroot00000000000000nipype-0.9.2/nipype/utils/README.txt000066400000000000000000000003701227300005300171600ustar00rootroot00000000000000================== Nipype Utilities ================== This directory contains various utilities used in nipype. Some of them have been copied from nipy. Any changes to these should be done upstream. 
* From nipy: * onetime.py * tmpdirs.py nipype-0.9.2/nipype/utils/__init__.py000066400000000000000000000001641227300005300175740ustar00rootroot00000000000000 from onetime import OneTimeProperty, setattr_on_read from tmpdirs import TemporaryDirectory, InTemporaryDirectory nipype-0.9.2/nipype/utils/config.py000066400000000000000000000121131227300005300172770ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Created on 20 Apr 2010 logging options : INFO, DEBUG hash_method : content, timestamp @author: Chris Filo Gorgolewski ''' import ConfigParser from json import load, dump import os import shutil from StringIO import StringIO from warnings import warn from ..external import portalocker homedir = os.environ['HOME'] default_cfg = """ [logging] workflow_level = INFO filemanip_level = INFO interface_level = INFO log_to_file = false log_directory = %s log_size = 16384000 log_rotate = 4 [execution] create_report = true crashdump_dir = %s display_variable = :1 hash_method = timestamp job_finished_timeout = 5 keep_inputs = false local_hash_check = true matplotlib_backend = Agg plugin = Linear remove_node_directories = false remove_unnecessary_outputs = true single_thread_matlab = true stop_on_first_crash = false stop_on_first_rerun = false use_relative_paths = false stop_on_unknown_version = false write_provenance = false parameterize_dirs = true [check] interval = 1209600 """ % (homedir, os.getcwd()) class NipypeConfig(object): """Base nipype config class """ def __init__(self, *args, **kwargs): self._config = ConfigParser.ConfigParser() config_dir = os.path.expanduser('~/.nipype') if not os.path.exists(config_dir): os.makedirs(config_dir) old_config_file = os.path.expanduser('~/.nipype.cfg') new_config_file = os.path.join(config_dir, 'nipype.cfg') # To be deprecated in two releases if os.path.exists(old_config_file): if os.path.exists(new_config_file): msg=("Detected presence of both old (%s, used by versions " "< 0.5.2) and new (%s) config files. This version will " "proceed with the new one. We advise to merge settings " "and remove old config file if you are not planning to " "use previous releases of nipype.") % (old_config_file, new_config_file) warn(msg) else: warn("Moving old config file from: %s to %s" % (old_config_file, new_config_file)) shutil.move(old_config_file, new_config_file) self.data_file = os.path.join(config_dir, 'nipype.json') self._config.readfp(StringIO(default_cfg)) self._config.read([new_config_file, old_config_file, 'nipype.cfg']) def set_default_config(self): self._config.readfp(StringIO(default_cfg)) def enable_debug_mode(self): """Enables debug configuration """ self._config.set('execution', 'stop_on_first_crash', 'true') self._config.set('execution', 'remove_unnecessary_outputs', 'false') self._config.set('execution', 'keep_inputs', 'true') self._config.set('logging', 'workflow_level', 'DEBUG') self._config.set('logging', 'interface_level', 'DEBUG') def set_log_dir(self, log_dir): """Sets logging directory This should be the first thing that is done before any nipype class with logging is imported. 
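        A minimal sketch of the intended call order (illustrative only; it
        assumes the package-level ``config`` instance and an existing,
        writable directory)::

            from nipype import config
            config.set_log_dir('/tmp/nipype_logs')  # hypothetical path
            # import logging-aware classes (e.g. workflows) only after this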
""" self._config.set('logging', 'log_directory', log_dir) def get(self, section, option): return self._config.get(section, option) def set(self, section, option, value): return self._config.set(section, option, value) def getboolean(self, section, option): return self._config.getboolean(section, option) def has_option(self, section, option): return self._config.has_option(section, option) @property def _sections(self): return self._config._sections def get_data(self, key): if not os.path.exists(self.data_file): return None with open(self.data_file, 'rt') as file: portalocker.lock(file, portalocker.LOCK_EX) datadict = load(file) if key in datadict: return datadict[key] return None def save_data(self, key, value): datadict = {} if os.path.exists(self.data_file): with open(self.data_file, 'rt') as file: portalocker.lock(file, portalocker.LOCK_EX) datadict = load(file) with open(self.data_file, 'wt') as file: portalocker.lock(file, portalocker.LOCK_EX) datadict[key] = value dump(datadict, file) def update_config(self, config_dict): for section in ['execution', 'logging', 'check']: if section in config_dict: for key, val in config_dict[section].items(): if not key.startswith('__'): self._config.set(section, key, str(val)) def update_matplotlib(self): import matplotlib matplotlib.use(self.get('execution', 'matplotlib_backend')) def enable_provenance(self): self._config.set('execution', 'write_provenance', 'true') self._config.set('execution', 'hash_method', 'content') nipype-0.9.2/nipype/utils/docparse.py000066400000000000000000000254111227300005300176370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Utilities to pull in documentation from command-line tools. Examples -------- # Instantiate bet object from nipype.interfaces import fsl from nipype.utils import docparse better = fsl.Bet() docstring = docparse.get_doc(better.cmd, better.opt_map) """ import subprocess from nipype.interfaces.base import CommandLine from nipype.utils.misc import is_container def grab_doc(cmd, trap_error=True): """Run cmd without args and grab documentation. Parameters ---------- cmd : string Command line string trap_error : boolean Ensure that returncode is 0 Returns ------- doc : string The command line documentation """ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = proc.communicate() if trap_error and proc.returncode: msg = 'Attempting to run %s. Returned Error: %s'%(cmd,stderr) raise IOError(msg) if stderr: # A few programs, like fast and fnirt, send their help to # stderr instead of stdout. # XXX: Test for error vs. doc in stderr return stderr return stdout def reverse_opt_map(opt_map): """Reverse the key/value pairs of the option map in the interface classes. Parameters ---------- opt_map : dict Dictionary mapping the attribute name to a command line flag. Each interface class defines these for the command it wraps. Returns ------- rev_opt_map : dict Dictionary mapping the flags to the attribute name. """ # For docs, we only care about the mapping from our attribute # names to the command-line flags. The 'v.split()[0]' below # strips off the string format characters. 
# if (k != 'flags' and v) , key must not be flags as it is generic, # v must not be None or it cannot be parsed by this line revdict = {} for key, value in opt_map.items(): if is_container(value): # The value is a tuple where the first element is the # format string and the second element is a docstring. value = value[0] if (key != 'flags' and value is not None): revdict[value.split()[0]] = key return revdict def format_params(paramlist, otherlist=None): """Format the parameters according to the nipy style conventions. Since the external programs do not conform to any conventions, the resulting docstrings are not ideal. But at a minimum the Parameters section is reasonably close. Parameters ---------- paramlist : list List of strings where each list item matches exactly one parameter and it's description. These items will go into the 'Parameters' section of the docstring. otherlist : list List of strings, similar to paramlist above. These items will go into the 'Other Parameters' section of the docstring. Returns ------- doc : string The formatted docstring. """ hdr = 'Parameters' delim = '----------' paramlist.insert(0, delim) paramlist.insert(0, hdr) params = '\n'.join(paramlist) otherparams = [] doc = ''.join(params) if otherlist: hdr = 'Others Parameters' delim = '-----------------' otherlist.insert(0, delim) otherlist.insert(0, hdr) otherlist.insert(0, '\n') otherparams = '\n'.join(otherlist) doc = ''.join([doc, otherparams]) return doc def insert_doc(doc, new_items): """Insert ``new_items`` into the beginning of the ``doc`` Docstrings in ``new_items`` will be inserted right after the *Parameters* header but before the existing docs. Parameters ---------- doc : str The existing docstring we're inserting docmentation into. new_items : list List of strings to be inserted in the ``doc``. Examples -------- >>> from nipype.utils.docparse import insert_doc >>> doc = '''Parameters ... ---------- ... outline : ... something about an outline''' >>> new_items = ['infile : str', ' The name of the input file'] >>> new_items.extend(['outfile : str', ' The name of the output file']) >>> newdoc = insert_doc(doc, new_items) >>> print newdoc Parameters ---------- infile : str The name of the input file outfile : str The name of the output file outline : something about an outline """ # Insert new_items after the Parameters header doclist = doc.split('\n') tmpdoc = doclist[:2] # Add new_items tmpdoc.extend(new_items) # Add rest of documents tmpdoc.extend(doclist[2:]) # Insert newlines newdoc = [] for line in tmpdoc: newdoc.append(line) newdoc.append('\n') # We add one too many newlines, remove it. newdoc.pop(-1) return ''.join(newdoc) def build_doc(doc, opts): """Build docstring from doc and options Parameters ---------- rep_doc : string Documentation string opts : dict Dictionary of option attributes and keys. Use reverse_opt_map to reverse flags and attrs from opt_map class attribute. Returns ------- newdoc : string The docstring with flags replaced with attribute names and formated to match nipy standards (as best we can). """ # Split doc into line elements. Generally, each line is an # individual flag/option. 
doclist = doc.split('\n') newdoc = [] flags_doc = [] for line in doclist: linelist = line.split() if not linelist: # Probably an empty line continue # For lines we care about, the first item is the flag if ',' in linelist[0]: #sometimes flags are only seperated by comma flag = linelist[0].split(',')[0] else: flag = linelist[0] attr = opts.get(flag) if attr is not None: #newline = line.replace(flag, attr) # Replace the flag with our attribute name linelist[0] = '%s :' % str(attr) # Add some line formatting linelist.insert(1, '\n ') newline = ' '.join(linelist) newdoc.append(newline) else: if line[0].isspace(): # For all the docs I've looked at, the flags all have # indentation (spaces) at the start of the line. # Other parts of the docs, like 'usage' statements # start with alpha-numeric characters. We only care # about the flags. flags_doc.append(line) return format_params(newdoc, flags_doc) def get_doc(cmd, opt_map, help_flag=None, trap_error=True): """Get the docstring from our command and options map. Parameters ---------- cmd : string The command whose documentation we are fetching opt_map : dict Dictionary of flags and option attributes. help_flag : string Provide additional help flag. e.g., -h trap_error : boolean Override if underlying command returns a non-zero returncode Returns ------- doc : string The formated docstring """ res = CommandLine('which %s' % cmd.split(' ')[0], terminal_output='allatonce').run() cmd_path = res.runtime.stdout.strip() if cmd_path == '': raise Exception('Command %s not found'%cmd.split(' ')[0]) if help_flag: cmd = ' '.join((cmd,help_flag)) doc = grab_doc(cmd,trap_error) opts = reverse_opt_map(opt_map) return build_doc(doc, opts) def _parse_doc(doc, style=['--']): """Parses a help doc for inputs Parameters ---------- doc : string Documentation string style : string default ['--'] The help command style (--, -) Returns ------- optmap : dict of input parameters """ # Split doc into line elements. Generally, each line is an # individual flag/option. doclist = doc.split('\n') optmap = {} if isinstance(style,str): style = [style] for line in doclist: linelist = line.split() flag =[item for i,item in enumerate(linelist) if i<2 and \ any([item.startswith(s) for s in style]) and \ len(item)>1] if flag: if len(flag)==1: style_idx = [flag[0].startswith(s) for s in style].index(True) flag = flag[0] else: style_idx = [] for f in flag: for i,s in enumerate(style): if f.startswith(s): style_idx.append(i) break flag = flag[style_idx.index(min(style_idx))] style_idx = min(style_idx) optmap[flag.split(style[style_idx])[1]] = '%s %%s'%flag return optmap def get_params_from_doc(cmd, style='--', help_flag=None, trap_error=True): """Auto-generate option map from command line help Parameters ---------- cmd : string The command whose documentation we are fetching style : string default ['--'] The help command style (--, -). Multiple styles can be provided in a list e.g. ['--','-']. help_flag : string Provide additional help flag. 
e.g., -h trap_error : boolean Override if underlying command returns a non-zero returncode Returns ------- optmap : dict Contains a mapping from input to command line variables """ res = CommandLine('which %s' % cmd.split(' ')[0], terminal_output='allatonce').run() cmd_path = res.runtime.stdout.strip() if cmd_path == '': raise Exception('Command %s not found'%cmd.split(' ')[0]) if help_flag: cmd = ' '.join((cmd,help_flag)) doc = grab_doc(cmd,trap_error) return _parse_doc(doc,style) def replace_opts(rep_doc, opts): """Replace flags with parameter names. This is a simple operation where we replace the command line flags with the attribute names. Parameters ---------- rep_doc : string Documentation string opts : dict Dictionary of option attributes and keys. Use reverse_opt_map to reverse flags and attrs from opt_map class attribute. Returns ------- rep_doc : string New docstring with flags replaces with attribute names. Examples -------- doc = grab_doc('bet') opts = reverse_opt_map(fsl.Bet.opt_map) rep_doc = replace_opts(doc, opts) """ # Replace flags with attribute names for key, val in opts.items(): rep_doc = rep_doc.replace(key, val) return rep_doc nipype-0.9.2/nipype/utils/filemanip.py000066400000000000000000000276031227300005300200100ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Miscellaneous file manipulation functions """ import cPickle from glob import glob import gzip import hashlib from hashlib import md5 import json import os import re import shutil import numpy as np from ..interfaces.traits_extension import isdefined from .misc import is_container from .. import logging, config fmlogger = logging.getLogger("filemanip") class FileNotFoundError(Exception): pass def split_filename(fname): """Split a filename into parts: path, base filename and extension. Parameters ---------- fname : str file or path name Returns ------- pth : str base path from fname fname : str filename from fname, without extension ext : str file extension from fname Examples -------- >>> from nipype.utils.filemanip import split_filename >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') >>> pth '/home/data' >>> fname 'subject' >>> ext '.nii.gz' """ special_extensions = [".nii.gz", ".tar.gz"] if fname and fname.endswith(os.path.sep): fname = fname[:-1] pth, fname = os.path.split(fname) ext = None for special_ext in special_extensions: ext_len = len(special_ext) if (len(fname) > ext_len) and \ (fname[-ext_len:].lower() == special_ext.lower()): ext = fname[-ext_len:] fname = fname[:-ext_len] break if not ext: fname, ext = os.path.splitext(fname) return pth, fname, ext def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): """Manipulates path and name of input filename Parameters ---------- fname : string A filename (may or may not include path) prefix : string Characters to prepend to the filename suffix : string Characters to append to the filename newpath : string Path to replace the path of the input fname use_ext : boolean If True (default), appends the extension of the original file to the output name. 
Returns ------- Absolute path of the modified filename >>> from nipype.utils.filemanip import fname_presuffix >>> fname = 'foo.nii.gz' >>> fname_presuffix(fname,'pre','post','/tmp') '/tmp/prefoopost.nii.gz' """ pth, fname, ext = split_filename(fname) if not use_ext: ext = '' if newpath and isdefined(newpath): pth = os.path.abspath(newpath) return os.path.join(pth, prefix + fname + suffix + ext) def fnames_presuffix(fnames, prefix='', suffix='', newpath=None, use_ext=True): """Calls fname_presuffix for a list of files. """ f2 = [] for fname in fnames: f2.append(fname_presuffix(fname, prefix, suffix, newpath, use_ext)) return f2 def hash_rename(filename, hashvalue): """renames a file given original filename and hash and sets path to output_directory """ path, name, ext = split_filename(filename) newfilename = ''.join((name, '_0x', hashvalue, ext)) return os.path.join(path, newfilename) def check_forhash(filename): """checks if file has a hash in its filename""" if isinstance(filename, list): filename = filename[0] path, name = os.path.split(filename) if re.search('(_0x[a-z0-9]{32})', name): hashvalue = re.findall('(_0x[a-z0-9]{32})', name) return True, hashvalue else: return False, None def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5): """ Computes hash of a file using 'crypto' module""" hex = None if os.path.isfile(afile): crypto_obj = crypto() fp = file(afile, 'rb') while True: data = fp.read(chunk_len) if not data: break crypto_obj.update(data) fp.close() hex = crypto_obj.hexdigest() return hex def hash_timestamp(afile): """ Computes md5 hash of the timestamp of a file """ md5hex = None if os.path.isfile(afile): md5obj = md5() stat = os.stat(afile) md5obj.update(str(stat.st_size)) md5obj.update(str(stat.st_mtime)) md5hex = md5obj.hexdigest() return md5hex def copyfile(originalfile, newfile, copy=False, create_new=False, hashmethod=None): """Copy or symlink ``originalfile`` to ``newfile``. 
Parameters ---------- originalfile : str full path to original file newfile : str full path to new file copy : Bool specifies whether to copy or symlink files (default=False) but only for POSIX systems Returns ------- None """ newhash = None orighash = None fmlogger.debug(newfile) if create_new: while os.path.exists(newfile): base, fname, ext = split_filename(newfile) s = re.search('_c[0-9]{4,4}$', fname) i = 0 if s: i = int(s.group()[2:])+1 fname = fname[:-6] + "_c%04d" % i else: fname += "_c%04d" % i newfile = base + os.sep + fname + ext if hashmethod is None: hashmethod = config.get('execution', 'hash_method').lower() elif os.path.exists(newfile): if hashmethod == 'timestamp': newhash = hash_timestamp(newfile) elif hashmethod == 'content': newhash = hash_infile(newfile) fmlogger.debug("File: %s already exists,%s, copy:%d" % (newfile, newhash, copy)) #the following seems unnecessary #if os.name is 'posix' and copy: # if os.path.lexists(newfile) and os.path.islink(newfile): # os.unlink(newfile) # newhash = None if os.name is 'posix' and not copy: if os.path.lexists(newfile): if hashmethod == 'timestamp': orighash = hash_timestamp(originalfile) elif hashmethod == 'content': orighash = hash_infile(originalfile) fmlogger.debug('Original hash: %s, %s' % (originalfile, orighash)) if newhash != orighash: os.unlink(newfile) if (newhash is None) or (newhash != orighash): os.symlink(originalfile, newfile) else: if newhash: if hashmethod == 'timestamp': orighash = hash_timestamp(originalfile) elif hashmethod == 'content': orighash = hash_infile(originalfile) if (newhash is None) or (newhash != orighash): try: fmlogger.debug("Copying File: %s->%s" % (newfile, originalfile)) shutil.copyfile(originalfile, newfile) except shutil.Error, e: fmlogger.warn(e.message) else: fmlogger.debug("File: %s already exists, not overwriting, copy:%d" % (newfile, copy)) if originalfile.endswith(".img"): hdrofile = originalfile[:-4] + ".hdr" hdrnfile = newfile[:-4] + ".hdr" matofile = originalfile[:-4] + ".mat" if os.path.exists(matofile): matnfile = newfile[:-4] + ".mat" copyfile(matofile, matnfile, copy) copyfile(hdrofile, hdrnfile, copy) elif originalfile.endswith(".BRIK"): hdrofile = originalfile[:-4] + ".HEAD" hdrnfile = newfile[:-4] + ".HEAD" copyfile(hdrofile, hdrnfile, copy) return newfile def get_related_files(filename): """Returns a list of related files for Nifti-Pair, Analyze (SPM) and AFNI files """ related_files = [] if filename.endswith(".img") or filename.endswith(".hdr"): path, name, ext = split_filename(filename) for ext in ['.hdr', '.img', '.mat']: related_files.append(os.path.join(path, name + ext)) elif filename.endswith(".BRIK") or filename.endswith(".HEAD"): path, name, ext = split_filename(filename) for ext in ['.BRIK', '.HEAD']: related_files.append(os.path.join(path, name + ext)) if not len(related_files): related_files = [filename] return related_files def copyfiles(filelist, dest, copy=False, create_new=False): """Copy or symlink files in ``filelist`` to ``dest`` directory. Parameters ---------- filelist : list List of files to copy. dest : path/files full path to destination. If it is a list of length greater than 1, then it assumes that these are the names of the new files. 
copy : Bool specifies whether to copy or symlink files (default=False) but only for posix systems Returns ------- None """ outfiles = filename_to_list(dest) newfiles = [] for i, f in enumerate(filename_to_list(filelist)): if isinstance(f, list): newfiles.insert(i, copyfiles(f, dest, copy=copy, create_new=create_new)) else: if len(outfiles) > 1: destfile = outfiles[i] else: destfile = fname_presuffix(f, newpath=outfiles[0]) destfile = copyfile(f, destfile, copy, create_new=create_new) newfiles.insert(i, destfile) return newfiles def filename_to_list(filename): """Returns a list given either a string or a list """ if isinstance(filename, (str, unicode)): return [filename] elif isinstance(filename, list): return filename elif is_container(filename): return [x for x in filename] else: return None def list_to_filename(filelist): """Returns a list if filelist is a list of length greater than 1, otherwise returns the first element """ if len(filelist) > 1: return filelist else: return filelist[0] def save_json(filename, data): """Save data to a json file Parameters ---------- filename : str Filename to save data in. data : dict Dictionary to save in json file. """ fp = file(filename, 'w') json.dump(data, fp, sort_keys=True, indent=4) fp.close() def load_json(filename): """Load data from a json file Parameters ---------- filename : str Filename to load data from. Returns ------- data : dict """ fp = file(filename, 'r') data = json.load(fp) fp.close() return data def loadflat(infile, *args): """Load an npz file into a dict """ data = np.load(infile) out = {} if args: outargs = np.setdiff1d(args, data.files) if outargs: raise IOError('File does not contain variables: '+str(outargs)) for k in data.files: if k in args or not args: out[k] = [f for f in data[k].flat] if len(out[k]) == 1: out[k] = out[k].pop() return out def loadcrash(infile, *args): if '.pkl' in infile: return loadpkl(infile) else: return loadflat(infile, *args) def loadpkl(infile): """Load a zipped or plain cPickled file """ if infile.endswith('pklz'): pkl_file = gzip.open(infile, 'rb') else: pkl_file = open(infile) return cPickle.load(pkl_file) def savepkl(filename, record): if filename.endswith('pklz'): pkl_file = gzip.open(filename, 'wb') else: pkl_file = open(filename, 'wb') cPickle.dump(record, pkl_file) pkl_file.close() rst_levels = ['=', '-', '~', '+'] def write_rst_header(header, level=0): return '\n'.join((header, ''.join([rst_levels[level] for _ in header]))) + '\n\n' def write_rst_list(items, prefix=''): out = [] for item in items: out.append(prefix + ' ' + str(item)) return '\n'.join(out)+'\n\n' def write_rst_dict(info, prefix=''): out = [] for key, value in sorted(info.items()): out.append(prefix + '* ' + key + ' : ' + str(value)) return '\n'.join(out)+'\n\n' nipype-0.9.2/nipype/utils/logger.py000066400000000000000000000113221227300005300173120ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import logging import os import sys try: from ..external.cloghandler import ConcurrentRotatingFileHandler as \ RFHandler except ImportError: # Next 2 lines are optional: issue a warning to the user from warnings import warn warn("ConcurrentLogHandler not installed. 
Using builtin log handler") from logging.handlers import RotatingFileHandler as RFHandler from .misc import str2bool from .config import NipypeConfig class Logging(object): """Nipype logging class """ fmt = ('%(asctime)s,%(msecs)d %(name)-2s ' '%(levelname)-2s:\n\t %(message)s') datefmt = '%y%m%d-%H:%M:%S' def __init__(self, config): self._config = config logging.basicConfig(format=self.fmt, datefmt=self.datefmt, stream=sys.stdout) #logging.basicConfig(stream=sys.stdout) self._logger = logging.getLogger('workflow') self._fmlogger = logging.getLogger('filemanip') self._iflogger = logging.getLogger('interface') self.loggers = {'workflow': self._logger, 'filemanip': self._fmlogger, 'interface': self._iflogger} self._hdlr = None self.update_logging(self._config) def enable_file_logging(self): config = self._config LOG_FILENAME = os.path.join(config.get('logging', 'log_directory'), 'pypeline.log') hdlr = RFHandler(LOG_FILENAME, maxBytes=int(config.get('logging', 'log_size')), backupCount=int(config.get('logging', 'log_rotate'))) formatter = logging.Formatter(fmt=self.fmt, datefmt=self.datefmt) hdlr.setFormatter(formatter) self._logger.addHandler(hdlr) self._fmlogger.addHandler(hdlr) self._iflogger.addHandler(hdlr) self._hdlr = hdlr def disable_file_logging(self): if self._hdlr: self._logger.removeHandler(self._hdlr) self._fmlogger.removeHandler(self._hdlr) self._iflogger.removeHandler(self._hdlr) self._hdlr = None def update_logging(self, config): self._config = config self.disable_file_logging() self._logger.setLevel(logging.getLevelName(config.get('logging', 'workflow_level'))) self._fmlogger.setLevel(logging.getLevelName(config.get('logging', 'filemanip_level'))) self._iflogger.setLevel(logging.getLevelName(config.get('logging', 'interface_level'))) if str2bool(config.get('logging', 'log_to_file')): self.enable_file_logging() def getLogger(self, name): if name in self.loggers: return self.loggers[name] return None def getLevelName(self, name): return logging.getLevelName(name) def logdebug_dict_differences(self, dold, dnew, prefix=""): """Helper to log what actually changed from old to new values of dictionaries. typical use -- log difference for hashed_inputs """ # Compare against hashed_inputs # Keys: should rarely differ new_keys = set(dnew.keys()) old_keys = set(dold.keys()) if len(new_keys - old_keys): self._logger.debug("%s not previously seen: %s" % (prefix, new_keys - old_keys)) if len(old_keys - new_keys): self._logger.debug("%s not presently seen: %s" % (prefix, old_keys - new_keys)) # Values in common keys would differ quite often, # so we need to join the messages together msgs = [] for k in new_keys.intersection(old_keys): same = False try: new, old = dnew[k], dold[k] same = new == old if not same: # Since JSON does not discriminate between lists and # tuples, we might need to cast them into the same type # as the last resort. 
And let's try to be more generic same = old.__class__(new) == old except Exception, e: same = False if not same: msgs += ["%s: %r != %r" % (k, dnew[k], dold[k])] if len(msgs): self._logger.debug("%s values differ in fields: %s" % (prefix, ", ".join(msgs))) nipype-0.9.2/nipype/utils/matlabtools.py000066400000000000000000000042071227300005300203600ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Useful Functions for working with matlab""" # Stdlib imports import os import re import tempfile # Third-party imports import numpy as np # Functions, classes and other top-level code def fltcols(vals): ''' Trivial little function to make 1xN float vector ''' return np.atleast_2d(np.array(vals, dtype=float)) def mlab_tempfile(dir=None): """Returns a temporary file-like object with a valid matlab name. The file name is accessible as the .name attribute of the returned object. The caller is responsible for closing the returned object, at which time the underlying file gets deleted from the filesystem. Parameters ---------- dir : str A path to use as the starting directory. Note that this directory must already exist, it is NOT created if it doesn't (in that case, OSError is raised instead). Returns ------- f : A file-like object. Examples -------- >>> fn = mlab_tempfile() >>> import os >>> filename = os.path.basename(fn.name) >>> '-' not in filename True >>> fn.close() """ valid_name = re.compile(r'^\w+$') # Make temp files until we get one whose name is a valid matlab identifier, # since matlab imposes that constraint. Since the temp file routines may # return names that aren't valid matlab names, but we can't control that # directly, we just keep trying until we get a valid name. To avoid an # infinite loop for some strange reason, we only try 100 times. for n in range(100): f = tempfile.NamedTemporaryFile(suffix='.m',prefix='tmp_matlab_', dir=dir) # Check the file name for matlab compliance fname = os.path.splitext(os.path.basename(f.name))[0] if valid_name.match(fname): break # Close the temp file we just made if its name is not valid; the # tempfile module then takes care of deleting the actual file on disk. f.close() else: raise ValueError("Could not make temp file after 100 tries") return f nipype-0.9.2/nipype/utils/misc.py000066400000000000000000000145701227300005300167760ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Miscellaneous utility functions """ from cPickle import dumps, loads import inspect from distutils.version import LooseVersion import numpy as np from textwrap import dedent import sys import re def human_order_sorted(l): """Sorts strings in human order (i.e.
'stat10' will go after 'stat2')""" def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): if isinstance(text, tuple): text = text[0] return [ atoi(c) for c in re.split('(\d+)', text) ] return sorted(l, key=natural_keys) def trim(docstring, marker=None): if not docstring: return '' # Convert tabs to spaces (following the normal Python rules) # and split into a list of lines: lines = docstring.expandtabs().splitlines() # Determine minimum indentation (first line doesn't count): indent = sys.maxint for line in lines[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) # Remove indentation (first line is special): trimmed = [lines[0].strip()] if indent < sys.maxint: for line in lines[1:]: # replace existing REST marker with doc level marker stripped = line.lstrip().strip().rstrip() if marker is not None and stripped and \ all([s==stripped[0] for s in stripped]) and \ stripped[0] not in [':']: line = line.replace(stripped[0], marker) trimmed.append(line[indent:].rstrip()) # Strip off trailing and leading blank lines: while trimmed and not trimmed[-1]: trimmed.pop() while trimmed and not trimmed[0]: trimmed.pop(0) # Return a single string: return '\n'.join(trimmed) def getsource(function): """Returns the source code of a function""" src = dumps(dedent(inspect.getsource(function))) return src def create_function_from_source(function_source, imports=None): """Return a function object from a function source Parameters ---------- function_source : pickled string string in pickled form defining a function imports : list of strings list of import statements in string form that allow the function to be executed in an otherwise empty namespace """ ns = {} import_keys = [] try: if imports is not None: for statement in imports: exec statement in ns import_keys = ns.keys() exec loads(function_source) in ns except Exception, msg: msg = str(msg) + '\nError executing function:\n %s\n'%function_source msg += '\n'.join(["Functions in connection strings have to be standalone.", "They cannot be declared either interactively or inside", "another function or inline in the connect string. Any", "imports should be done inside the function" ]) raise RuntimeError(msg) ns_funcs = list(set(ns) - set(import_keys + ['__builtins__'])) assert len(ns_funcs) == 1, "Function or inputs are ill-defined" funcname = ns_funcs[0] func = ns[funcname] return func def find_indices(condition): "Return the indices where ravel(condition) is true" res, = np.nonzero(np.ravel(condition)) return res def is_container(item): """Checks if item is a container (list, tuple, dict, set) Parameters ---------- item : object object to check for .__iter__ Returns ------- output : Boolean True if container False if not (eg string) """ if hasattr(item, '__iter__'): return True else: return False def container_to_string(cont): """Convert a container to a command line string. Elements of the container are joined with a space between them, suitable for a command line parameter. If the container `cont` is only a sequence, like a string and not a container, it is returned unmodified. Parameters ---------- cont : container A container object like a list, tuple, dict, or a set. Returns ------- cont_str : string Container elements joined into a string. """ if hasattr(cont, '__iter__'): return ' '.join(cont) else: return str(cont) # Dependency checks. Copied this from Nipy, with some modificiations # (added app as a parameter). 
def package_check(pkg_name, version=None, app=None, checker=LooseVersion, exc_failed_import=ImportError, exc_failed_check=RuntimeError): """Check that the minimal version of the required package is installed. Parameters ---------- pkg_name : string Name of the required package. version : string, optional Minimal version number for required package. app : string, optional Application that is performing the check. For instance, the name of the tutorial being executed that depends on specific packages. Default is *Nipype*. checker : object, optional The class that will perform the version checking. Default is distutils.version.LooseVersion. exc_failed_import : Exception, optional Class of the exception to be thrown if import failed. exc_failed_check : Exception, optional Class of the exception to be thrown if version check failed. Examples -------- package_check('numpy', '1.3') package_check('networkx', '1.0', 'tutorial1') """ if app: msg = '%s requires %s' % (app, pkg_name) else: msg = 'Nipype requires %s' % pkg_name if version: msg += ' with version >= %s' % (version,) try: mod = __import__(pkg_name) except ImportError: raise exc_failed_import(msg) if not version: return try: have_version = mod.__version__ except AttributeError: raise exc_failed_check('Cannot find version for %s' % pkg_name) if checker(have_version) < checker(version): raise exc_failed_check(msg) def str2bool(v): if isinstance(v, bool): return v lower = v.lower() if lower in ("yes", "true", "t", "1"): return True elif lower in ("no", "false", "n", "f", "0"): return False else: raise ValueError("%s cannot be converted to bool"%v) nipype-0.9.2/nipype/utils/onetime.py000066400000000000000000000047641227300005300175070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Descriptor support for NIPY. Utilities to support special Python descriptors [1,2], in particular the use of a useful pattern for properties we call 'one time properties'. These are object attributes which are declared as properties, but become regular attributes once they've been read the first time. They can thus be evaluated later in the object's life cycle, but once evaluated they become normal, static attributes with no function call overhead on access or any other constraints. References ---------- [1] How-To Guide for Descriptors, Raymond Hettinger. http://users.rcn.com/python/download/Descriptor.htm [2] Python data model, http://docs.python.org/reference/datamodel.html """ class OneTimeProperty(object): """A descriptor to make special properties that become normal attributes. """ def __init__(self, func): """Create a OneTimeProperty instance. Parameters ---------- func : method The method that will be called the first time to compute a value. Afterwards, the method's name will be a standard attribute holding the value of this computation. """ self.getter = func self.name = func.func_name def __get__(self, obj, type=None): """ Called on attribute access on the class or instance. """ if obj is None: # Being called on the class, return the original function. This way, # introspection works on the class. return self.getter val = self.getter(obj) #print "** setattr_on_read - loading '%s'" % self.name # dbg setattr(obj, self.name, val) return val def setattr_on_read(func): # XXX - beetter names for this? # - cor_property (copy on read property) # - sor_property (set on read property) # - prop2attr_on_read #... ? """Decorator to create OneTimeProperty attributes. 
Parameters ---------- func : method The method that will be called the first time to compute a value. Afterwards, the method's name will be a standard attribute holding the value of this computation. Examples -------- >>> class MagicProp(object): ... @setattr_on_read ... def a(self): ... return 99 ... >>> x = MagicProp() >>> 'a' in x.__dict__ False >>> x.a 99 >>> 'a' in x.__dict__ True """ return OneTimeProperty(func) nipype-0.9.2/nipype/utils/provenance.py000066400000000000000000000364031227300005300202020ustar00rootroot00000000000000from cPickle import dumps import json import os import pwd from socket import getfqdn from uuid import uuid1 import numpy as np try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict try: import prov.model as pm except ImportError: from ..external import provcopy as pm from .. import get_info from .filemanip import (md5, hashlib, hash_infile) from .. import logging iflogger = logging.getLogger('interface') foaf = pm.Namespace("foaf", "http://xmlns.com/foaf/0.1/") dcterms = pm.Namespace("dcterms", "http://purl.org/dc/terms/") nipype_ns = pm.Namespace("nipype", "http://nipy.org/nipype/terms/") niiri = pm.Namespace("niiri", "http://iri.nidash.org/") crypto = pm.Namespace("crypto", ("http://id.loc.gov/vocabulary/preservation/" "cryptographicHashFunctions/")) get_id = lambda: niiri[uuid1().hex] def get_attr_id(attr, skip=None): dictwithhash, hashval = get_hashval(attr, skip=skip) return niiri[hashval] max_text_len = 1024000 def get_hashval(inputdict, skip=None): """Return a dictionary of our items with hashes for each file. Searches through dictionary items and if an item is a file, it calculates the md5 hash of the file contents and stores the file name and hash value as the new key value. However, the overall bunch hash is calculated only on the hash value of a file. The path and name of the file are not used in the overall hash calculation. Returns ------- dict_withhash : dict Copy of our dictionary with the new file hashes included with each file. 
hashvalue : str The md5 hash value of the traited spec """ dict_withhash = {} dict_nofilename = OrderedDict() keys = {} for key in inputdict: if skip is not None and key in skip: continue keys[key.get_uri()] = key for key in sorted(keys): val = inputdict[keys[key]] outname = key try: if isinstance(val, pm.URIRef): val = val.decode() except AttributeError: pass if isinstance(val, pm.QName): val = val.get_uri() if isinstance(val, pm.Literal): val = val.get_value() dict_nofilename[outname] = _get_sorteddict(val) dict_withhash[outname] = _get_sorteddict(val, True) return (dict_withhash, md5(str(dict_nofilename)).hexdigest()) def _get_sorteddict(object, dictwithhash=False): if isinstance(object, dict): out = OrderedDict() for key, val in sorted(object.items()): if val: out[key] = _get_sorteddict(val, dictwithhash) elif isinstance(object, (list, tuple)): out = [] for val in object: if val: out.append(_get_sorteddict(val, dictwithhash)) if isinstance(object, tuple): out = tuple(out) else: if isinstance(object, str) and os.path.isfile(object): hash = hash_infile(object) if dictwithhash: out = (object, hash) else: out = hash elif isinstance(object, float): out = '%.10f' % object else: out = object return out def safe_encode(x, as_literal=True): """Encodes a python value for prov """ if x is None: value = "Unknown" if as_literal: return pm.Literal(value, pm.XSD['string']) else: return value try: if isinstance(x, (str, unicode)): if os.path.exists(x): value = 'file://%s%s' % (getfqdn(), x) if not as_literal: return value try: return pm.URIRef(value) except AttributeError: return pm.Literal(value, pm.XSD['anyURI']) else: if len(x) > max_text_len: value = x[:max_text_len - 13] + ['...Clipped...'] else: value = x if not as_literal: return value return pm.Literal(value, pm.XSD['string']) if isinstance(x, (int,)): if not as_literal: return x return pm.Literal(int(x), pm.XSD['integer']) if isinstance(x, (float,)): if not as_literal: return x return pm.Literal(x, pm.XSD['float']) if isinstance(x, dict): outdict = {} for key, value in x.items(): encoded_value = safe_encode(value, as_literal=False) if isinstance(encoded_value, (pm.Literal,)): outdict[key] = encoded_value.json_representation() else: outdict[key] = encoded_value if not as_literal: return json.dumps(outdict) return pm.Literal(json.dumps(outdict), pm.XSD['string']) if isinstance(x, list): try: nptype = np.array(x).dtype if nptype == np.dtype(object): raise ValueError('dtype object') except ValueError, e: outlist = [] for value in x: encoded_value = safe_encode(value, as_literal=False) if isinstance(encoded_value, (pm.Literal,)): outlist.append(encoded_value.json_representation()) else: outlist.append(encoded_value) else: outlist = x if not as_literal: return json.dumps(outlist) return pm.Literal(json.dumps(outlist), pm.XSD['string']) if not as_literal: return dumps(x) return pm.Literal(dumps(x), nipype_ns['pickle']) except TypeError, e: iflogger.info(e) value = "Could not encode: " + str(e) if not as_literal: return value return pm.Literal(value, pm.XSD['string']) def prov_encode(graph, value, create_container=True): if isinstance(value, list) and create_container: if len(value) > 1: try: entities = [] for item in value: item_entity = prov_encode(graph, item) entities.append(item_entity) if isinstance(item, list): continue if not isinstance(item_entity.get_value()[0], basestring): raise ValueError('Not a string literal') if 'file://' not in item_entity.get_value()[0]: raise ValueError('No file found') id = get_id() entity = 
graph.collection(identifier=id) for item_entity in entities: graph.hadMember(id, item_entity) except ValueError, e: iflogger.debug(e) entity = prov_encode(graph, value, create_container=False) else: entity = prov_encode(graph, value[0]) else: encoded_literal = safe_encode(value) attr = {pm.PROV['value']: encoded_literal} if isinstance(value, basestring) and os.path.exists(value): attr.update({pm.PROV['Location']: encoded_literal}) if not os.path.isdir(value): sha512 = hash_infile(value, crypto=hashlib.sha512) attr.update({crypto['sha512']: pm.Literal(sha512, pm.XSD['string'])}) id = get_attr_id(attr, skip=[pm.PROV['Location'], pm.PROV['value']]) else: id = get_attr_id(attr, skip=[pm.PROV['Location']]) else: id = get_attr_id(attr) entity = graph.entity(id, attr) return entity def write_provenance(results, filename='provenance', format='turtle'): ps = ProvStore() ps.add_results(results) return ps.write_provenance(filename=filename, format=format) class ProvStore(object): def __init__(self): self.g = pm.ProvBundle(identifier=get_id()) self.g.add_namespace(foaf) self.g.add_namespace(dcterms) self.g.add_namespace(nipype_ns) self.g.add_namespace(niiri) def add_results(self, results): if results.provenance: try: self.g.add_bundle(results.provenance) except pm.ProvException: self.g.add_bundle(results.provenance, get_id()) return self.g runtime = results.runtime interface = results.interface inputs = results.inputs outputs = results.outputs classname = interface.__name__ a0_attrs = {nipype_ns['module']: interface.__module__, nipype_ns["interface"]: classname, pm.PROV["label"]: classname, nipype_ns['duration']: safe_encode(runtime.duration), nipype_ns['working_directory']: safe_encode(runtime.cwd), nipype_ns['return_code']: safe_encode(runtime.returncode), nipype_ns['platform']: safe_encode(runtime.platform), nipype_ns['version']: safe_encode(runtime.version), } try: a0_attrs[foaf["host"]] = pm.URIRef(runtime.hostname) except AttributeError: a0_attrs[foaf["host"]] = pm.Literal(runtime.hostname, pm.XSD['anyURI']) try: a0_attrs.update({nipype_ns['command']: safe_encode(runtime.cmdline)}) a0_attrs.update({nipype_ns['command_path']: safe_encode(runtime.command_path)}) a0_attrs.update({nipype_ns['dependencies']: safe_encode(runtime.dependencies)}) except AttributeError: pass a0 = self.g.activity(get_id(), runtime.startTime, runtime.endTime, a0_attrs) # environment id = get_id() env_collection = self.g.collection(id) env_collection.add_extra_attributes({pm.PROV['type']: nipype_ns['environment'], pm.PROV['label']: "Environment"}) self.g.used(a0, id) # write environment entities for idx, (key, val) in enumerate(sorted(runtime.environ.items())): if key not in ['PATH', 'FSLDIR', 'FREESURFER_HOME', 'ANTSPATH', 'CAMINOPATH', 'CLASSPATH', 'LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'FIX_VERTEX_AREA', 'FSF_OUTPUT_FORMAT', 'FSLCONFDIR', 'FSLOUTPUTTYPE', 'LOGNAME', 'USER', 'MKL_NUM_THREADS', 'OMP_NUM_THREADS']: continue in_attr = {pm.PROV["label"]: key, nipype_ns["environment_variable"]: key, pm.PROV["value"]: safe_encode(val)} id = get_attr_id(in_attr) self.g.entity(id, in_attr) self.g.hadMember(env_collection, id) # write input entities if inputs: id = get_id() input_collection = self.g.collection(id) input_collection.add_extra_attributes({pm.PROV['type']: nipype_ns['inputs'], pm.PROV['label']: "Inputs"}) # write input entities for idx, (key, val) in enumerate(sorted(inputs.items())): in_entity = prov_encode(self.g, val).get_identifier() self.g.hadMember(input_collection, in_entity) used_attr = {pm.PROV["label"]: 
key, nipype_ns["in_port"]: key} self.g.used(activity=a0, entity=in_entity, other_attributes=used_attr) # write output entities if outputs: id = get_id() output_collection = self.g.collection(id) if not isinstance(outputs, dict): outputs = outputs.get_traitsfree() output_collection.add_extra_attributes({pm.PROV['type']: nipype_ns['outputs'], pm.PROV['label']: "Outputs"}) self.g.wasGeneratedBy(output_collection, a0) # write output entities for idx, (key, val) in enumerate(sorted(outputs.items())): out_entity = prov_encode(self.g, val).get_identifier() self.g.hadMember(output_collection, out_entity) gen_attr = {pm.PROV["label"]: key, nipype_ns["out_port"]: key} self.g.generation(out_entity, activity=a0, other_attributes=gen_attr) # write runtime entities id = get_id() runtime_collection = self.g.collection(id) runtime_collection.add_extra_attributes({pm.PROV['type']: nipype_ns['runtime'], pm.PROV['label']: "RuntimeInfo"}) self.g.wasGeneratedBy(runtime_collection, a0) for key, value in sorted(runtime.items()): if not value: continue if key not in ['stdout', 'stderr', 'merged']: continue attr = {pm.PROV["label"]: key, nipype_ns[key]: safe_encode(value)} id = get_id() self.g.entity(get_id(), attr) self.g.hadMember(runtime_collection, id) # create agents user_attr = {pm.PROV["type"]: pm.PROV["Person"], pm.PROV["label"]: pwd.getpwuid(os.geteuid()).pw_name, foaf["name"]: safe_encode(pwd.getpwuid(os.geteuid()).pw_name)} user_agent = self.g.agent(get_attr_id(user_attr), user_attr) agent_attr = {pm.PROV["type"]: pm.PROV["SoftwareAgent"], pm.PROV["label"]: "Nipype", foaf["name"]: safe_encode("Nipype")} for key, value in get_info().items(): agent_attr.update({nipype_ns[key]: safe_encode(value)}) software_agent = self.g.agent(get_attr_id(agent_attr), agent_attr) self.g.wasAssociatedWith(a0, user_agent, None, None, {pm.PROV["hadRole"]: nipype_ns["LoggedInUser"]}) self.g.wasAssociatedWith(a0, software_agent) return self.g def write_provenance(self, filename='provenance', format='turtle'): try: if format in ['turtle', 'all']: self.g.rdf().serialize(filename + '.ttl', format='turtle') except (ImportError, NameError): format = 'all' finally: if format in ['provn', 'all']: with open(filename + '.provn', 'wt') as fp: fp.writelines(self.g.get_provn()) if format in ['json', 'all']: with open(filename + '.json', 'wt') as fp: pm.json.dump(self.g, fp, cls=pm.ProvBundle.JSONEncoder) return self.g nipype-0.9.2/nipype/utils/setup.py000066400000000000000000000007711227300005300172010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from os.path import join from os import getcwd def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('utils', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/utils/spm_docs.py000066400000000000000000000033121227300005300176420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Grab documentation from spm.""" import os from nipype.interfaces import matlab def grab_doc(task_name): """Grab the SPM documentation for the given SPM task named `task_name` Parameters ---------- task_name : string Task name for which we are grabbing documentation. 
Example task names are ``Realign: Estimate & Reslice``, ``Normalise: Estimate & Write``. See Also -------- spm_flat_config.m : This function can print out all the possible task names. """ cmd = matlab.MatlabCommandLine() # We need to tell Matlab where to find our spm_get_doc.m file. cwd = os.path.dirname(__file__) # Build matlab command mcmd = "addpath('%s');spm_get_doc('%s')" % (cwd, task_name) cmd.inputs.script_lines = mcmd # Run the command and get the documentation out of the result. out = cmd.run() return _strip_header(out.runtime.stdout) def _strip_header(doc): """Strip Matlab header and splash info off doc. Searches for the tag 'NIPYPE' in the doc and returns everything after that. """ hdr = 'NIPYPE' cruft = '\x1b' # There's some weird cruft at the end of the # docstring, almost looks like the hex for the # escape character 0x1b. try: index = doc.index(hdr) index += len(hdr) index += 1 doc = doc[index:] try: index = doc.index(cruft) except ValueError: index = len(doc) return doc[:index] except ValueError: # str.index raises ValueError when the tag is missing raise IOError('This docstring was not generated by Nipype!\n') nipype-0.9.2/nipype/utils/spm_flat_config.m000066400000000000000000000021011227300005300207640ustar00rootroot00000000000000function cfgstruct = spm_flat_config(print_names) % Get a flat spm_config structure, with option to print out names % % This calls spm_config() to get the nested configuration % structure from spm. We use this to fetch documentation; the % flattened structure is much easier to search through. If % print_names is true (value of 1) it will print out the configuration % names. If print_names is false (value of 0), it will only return % the flattened structure. if strcmp(spm('ver'),'SPM5') cfg = spm_config(); else cfgstruct = []; return; end cfgstruct = spm_cfg_list(cfg, {}); if print_names [rows, cols] = size(cfgstruct); for i = 1:cols fprintf(1, '%d : %s\n', i, cfgstruct{i}.name) end end end function objlist = spm_cfg_list(astruct, objlist) % Flatten the nested structure in 'astruct'. % Returns a cell array. % Usage: objlist = spm_cfg_list(astruct, {}) if isfield(astruct, 'values') [rows, cols] = size(astruct.values); for i = 1:cols objlist = spm_cfg_list(astruct.values{i}, objlist); end else objlist = {objlist{:} astruct}; end endnipype-0.9.2/nipype/utils/spm_get_doc.m000066400000000000000000000011641227300005300201250ustar00rootroot00000000000000function doc = spm_get_doc(docname) % Get the documentation from SPM for the functionality named % docname. % % This will search through the spm_config() object and grab the % documentation whose name matches docname. cfgstruct = spm_flat_config(0); [rows, cols] = size(cfgstruct); docstruct.help={'None'}; % Loop over cell array and search for the docname for i = 1:cols if strcmp(cfgstruct{i}.name, docname) docstruct = cfgstruct{i}; break end end % Add a tag so we can strip off the Matlab header information and % only print out the SPM documentation. tag = 'NIPYPE\n'; doc = strcat(tag, docstruct.help{:}); end nipype-0.9.2/nipype/utils/tests/000077500000000000000000000000001227300005300166245ustar00rootroot00000000000000nipype-0.9.2/nipype/utils/tests/__init__.py000066400000000000000000000022521227300005300207360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Please write tests for all code submitted to the repository.
The code will be used by many people, and will in due course be used in live analyses, so we need to make sure that we have the best possible defenses against bugs. It also helps us think about code interfaces, and gives examples of code use that can be useful for others using the code. Python's unit testing framework (the U{unittest} module) is used to implement project tests. We use the convention that each package contains a subpackage called tests which contains modules defining test cases (subclasses of U{unittest.TestCase}) for that package. The nipy.utils.tests package contains an example test case called L{test_template.TemplateTest} to get you started writing your tests. Please try to include working test cases for all functions and classes that you contribute. Often, writing tests for your code before the code is written helps to frame your thoughts about what the code should look like. """ nipype-0.9.2/nipype/utils/tests/test_docparse.py000066400000000000000000000026611227300005300220420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from nipype.testing import * from nipype.utils.docparse import reverse_opt_map, build_doc, insert_doc class Foo(object): opt_map = {'outline': '-o', 'fun': '-f %.2f', 'flags': '%s'} foo_doc = """Usage: foo infile outfile [opts] Bunch of options: -o something about an outline -f intensity of fun factor Other stuff: -v verbose """ fmtd_doc = """Parameters ---------- outline : something about an outline fun : intensity of fun factor Others Parameters ----------------- -v verbose""" def test_rev_opt_map(): map = {'-f': 'fun', '-o': 'outline'} rev_map = reverse_opt_map(Foo.opt_map) assert_equal(rev_map, map) def test_build_doc(): opts = reverse_opt_map(Foo.opt_map) doc = build_doc(foo_doc, opts) assert_equal(doc, fmtd_doc) inserted_doc = """Parameters ---------- infile : str The name of the input file outfile : str The name of the output file outline : something about an outline fun : intensity of fun factor Others Parameters ----------------- -v verbose""" def test_insert_doc(): new_items = ['infile : str', ' The name of the input file'] new_items.extend(['outfile : str', ' The name of the output file']) newdoc = insert_doc(fmtd_doc, new_items) assert_equal(newdoc, inserted_doc) nipype-0.9.2/nipype/utils/tests/test_filemanip.py000066400000000000000000000157121227300005300222070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from tempfile import mkstemp, mkdtemp from nipype.testing import assert_equal, assert_true, assert_false from nipype.utils.filemanip import (save_json, load_json, loadflat, fname_presuffix, fnames_presuffix, hash_rename, check_forhash, copyfile, copyfiles, filename_to_list, list_to_filename, split_filename, get_related_files) import numpy as np def test_split_filename(): res = split_filename('foo.nii') yield assert_equal, res, ('', 'foo', '.nii') res = split_filename('foo.nii.gz') yield assert_equal, res, ('', 'foo', '.nii.gz') res = split_filename('/usr/local/foo.nii.gz') yield assert_equal, res, ('/usr/local', 'foo', '.nii.gz') res = split_filename('../usr/local/foo.nii') yield assert_equal, res, ('../usr/local', 'foo', '.nii') res = split_filename('/usr/local/foo.a.b.c.d') yield assert_equal, res, ('/usr/local', 'foo.a.b.c', '.d') def test_fname_presuffix(): fname = 'foo.nii' pth = fname_presuffix(fname, 'pre_', '_post', '/tmp') yield 
assert_equal, pth, '/tmp/pre_foo_post.nii' fname += '.gz' pth = fname_presuffix(fname, 'pre_', '_post', '/tmp') yield assert_equal, pth, '/tmp/pre_foo_post.nii.gz' pth = fname_presuffix(fname, 'pre_', '_post', '/tmp', use_ext=False) yield assert_equal, pth, '/tmp/pre_foo_post' def test_fnames_presuffix(): fnames = ['foo.nii', 'bar.nii'] pths = fnames_presuffix(fnames, 'pre_', '_post', '/tmp') yield assert_equal, pths, ['/tmp/pre_foo_post.nii', '/tmp/pre_bar_post.nii'] def test_hash_rename(): new_name = hash_rename('foobar.nii', 'abc123') yield assert_equal, new_name, 'foobar_0xabc123.nii' new_name = hash_rename('foobar.nii.gz', 'abc123') yield assert_equal, new_name, 'foobar_0xabc123.nii.gz' def test_check_forhash(): fname = 'foobar' orig_hash = '_0x4323dbcefdc51906decd8edcb3327943' hashed_name = ''.join((fname, orig_hash, '.nii')) result, hash = check_forhash(hashed_name) yield assert_true, result yield assert_equal, hash, [orig_hash] result, hash = check_forhash('foobar.nii') yield assert_false, result yield assert_equal, hash, None def _temp_analyze_files(): """Generate temporary analyze file pair.""" fd, orig_img = mkstemp(suffix = '.img') orig_hdr = orig_img[:-4] + '.hdr' fp = file(orig_hdr, 'w+') fp.close() return orig_img, orig_hdr def test_copyfile(): orig_img, orig_hdr = _temp_analyze_files() pth, fname = os.path.split(orig_img) new_img = os.path.join(pth, 'newfile.img') new_hdr = os.path.join(pth, 'newfile.hdr') copyfile(orig_img, new_img) yield assert_true, os.path.exists(new_img) yield assert_true, os.path.exists(new_hdr) os.unlink(new_img) os.unlink(new_hdr) # final cleanup os.unlink(orig_img) os.unlink(orig_hdr) def test_copyfile_true(): orig_img, orig_hdr = _temp_analyze_files() pth, fname = os.path.split(orig_img) new_img = os.path.join(pth, 'newfile.img') new_hdr = os.path.join(pth, 'newfile.hdr') # Test with copy=True copyfile(orig_img, new_img, copy=True) yield assert_true, os.path.exists(new_img) yield assert_true, os.path.exists(new_hdr) os.unlink(new_img) os.unlink(new_hdr) # final cleanup os.unlink(orig_img) os.unlink(orig_hdr) def test_copyfiles(): orig_img1, orig_hdr1 = _temp_analyze_files() orig_img2, orig_hdr2 = _temp_analyze_files() pth, fname = os.path.split(orig_img1) new_img1 = os.path.join(pth, 'newfile.img') new_hdr1 = os.path.join(pth, 'newfile.hdr') pth, fname = os.path.split(orig_img2) new_img2 = os.path.join(pth, 'secondfile.img') new_hdr2 = os.path.join(pth, 'secondfile.hdr') newfiles = copyfiles([orig_img1, orig_img2], [new_img1, new_img2]) yield assert_true, os.path.exists(new_img1) yield assert_true, os.path.exists(new_hdr1) yield assert_true, os.path.exists(new_img2) yield assert_true, os.path.exists(new_hdr2) # cleanup os.unlink(orig_img1) os.unlink(orig_hdr1) os.unlink(orig_img2) os.unlink(orig_hdr2) os.unlink(new_img1) os.unlink(new_hdr1) os.unlink(new_img2) os.unlink(new_hdr2) def test_filename_to_list(): x = filename_to_list('foo.nii') yield assert_equal, x, ['foo.nii'] x = filename_to_list(['foo.nii']) yield assert_equal, x, ['foo.nii'] x = filename_to_list(('foo', 'bar')) yield assert_equal, x, ['foo', 'bar'] x = filename_to_list(12.34) yield assert_equal, x, None def test_list_to_filename(): x = list_to_filename(['foo.nii']) yield assert_equal, x, 'foo.nii' x = list_to_filename(['foo', 'bar']) yield assert_equal, x, ['foo', 'bar'] def test_json(): # Simple roundtrip test of json files, just a sanity check. 
adict = dict(a='one', c='three', b='two') fd, name = mkstemp(suffix='.json') save_json(name, adict) # save_json closes the file new_dict = load_json(name) os.unlink(name) yield assert_equal, sorted(adict.items()), sorted(new_dict.items()) def test_loadflat(): alist = [dict(a='one', c='three', b='two'), dict(a='one', c='three', b='two')] fd, name = mkstemp(suffix='.npz') np.savez(name,a=alist) aloaded = loadflat(name)['a'] os.unlink(name) yield assert_equal, len(aloaded), 2 yield assert_equal, sorted(aloaded[0].items()), sorted(alist[0].items()) adict = dict(a='one', c='three', b='two') fd, name = mkstemp(suffix='.npz') np.savez(name,a=adict) aloaded = loadflat(name)['a'] os.unlink(name) yield assert_true, isinstance(aloaded, dict) yield assert_equal, sorted(aloaded.items()), sorted(adict.items()) def test_related_files(): file1 = '/path/test.img' file2 = '/path/test.hdr' file3 = '/path/test.BRIK' file4 = '/path/test.HEAD' file5 = '/path/foo.nii' spm_files1 = get_related_files(file1) spm_files2 = get_related_files(file2) afni_files1 = get_related_files(file3) afni_files2 = get_related_files(file4) yield assert_equal, len(spm_files1), 3 yield assert_equal, len(spm_files2), 3 yield assert_equal, len(afni_files1), 2 yield assert_equal, len(afni_files2), 2 yield assert_equal, len(get_related_files(file5)), 1 yield assert_true, '/path/test.hdr' in spm_files1 yield assert_true, '/path/test.img' in spm_files1 yield assert_true, '/path/test.mat' in spm_files1 yield assert_true, '/path/test.hdr' in spm_files2 yield assert_true, '/path/test.img' in spm_files2 yield assert_true, '/path/test.mat' in spm_files2 yield assert_true, '/path/test.BRIK' in afni_files1 yield assert_true, '/path/test.HEAD' in afni_files1 yield assert_true, '/path/test.BRIK' in afni_files2 yield assert_true, '/path/test.HEAD' in afni_files2 nipype-0.9.2/nipype/utils/tests/test_misc.py000066400000000000000000000031701227300005300211710ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from nipype.testing import assert_equal, assert_true, assert_false from nipype.utils.misc import (container_to_string, getsource, create_function_from_source, str2bool) def test_cont_to_str(): # list x = ['a', 'b'] yield assert_equal, container_to_string(x), 'a b' # tuple x = tuple(x) yield assert_equal, container_to_string(x), 'a b' # set x = set(x) y = container_to_string(x) yield assert_true, (y == 'a b') or (y == 'b a') # dict x = dict(a='a', b='b') y = container_to_string(x) yield assert_true, (y == 'a b') or (y == 'b a') # string yield assert_equal, container_to_string('foobar'), 'foobar' # int. Integers are not the main intent of this function, but see # no reason why they shouldn't work. yield assert_equal, container_to_string(123), '123' def _func1(x): return x**3 def test_func_to_str(): def func1(x): return x**2 # Should be ok with both functions! 
for f in _func1, func1: f_src = getsource(f) f_recreated = create_function_from_source(f_src) yield assert_equal, f(2.3), f_recreated(2.3) def test_str2bool(): yield assert_true, str2bool("yes") yield assert_true, str2bool("true") yield assert_true, str2bool("t") yield assert_true, str2bool("1") yield assert_false, str2bool("no") yield assert_false, str2bool("false") yield assert_false, str2bool("n") yield assert_false, str2bool("f") yield assert_false, str2bool("0") nipype-0.9.2/nipype/utils/tmpdirs.py000066400000000000000000000023561227300005300175240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import shutil from tempfile import template, mkdtemp class TemporaryDirectory(object): """Create and return a temporary directory. This has the same behavior as mkdtemp but can be used as a context manager. For example: with TemporaryDirectory() as tmpdir: ... Upon exiting the context, the directory and everthing contained in it are removed. """ def __init__(self, suffix="", prefix=template, dir=None): self.name = mkdtemp(suffix, prefix, dir) self._closed = False def __enter__(self): return self.name def cleanup(self): if not self._closed: shutil.rmtree(self.name) self._closed = True def __exit__(self, exc, value, tb): self.cleanup() return False class InTemporaryDirectory(TemporaryDirectory): def __enter__(self): self._pwd = os.getcwd() os.chdir(self.name) return super(InTemporaryDirectory, self).__enter__() def __exit__(self, exc, value, tb): os.chdir(self._pwd) return super(InTemporaryDirectory, self).__exit__(exc, value, tb) nipype-0.9.2/nipype/workflows/000077500000000000000000000000001227300005300163575ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/__init__.py000066400000000000000000000001621227300005300204670ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: nipype-0.9.2/nipype/workflows/dmri/000077500000000000000000000000001227300005300173125ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/dmri/__init__.py000066400000000000000000000000331227300005300214170ustar00rootroot00000000000000import camino, mrtrix, fsl nipype-0.9.2/nipype/workflows/dmri/camino/000077500000000000000000000000001227300005300205605ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/dmri/camino/__init__.py000066400000000000000000000002621227300005300226710ustar00rootroot00000000000000from diffusion import create_camino_dti_pipeline from connectivity_mapping import create_connectivity_pipeline from group_connectivity import create_group_connectivity_pipeline nipype-0.9.2/nipype/workflows/dmri/camino/connectivity_mapping.py000066400000000000000000000607641227300005300254000ustar00rootroot00000000000000import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.camino as camino import nipype.interfaces.fsl as fsl import nipype.interfaces.camino2trackvis as cam2trk import nipype.interfaces.freesurfer as fs # freesurfer import nipype.interfaces.cmtk as cmtk import nipype.algorithms.misc as misc import inspect import os.path as op from ...misc.utils import (get_affine, get_data_dims, get_vox_dims, select_aparc, select_aparc_annot) def create_connectivity_pipeline(name="connectivity"): """Creates a pipeline that does the same connectivity processing as in the 
:ref:`example_dmri_connectivity` example script. Given a subject id (and completed Freesurfer reconstruction) diffusion-weighted image, b-values, and b-vectors, the workflow will return the subject's connectome as a Connectome File Format (CFF) file for use in Connectome Viewer (http://www.cmtk.org). Example ------- >>> from nipype.workflows.dmri.camino.connectivity_mapping import create_connectivity_pipeline >>> conmapper = create_connectivity_pipeline("nipype_conmap") >>> conmapper.inputs.inputnode.subjects_dir = '.' >>> conmapper.inputs.inputnode.subject_id = 'subj1' >>> conmapper.inputs.inputnode.dwi = 'data.nii.gz' >>> conmapper.inputs.inputnode.bvecs = 'bvecs' >>> conmapper.inputs.inputnode.bvals = 'bvals' >>> conmapper.run() # doctest: +SKIP Inputs:: inputnode.subject_id inputnode.subjects_dir inputnode.dwi inputnode.bvecs inputnode.bvals inputnode.resolution_network_file Outputs:: outputnode.connectome outputnode.cmatrix outputnode.gpickled_network outputnode.fa outputnode.struct outputnode.trace outputnode.tracts outputnode.tensors """ inputnode_within = pe.Node(interface=util.IdentityInterface(fields=["subject_id", "dwi", "bvecs", "bvals", "subjects_dir", "resolution_network_file", ]), name="inputnode_within") FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource') FreeSurferSourceLH = pe.Node(interface=nio.FreeSurferSource(), name='fssourceLH') FreeSurferSourceLH.inputs.hemi = 'lh' FreeSurferSourceRH = pe.Node(interface=nio.FreeSurferSource(), name='fssourceRH') FreeSurferSourceRH.inputs.hemi = 'rh' """ Since the b values and b vectors come from the FSL course, we must convert it to a scheme file for use in Camino. """ fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme") fsl2scheme.inputs.usegradmod = True """ FSL's Brain Extraction tool is used to create a mask from the b0 image """ b0Strip = pe.Node(interface=fsl.BET(mask = True), name = 'bet_b0') """ FSL's FLIRT function is used to coregister the b0 mask and the structural image. A convert_xfm node is then used to obtain the inverse of the transformation matrix. FLIRT is used once again to apply the inverse transformation to the parcellated brain image. """ coregister = pe.Node(interface=fsl.FLIRT(dof=6), name = 'coregister') coregister.inputs.cost = ('normmi') convertxfm = pe.Node(interface=fsl.ConvertXFM(), name = 'convertxfm') convertxfm.inputs.invert_xfm = True inverse = pe.Node(interface=fsl.FLIRT(), name = 'inverse') inverse.inputs.interp = ('nearestneighbour') inverse_AparcAseg = pe.Node(interface=fsl.FLIRT(), name = 'inverse_AparcAseg') inverse_AparcAseg.inputs.interp = ('nearestneighbour') """ A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject. 
Nodes are used to convert the following: * Original structural image to NIFTI * Parcellated white matter image to NIFTI * Parcellated whole-brain image to NIFTI * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres are converted to GIFTI for visualization in ConnectomeViewer * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI """ mri_convert_Brain = pe.Node(interface=fs.MRIConvert(), name='mri_convert_Brain') mri_convert_Brain.inputs.out_type = 'nii' mri_convert_AparcAseg = mri_convert_Brain.clone('mri_convert_AparcAseg') mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH') mris_convertLH.inputs.out_datatype = 'gii' mris_convertRH = mris_convertLH.clone('mris_convertRH') mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite') mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite') mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated') mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated') mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere') mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere') mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels') mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels') """ In this section we create the nodes necessary for diffusion analysis. First, the diffusion image is converted to voxel order, since this is the format in which Camino does its processing. """ image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel") """ Second, diffusion tensors are fit to the voxel-order data. If desired, these tensors can be converted to a Nifti tensor image using the DT2NIfTI interface. """ dtifit = pe.Node(interface=camino.DTIFit(),name='dtifit') """ Next, a lookup table is generated from the schemefile and the signal-to-noise ratio (SNR) of the unweighted (q=0) data. """ dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen") dtlutgen.inputs.snr = 16.0 dtlutgen.inputs.inversion = 1 """ In this tutorial we implement probabilistic tractography using the PICo algorithm. PICo tractography requires an estimate of the fibre direction and a model of its uncertainty in each voxel; this probabilitiy distribution map is produced using the following node. """ picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs") picopdfs.inputs.inputmodel = 'dt' """ Finally, tractography is performed. In this tutorial, we will use only one iteration for time-saving purposes. It is important to note that we use the TrackPICo interface here. This interface now expects the files required for PICo tracking (i.e. the output from picopdfs). Similar interfaces exist for alternative types of tracking, such as Bayesian tracking with Dirac priors (TrackBayesDirac). """ track = pe.Node(interface=camino.TrackPICo(), name="track") track.inputs.iterations = 1 """ Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse. """ camino2trackvis = pe.Node(interface=cam2trk.Camino2Trackvis(), name="camino2trackvis") camino2trackvis.inputs.min_length = 30 camino2trackvis.inputs.voxel_order = 'LAS' trk2camino = pe.Node(interface=cam2trk.Trackvis2Camino(), name="trk2camino") """ Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, using the following two nodes. 
""" vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines") procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines") """ We can easily produce a variety of scalar values from our fitted tensors. The following nodes generate the fractional anisotropy and diffusivity trace maps and their associated headers, and then merge them back into a single .nii file. """ fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(),name='fa') trace = pe.Node(interface=camino.ComputeTensorTrace(),name='trace') dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig') analyzeheader_fa = pe.Node(interface=camino.AnalyzeHeader(),name='analyzeheader_fa') analyzeheader_fa.inputs.datatype = 'double' analyzeheader_trace = pe.Node(interface=camino.AnalyzeHeader(),name='analyzeheader_trace') analyzeheader_trace.inputs.datatype = 'double' fa2nii = pe.Node(interface=misc.CreateNifti(),name='fa2nii') trace2nii = fa2nii.clone("trace2nii") """ This section adds the Connectome Mapping Toolkit (CMTK) nodes. These interfaces are fairly experimental and may not function properly. In order to perform connectivity mapping using CMTK, the parcellated structural data is rewritten using the indices and parcellation scheme from the connectome mapper (CMP). This process has been written into the ROIGen interface, which will output a remapped aparc+aseg image as well as a dictionary of label information (i.e. name, display colours) pertaining to the original and remapped regions. These label values are input from a user-input lookup table, if specified, and otherwise the default Freesurfer LUT (/freesurfer/FreeSurferColorLUT.txt). """ roigen = pe.Node(interface=cmtk.ROIGen(), name="ROIGen") roigen_structspace = roigen.clone("ROIGen_structspace") """ The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts and outputs a number of different files. The most important of which is the connectivity network itself, which is stored as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). Also outputted are various NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the specific tracts that connect between user-selected regions. """ createnodes = pe.Node(interface=cmtk.CreateNodes(), name="CreateNodes") creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix") creatematrix.inputs.count_region_intersections = True """ Here we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file. """ CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter") giftiSurfaces = pe.Node(interface=util.Merge(8), name="GiftiSurfaces") giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels") niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes") fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays") gpickledNetworks = pe.Node(interface=util.Merge(1), name="NetworkFiles") """ Since we have now created all our nodes, we can define our workflow and start making connections. """ mapping = pe.Workflow(name='mapping') """ First, we connect the input node to the early conversion functions. 
FreeSurfer input nodes: """ mapping.connect([(inputnode_within, FreeSurferSource,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode_within, FreeSurferSource,[("subject_id","subject_id")])]) mapping.connect([(inputnode_within, FreeSurferSourceLH,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode_within, FreeSurferSourceLH,[("subject_id","subject_id")])]) mapping.connect([(inputnode_within, FreeSurferSourceRH,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode_within, FreeSurferSourceRH,[("subject_id","subject_id")])]) """ Required conversions for processing in Camino: """ mapping.connect([(inputnode_within, image2voxel, [("dwi", "in_file")]), (inputnode_within, fsl2scheme, [("bvecs", "bvec_file"), ("bvals", "bval_file")]), (image2voxel, dtifit,[['voxel_order','in_file']]), (fsl2scheme, dtifit,[['scheme','scheme_file']]) ]) """ Nifti conversions for the subject's stripped brain image from Freesurfer: """ mapping.connect([(FreeSurferSource, mri_convert_Brain,[('brain','in_file')])]) """ Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres) """ mapping.connect([(FreeSurferSourceLH, mris_convertLH,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRH,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite,[('white','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite,[('white','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated,[('inflated','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated,[('inflated','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere,[('sphere','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere,[('sphere','in_file')])]) """ The annotation files are converted using the pial surface as a map via the MRIsConvert interface. One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files specifically (rather than i.e. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource. """ mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, [(('annot', select_aparc_annot), 'annot_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, [(('annot', select_aparc_annot), 'annot_file')])]) """ This section coregisters the diffusion-weighted and parcellated white-matter / whole brain images. At present the conmap node connection is left commented, as there have been recent changes in Camino code that have presented some users with errors. """ mapping.connect([(inputnode_within, b0Strip,[('dwi','in_file')])]) mapping.connect([(inputnode_within, b0Strip,[('dwi','t2_guided')])]) # Added to improve damaged brain extraction mapping.connect([(b0Strip, coregister,[('out_file','in_file')])]) mapping.connect([(mri_convert_Brain, coregister,[('out_file','reference')])]) mapping.connect([(coregister, convertxfm,[('out_matrix_file','in_file')])]) mapping.connect([(b0Strip, inverse,[('out_file','reference')])]) mapping.connect([(convertxfm, inverse,[('out_file','in_matrix_file')])]) mapping.connect([(mri_convert_Brain, inverse,[('out_file','in_file')])]) """ The tractography pipeline consists of the following nodes. Further information about the tractography can be found in nipype/examples/dmri_camino_dti.py. 
""" mapping.connect([(b0Strip, track,[("mask_file","seed_file")])]) mapping.connect([(fsl2scheme, dtlutgen,[("scheme","scheme_file")])]) mapping.connect([(dtlutgen, picopdfs,[("dtLUT","luts")])]) mapping.connect([(dtifit, picopdfs,[("tensor_fitted","in_file")])]) mapping.connect([(picopdfs, track,[("pdfs","in_file")])]) """ Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the tensor fitting. This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with the original DWI image from the input node, to the header-generating nodes. This ensures that the files will be correct and readable. """ mapping.connect([(dtifit, fa,[("tensor_fitted","in_file")])]) mapping.connect([(fa, analyzeheader_fa,[("fa","in_file")])]) mapping.connect([(inputnode_within, analyzeheader_fa,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) mapping.connect([(fa, fa2nii,[('fa','data_file')])]) mapping.connect([(inputnode_within, fa2nii,[(('dwi', get_affine), 'affine')])]) mapping.connect([(analyzeheader_fa, fa2nii,[('header', 'header_file')])]) mapping.connect([(dtifit, trace,[("tensor_fitted","in_file")])]) mapping.connect([(trace, analyzeheader_trace,[("trace","in_file")])]) mapping.connect([(inputnode_within, analyzeheader_trace,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) mapping.connect([(trace, trace2nii,[('trace','data_file')])]) mapping.connect([(inputnode_within, trace2nii,[(('dwi', get_affine), 'affine')])]) mapping.connect([(analyzeheader_trace, trace2nii,[('header', 'header_file')])]) mapping.connect([(dtifit, dteig,[("tensor_fitted","in_file")])]) """ The output tracts are converted to Trackvis format (and back). Here we also use the voxel- and data-grabbing functions defined at the beginning of the pipeline. """ mapping.connect([(track, camino2trackvis, [('tracked','in_file')]), (track, vtkstreamlines,[['tracked','in_file']]), (camino2trackvis, trk2camino,[['trackvis','in_file']]) ]) mapping.connect([(inputnode_within, camino2trackvis,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) """ Here the CMTK connectivity mapping nodes are connected. The original aparc+aseg image is converted to NIFTI, then registered to the diffusion image and delivered to the ROIGen node. The remapped parcellation, original tracts, and label file are then given to CreateMatrix. 
""" mapping.connect(inputnode_within, 'resolution_network_file', createnodes, 'resolution_network_file') mapping.connect(createnodes, 'node_network', creatematrix, 'resolution_network_file') mapping.connect([(FreeSurferSource, mri_convert_AparcAseg, [(('aparc_aseg', select_aparc), 'in_file')])]) mapping.connect([(b0Strip, inverse_AparcAseg,[('out_file','reference')])]) mapping.connect([(convertxfm, inverse_AparcAseg,[('out_file','in_matrix_file')])]) mapping.connect([(mri_convert_AparcAseg, inverse_AparcAseg,[('out_file','in_file')])]) mapping.connect([(mri_convert_AparcAseg, roigen_structspace,[('out_file','aparc_aseg_file')])]) mapping.connect([(roigen_structspace, createnodes,[("roi_file","roi_file")])]) mapping.connect([(inverse_AparcAseg, roigen,[("out_file","aparc_aseg_file")])]) mapping.connect([(roigen, creatematrix,[("roi_file","roi_file")])]) mapping.connect([(camino2trackvis, creatematrix,[("trackvis","tract_file")])]) mapping.connect([(inputnode_within, creatematrix,[("subject_id","out_matrix_file")])]) mapping.connect([(inputnode_within, creatematrix,[("subject_id","out_matrix_mat_file")])]) """ The merge nodes defined earlier are used here to create lists of the files which are destined for the CFFConverter. """ mapping.connect([(mris_convertLH, giftiSurfaces,[("converted","in1")])]) mapping.connect([(mris_convertRH, giftiSurfaces,[("converted","in2")])]) mapping.connect([(mris_convertLHwhite, giftiSurfaces,[("converted","in3")])]) mapping.connect([(mris_convertRHwhite, giftiSurfaces,[("converted","in4")])]) mapping.connect([(mris_convertLHinflated, giftiSurfaces,[("converted","in5")])]) mapping.connect([(mris_convertRHinflated, giftiSurfaces,[("converted","in6")])]) mapping.connect([(mris_convertLHsphere, giftiSurfaces,[("converted","in7")])]) mapping.connect([(mris_convertRHsphere, giftiSurfaces,[("converted","in8")])]) mapping.connect([(mris_convertLHlabels, giftiLabels,[("converted","in1")])]) mapping.connect([(mris_convertRHlabels, giftiLabels,[("converted","in2")])]) mapping.connect([(roigen, niftiVolumes,[("roi_file","in1")])]) mapping.connect([(inputnode_within, niftiVolumes,[("dwi","in2")])]) mapping.connect([(mri_convert_Brain, niftiVolumes,[("out_file","in3")])]) mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file","in1")])]) mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file_mm","in2")])]) mapping.connect([(creatematrix, fiberDataArrays,[("fiber_length_file","in3")])]) mapping.connect([(creatematrix, fiberDataArrays,[("fiber_label_file","in4")])]) """ This block actually connects the merged lists to the CFF converter. We pass the surfaces and volumes that are to be included, as well as the tracts and the network itself. The currently running pipeline (dmri_connectivity.py) is also scraped and included in the CFF file. This makes it easy for the user to examine the entire processing pathway used to generate the end product. 
""" CFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe())) mapping.connect([(giftiSurfaces, CFFConverter,[("out","gifti_surfaces")])]) mapping.connect([(giftiLabels, CFFConverter,[("out","gifti_labels")])]) mapping.connect([(creatematrix, CFFConverter,[("matrix_files","gpickled_networks")])]) mapping.connect([(niftiVolumes, CFFConverter,[("out","nifti_volumes")])]) mapping.connect([(fiberDataArrays, CFFConverter,[("out","data_files")])]) mapping.connect([(camino2trackvis, CFFConverter,[("trackvis","tract_files")])]) mapping.connect([(inputnode_within, CFFConverter,[("subject_id","title")])]) """ Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding their names to the subject list and their data to the proper folders. """ inputnode = pe.Node(interface=util.IdentityInterface(fields=["subject_id", "dwi", "bvecs", "bvals", "subjects_dir", "resolution_network_file"]), name="inputnode") outputnode = pe.Node(interface = util.IdentityInterface(fields=["fa", "struct", "trace", "tracts", "connectome", "cmatrix", "networks", "rois", "mean_fiber_length", "fiber_length_std", "tensors"]), name="outputnode") connectivity = pe.Workflow(name="connectivity") connectivity.base_output_dir=name connectivity.connect([(inputnode, mapping, [("dwi", "inputnode_within.dwi"), ("bvals", "inputnode_within.bvals"), ("bvecs", "inputnode_within.bvecs"), ("subject_id", "inputnode_within.subject_id"), ("subjects_dir", "inputnode_within.subjects_dir"), ("resolution_network_file", "inputnode_within.resolution_network_file")]) ]) connectivity.connect([(mapping, outputnode, [("camino2trackvis.trackvis", "tracts"), ("CFFConverter.connectome_file", "connectome"), ("CreateMatrix.matrix_mat_file", "cmatrix"), ("CreateMatrix.mean_fiber_length_matrix_mat_file", "mean_fiber_length"), ("CreateMatrix.fiber_length_std_matrix_mat_file", "fiber_length_std"), ("fa2nii.nifti_file", "fa"), ("CreateMatrix.matrix_files", "networks"), ("ROIGen.roi_file", "rois"), ("mri_convert_Brain.out_file", "struct"), ("trace2nii.nifti_file", "trace"), ("dtifit.tensor_fitted", "tensors")]) ]) return connectivity nipype-0.9.2/nipype/workflows/dmri/camino/diffusion.py000066400000000000000000000243531227300005300231270ustar00rootroot00000000000000import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.camino as camino import nipype.interfaces.fsl as fsl import nipype.interfaces.camino2trackvis as cam2trk import nipype.algorithms.misc as misc from ...misc.utils import get_affine, get_data_dims, get_vox_dims def create_camino_dti_pipeline(name="dtiproc"): """Creates a pipeline that does the same diffusion processing as in the :doc:`../../users/examples/dmri_camino_dti` example script. Given a diffusion-weighted image, b-values, and b-vectors, the workflow will return the tractography computed from diffusion tensors and from PICo probabilistic tractography. 
Example ------- >>> import os >>> nipype_camino_dti = create_camino_dti_pipeline("nipype_camino_dti") >>> nipype_camino_dti.inputs.inputnode.dwi = os.path.abspath('dwi.nii') >>> nipype_camino_dti.inputs.inputnode.bvecs = os.path.abspath('bvecs') >>> nipype_camino_dti.inputs.inputnode.bvals = os.path.abspath('bvals') >>> nipype_camino_dti.run() # doctest: +SKIP Inputs:: inputnode.dwi inputnode.bvecs inputnode.bvals Outputs:: outputnode.fa outputnode.trace outputnode.tracts_pico outputnode.tracts_dt outputnode.tensors """ inputnode1 = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode1") """ Setup for Diffusion Tensor Computation -------------------------------------- In this section we create the nodes necessary for diffusion analysis. First, the diffusion image is converted to voxel order. """ image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel") fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme") fsl2scheme.inputs.usegradmod = True """ Second, diffusion tensors are fit to the voxel-order data. """ dtifit = pe.Node(interface=camino.DTIFit(),name='dtifit') """ Next, a lookup table is generated from the schemefile and the signal-to-noise ratio (SNR) of the unweighted (q=0) data. """ dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen") dtlutgen.inputs.snr = 16.0 dtlutgen.inputs.inversion = 1 """ In this tutorial we implement probabilistic tractography using the PICo algorithm. PICo tractography requires an estimate of the fibre direction and a model of its uncertainty in each voxel; this is produced using the following node. """ picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs") picopdfs.inputs.inputmodel = 'dt' """ An FSL BET node creates a brain mask is generated from the diffusion image for seeding the PICo tractography. """ bet = pe.Node(interface=fsl.BET(), name="bet") bet.inputs.mask = True """ Finally, tractography is performed. First DT streamline tractography. """ trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt") """ Now camino's Probablistic Index of connectivity algorithm. In this tutorial, we will use only 1 iteration for time-saving purposes. """ trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico") trackpico.inputs.iterations = 1 """ Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse. """ cam2trk_dt = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt") cam2trk_dt.inputs.min_length = 30 cam2trk_dt.inputs.voxel_order = 'LAS' cam2trk_pico = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico") cam2trk_pico.inputs.min_length = 30 cam2trk_pico.inputs.voxel_order = 'LAS' """ Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, using the following two nodes. """ #vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines") #procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines") #procstreamlines.inputs.outputtracts = 'oogl' """ We can also produce a variety of scalar values from our fitted tensors. The following nodes generate the fractional anisotropy and diffusivity trace maps and their associated headers. 
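For reference only, and not part of the pipeline (the actual computation below is delegated to Camino): both scalar maps follow the standard eigenvalue-based definitions, sketched here with NumPy. The helper functions are our own illustration, not nipype or Camino API.

    # Reference sketch of the standard definitions in terms of the three
    # tensor eigenvalues l1, l2, l3.
    import numpy as np

    def tensor_trace(l1, l2, l3):
        # The trace is the sum of the eigenvalues (three times the mean diffusivity).
        return l1 + l2 + l3

    def fractional_anisotropy(l1, l2, l3):
        # Standard FA definition (Basser & Pierpaoli): sqrt(3/2) times the
        # normalised dispersion of the eigenvalues about their mean.
        md = (l1 + l2 + l3) / 3.0
        num = (l1 - md) ** 2 + (l2 - md) ** 2 + (l3 - md) ** 2
        den = l1 ** 2 + l2 ** 2 + l3 ** 2
        return np.sqrt(1.5 * num / den)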
""" fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(),name='fa') #md = pe.Node(interface=camino.MD(),name='md') trace = pe.Node(interface=camino.ComputeTensorTrace(),name='trace') dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig') analyzeheader_fa = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_fa") analyzeheader_fa.inputs.datatype = "double" analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace') #analyzeheader_md = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_md") #analyzeheader_md.inputs.datatype = "double" #analyzeheader_trace = analyzeheader_md.clone('analyzeheader_trace') fa2nii = pe.Node(interface=misc.CreateNifti(),name='fa2nii') trace2nii = fa2nii.clone("trace2nii") """ Since we have now created all our nodes, we can now define our workflow and start making connections. """ tractography = pe.Workflow(name='tractography') tractography.connect([(inputnode1, bet,[("dwi","in_file")])]) """ File format conversion """ tractography.connect([(inputnode1, image2voxel, [("dwi", "in_file")]), (inputnode1, fsl2scheme, [("bvecs", "bvec_file"), ("bvals", "bval_file")]) ]) """ Tensor fitting """ tractography.connect([(image2voxel, dtifit,[['voxel_order','in_file']]), (fsl2scheme, dtifit,[['scheme','scheme_file']]) ]) """ Workflow for applying DT streamline tractogpahy """ tractography.connect([(bet, trackdt,[("mask_file","seed_file")])]) tractography.connect([(dtifit, trackdt,[("tensor_fitted","in_file")])]) """ Workflow for applying PICo """ tractography.connect([(bet, trackpico,[("mask_file","seed_file")])]) tractography.connect([(fsl2scheme, dtlutgen,[("scheme","scheme_file")])]) tractography.connect([(dtlutgen, picopdfs,[("dtLUT","luts")])]) tractography.connect([(dtifit, picopdfs,[("tensor_fitted","in_file")])]) tractography.connect([(picopdfs, trackpico,[("pdfs","in_file")])]) # Mean diffusivity still appears broken #tractography.connect([(dtifit, md,[("tensor_fitted","in_file")])]) #tractography.connect([(md, analyzeheader_md,[("md","in_file")])]) #tractography.connect([(inputnode, analyzeheader_md,[(('dwi', get_vox_dims), 'voxel_dims'), #(('dwi', get_data_dims), 'data_dims')])]) #This line is commented out because the ProcStreamlines node keeps throwing memory errors #tractography.connect([(track, procstreamlines,[("tracked","in_file")])]) """ Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the tensor fitting. This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with the original DWI image from the input node, to the header-generating nodes. This ensures that the files will be correct and readable. 
""" tractography.connect([(dtifit, fa,[("tensor_fitted","in_file")])]) tractography.connect([(fa, analyzeheader_fa,[("fa","in_file")])]) tractography.connect([(inputnode1, analyzeheader_fa,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) tractography.connect([(fa, fa2nii,[('fa','data_file')])]) tractography.connect([(inputnode1, fa2nii,[(('dwi', get_affine), 'affine')])]) tractography.connect([(analyzeheader_fa, fa2nii,[('header', 'header_file')])]) tractography.connect([(dtifit, trace,[("tensor_fitted","in_file")])]) tractography.connect([(trace, analyzeheader_trace,[("trace","in_file")])]) tractography.connect([(inputnode1, analyzeheader_trace,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) tractography.connect([(trace, trace2nii,[('trace','data_file')])]) tractography.connect([(inputnode1, trace2nii,[(('dwi', get_affine), 'affine')])]) tractography.connect([(analyzeheader_trace, trace2nii,[('header', 'header_file')])]) tractography.connect([(dtifit, dteig,[("tensor_fitted","in_file")])]) tractography.connect([(trackpico, cam2trk_pico, [('tracked','in_file')])]) tractography.connect([(trackdt, cam2trk_dt, [('tracked','in_file')])]) tractography.connect([(inputnode1, cam2trk_pico,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) tractography.connect([(inputnode1, cam2trk_dt,[(('dwi', get_vox_dims), 'voxel_dims'), (('dwi', get_data_dims), 'data_dims')])]) inputnode= pe.Node(interface = util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode") outputnode = pe.Node(interface = util.IdentityInterface(fields=["fa", "trace", "tracts_pico", "tracts_dt", "tensors"]), name="outputnode") workflow = pe.Workflow(name=name) workflow.base_output_dir=name workflow.connect([(inputnode, tractography, [("dwi", "inputnode1.dwi"), ("bvals", "inputnode1.bvals"), ("bvecs", "inputnode1.bvecs")])]) workflow.connect([(tractography, outputnode, [("cam2trk_dt.trackvis", "tracts_dt"), ("cam2trk_pico.trackvis", "tracts_pico"), ("fa2nii.nifti_file", "fa"), ("trace2nii.nifti_file", "trace"), ("dtifit.tensor_fitted", "tensors")]) ]) return workflow nipype-0.9.2/nipype/workflows/dmri/camino/group_connectivity.py000066400000000000000000000122541227300005300250700ustar00rootroot00000000000000import os.path as op # system functions import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine from .connectivity_mapping import create_connectivity_pipeline def create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, template_args_dict=0): """Creates a pipeline that performs basic Camino structural connectivity processing on groups of subjects. Given a diffusion-weighted image, and text files containing the associated b-values and b-vectors, the workflow will return each subjects' connectomes in a Connectome File Format (CFF) file, for use in Connectome Viewer (http://www.cmtk.org). Example ------- >>> import nipype.interfaces.freesurfer as fs >>> import nipype.workflows.dmri.camino.group_connectivity as groupwork >>> subjects_dir = '.' >>> data_dir = '.' >>> output_dir = '.' 
>>> fs.FSCommand.set_default_subjects_dir(subjects_dir) >>> group_list = {} >>> group_list['group1'] = ['subj1', 'subj2'] >>> group_list['group2'] = ['subj3', 'subj4'] >>> template_args = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id', 'bvecs']], bvals=[['subject_id', 'bvals']]) >>> group_id = 'group1' >>> l1pipeline = groupwork.create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, template_args) >>> l1pipeline.run() # doctest: +SKIP Inputs:: group_list: Dictionary of subject lists, keyed by group name group_id: String containing the group name data_dir: Path to the data directory subjects_dir: Path to the Freesurfer 'subjects' directory output_dir: Path for the output files template_args_dict: Dictionary of template arguments for the connectivity pipeline datasource e.g. info = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id','bvecs']], bvals=[['subject_id','bvals']]) """ group_infosource = pe.Node(interface=util.IdentityInterface(fields=['group_id']), name="group_infosource") group_infosource.inputs.group_id = group_id subject_list = group_list[group_id] subj_infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="subj_infosource") subj_infosource.iterables = ('subject_id', subject_list) if template_args_dict == 0: info = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id','bvecs']], bvals=[['subject_id','bvals']]) else: info = template_args_dict datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=info.keys()), name = 'datasource') datasource.inputs.template = "%s/%s" datasource.inputs.base_directory = data_dir datasource.inputs.field_template = dict(dwi='%s/%s.nii') datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Create a connectivity mapping workflow """ conmapper = create_connectivity_pipeline("nipype_conmap") conmapper.inputs.inputnode.subjects_dir = subjects_dir conmapper.base_dir = op.abspath('conmapper') datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = output_dir datasink.inputs.container = group_id l1pipeline = pe.Workflow(name="l1pipeline_"+group_id) l1pipeline.base_dir = output_dir l1pipeline.base_output_dir = group_id l1pipeline.connect([(subj_infosource, datasource,[('subject_id', 'subject_id')])]) l1pipeline.connect([(subj_infosource, conmapper,[('subject_id', 'inputnode.subject_id')])]) l1pipeline.connect([(datasource, conmapper, [("dwi", "inputnode.dwi"), ("bvals", "inputnode.bvals"), ("bvecs", "inputnode.bvecs"), ])]) l1pipeline.connect([(conmapper, datasink, [("outputnode.connectome", "@l1output.cff"), ("outputnode.fa", "@l1output.fa"), ("outputnode.tracts", "@l1output.tracts"), ("outputnode.trace", "@l1output.trace"), ("outputnode.cmatrix", "@l1output.cmatrix"), ("outputnode.rois", "@l1output.rois"), ("outputnode.struct", "@l1output.struct"), ("outputnode.networks", "@l1output.networks"), ("outputnode.mean_fiber_length", "@l1output.mean_fiber_length"), ("outputnode.fiber_length_std", "@l1output.fiber_length_std"), ])]) l1pipeline.connect([(group_infosource, datasink,[('group_id','@group_id')])]) return l1pipeline nipype-0.9.2/nipype/workflows/dmri/camino/setup.py000066400000000000000000000006511227300005300222740ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = 
Configuration('camino', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/dmri/connectivity/000077500000000000000000000000001227300005300220305ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/dmri/connectivity/__init__.py000066400000000000000000000006771227300005300241530ustar00rootroot00000000000000from nx import (create_networkx_pipeline, create_cmats_to_csv_pipeline) from group_connectivity import (create_merge_networks_by_group_workflow, create_merge_network_results_by_group_workflow, create_merge_group_networks_workflow, create_merge_group_network_results_workflow, create_average_networks_by_group_workflow) nipype-0.9.2/nipype/workflows/dmri/connectivity/group_connectivity.py000066400000000000000000000565431227300005300263510ustar00rootroot00000000000000import os.path as op import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.interfaces.cmtk as cmtk import nipype.algorithms.misc as misc import nipype.pipeline.engine as pe # pypeline engine from nipype.interfaces.utility import Function from nipype.utils.misc import package_check have_cmp = True try: package_check('cmp') except Exception, e: have_cmp = False else: import cmp def pullnodeIDs(in_network, name_key='dn_name'): """ This function will return the values contained, for each node in a network, given an input key. By default it will return the node names """ import networkx as nx import numpy as np from nipype.interfaces.base import isdefined if not isdefined(in_network): raise ValueError return None try: ntwk = nx.read_graphml(in_network) except: ntwk = nx.read_gpickle(in_network) nodedata = ntwk.node ids = [] integer_nodelist = [] for node in nodedata.keys(): integer_nodelist.append(int(node)) for node in np.sort(integer_nodelist): try: nodeid = nodedata[node][name_key] except KeyError: nodeid = nodedata[str(node)][name_key] ids.append(nodeid) return ids def concatcsv(in_files): """ This function will contatenate two "comma-separated value" text files, but remove the first row (usually column headers) from all but the first file. """ import os.path as op from nipype.utils.filemanip import split_filename if not isinstance(in_files, list): return in_files if isinstance(in_files[0], list): in_files = in_files[0] first = open(in_files[0], 'r') path, name, ext = split_filename(in_files[0]) out_name = op.abspath('concat.csv') out_file = open(out_name, 'w') out_file.write(first.readline()) first.close() for in_file in in_files: file_to_read = open(in_file, 'r') scrap_first_line = file_to_read.readline() for line in file_to_read: out_file.write(line) return out_name def create_merge_networks_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir): """Creates a second-level pipeline to merge the Connectome File Format (CFF) outputs from the group-level MRtrix structural connectivity processing pipeline into a single CFF file for each group. Example ------- >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork >>> from nipype.testing import example_data >>> subjects_dir = '.' >>> data_dir = '.' >>> output_dir = '.' 
>>> group_list = {} >>> group_list['group1'] = ['subj1', 'subj2'] >>> group_list['group2'] = ['subj3', 'subj4'] >>> group_id = 'group1' >>> l2pipeline = groupwork.create_merge_networks_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir) >>> l2pipeline.run() # doctest: +SKIP Inputs:: group_list: Dictionary of subject lists, keyed by group name group_id: String containing the group name data_dir: Path to the data directory subjects_dir: Path to the Freesurfer 'subjects' directory output_dir: Path for the output files """ group_infosource = pe.Node(interface=util.IdentityInterface( fields=['group_id']), name="group_infosource") group_infosource.inputs.group_id = group_id l2infosource = pe.Node(interface=util.IdentityInterface( fields=['group_id']), name='l2infosource') l2source = pe.Node(nio.DataGrabber( infields=['group_id'], outfields=['CFFfiles']), name='l2source') l2source.inputs.template_args = dict(CFFfiles=[['group_id']]) l2source.inputs.template = op.join(output_dir, '%s/cff/*/connectome.cff') l2source.inputs.base_directory = data_dir l2source.inputs.sort_filelist = True l2inputnode = pe.Node(interface=util.IdentityInterface( fields=['CFFfiles']), name='l2inputnode') MergeCNetworks = pe.Node( interface=cmtk.MergeCNetworks(), name="MergeCNetworks") l2datasink = pe.Node(interface=nio.DataSink(), name="l2datasink") l2datasink.inputs.base_directory = output_dir l2datasink.inputs.container = group_id l2pipeline = pe.Workflow(name="l2output_" + group_id) l2pipeline.base_dir = op.join(output_dir, 'l2output') l2pipeline.connect( [(group_infosource, l2infosource, [('group_id', 'group_id')])]) l2pipeline.connect([ (l2infosource, l2source, [('group_id', 'group_id')]), (l2source, l2inputnode, [('CFFfiles', 'CFFfiles')]), ]) l2pipeline.connect( [(l2inputnode, MergeCNetworks, [('CFFfiles', 'in_files')])]) l2pipeline.connect( [(group_infosource, MergeCNetworks, [('group_id', 'out_file')])]) l2pipeline.connect( [(MergeCNetworks, l2datasink, [('connectome_file', '@l2output')])]) l2pipeline.connect( [(group_infosource, l2datasink, [('group_id', '@group_id')])]) return l2pipeline def create_merge_network_results_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir): """Creates a second-level pipeline to merge the Connectome File Format (CFF) outputs from the group-level MRtrix structural connectivity processing pipeline into a single CFF file for each group. Example ------- >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork >>> from nipype.testing import example_data >>> subjects_dir = '.' >>> data_dir = '.' >>> output_dir = '.' 
>>> group_list = {} >>> group_list['group1'] = ['subj1', 'subj2'] >>> group_list['group2'] = ['subj3', 'subj4'] >>> group_id = 'group1' >>> l2pipeline = groupwork.create_merge_network_results_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir) >>> l2pipeline.run() # doctest: +SKIP Inputs:: group_list: Dictionary of subject lists, keyed by group name group_id: String containing the group name data_dir: Path to the data directory subjects_dir: Path to the Freesurfer 'subjects' directory output_dir: Path for the output files """ group_infosource = pe.Node(interface=util.IdentityInterface( fields=['group_id']), name="group_infosource") group_infosource.inputs.group_id = group_id l2infosource = pe.Node(interface=util.IdentityInterface(fields=['group_id', 'merged', ]), name='l2infosource') l2source = pe.Node( nio.DataGrabber( infields=['group_id'], outfields=['CFFfiles', 'CSVmatrices', 'CSVfibers', 'CSVnodal', 'CSVglobal']), name='l2source') l2source.inputs.template_args = dict( CFFfiles=[['group_id']], CSVmatrices=[['group_id']], CSVnodal=[['group_id']], CSVglobal=[['group_id']], CSVfibers=[['group_id']]) l2source.inputs.base_directory = data_dir l2source.inputs.template = '%s/%s' l2source.inputs.field_template = dict( CFFfiles=op.join(output_dir, '%s/cff/*/connectome.cff'), CSVmatrices=op.join(output_dir, '%s/cmatrices_csv/*/*.csv'), CSVnodal=op.join(output_dir, '%s/nxcsv/*/*nodal*.csv'), CSVglobal=op.join(output_dir, '%s/nxcsv/*/*global*.csv'), CSVfibers=op.join(output_dir, '%s/fiber_csv/*/*fibers*.csv')) l2source.inputs.sort_filelist = True l2inputnode = pe.Node(interface=util.IdentityInterface(fields=['CFFfiles', 'CSVfibers', 'CSVmatrices', 'CSVnodal', 'CSVglobal', 'network_file']), name='l2inputnode') MergeCNetworks = pe.Node( interface=cmtk.MergeCNetworks(), name="MergeCNetworks") l2datasink = pe.Node(interface=nio.DataSink(), name="l2datasink") l2datasink.inputs.base_directory = output_dir l2datasink.inputs.container = group_id l2pipeline = pe.Workflow(name="l2output_" + group_id) l2pipeline.base_dir = op.join(output_dir, 'l2output') l2pipeline.connect( [(group_infosource, l2infosource, [('group_id', 'group_id')])]) l2pipeline.connect([ (l2infosource, l2source, [('group_id', 'group_id')]), (l2source, l2inputnode, [('CFFfiles', 'CFFfiles')]), (l2source, l2inputnode, [( 'CSVmatrices', 'CSVmatrices')]), (l2source, l2inputnode, [('CSVnodal', 'CSVnodal')]), (l2source, l2inputnode, [('CSVglobal', 'CSVglobal')]), (l2source, l2inputnode, [('CSVfibers', 'CSVfibers')]), ]) l2pipeline.connect( [(l2inputnode, MergeCNetworks, [('CFFfiles', 'in_files')])]) l2pipeline.connect( [(group_infosource, MergeCNetworks, [('group_id', 'out_file')])]) l2pipeline.connect( [(MergeCNetworks, l2datasink, [('connectome_file', '@l2output')])]) AddCSVColumn_node = pe.Node( interface=misc.AddCSVColumn(), name="AddCSVColumn_node") AddCSVColumn_node.inputs.extra_column_heading = 'group' AddCSVColumn_global = AddCSVColumn_node.clone(name="AddCSVColumn_global") AddCSVColumn_matrices = AddCSVColumn_node.clone( name="AddCSVColumn_matrices") AddCSVColumn_fibers = AddCSVColumn_node.clone(name="AddCSVColumn_fibers") concat_csv_interface = Function( input_names=["in_files"], output_names=["out_name"], function=concatcsv) concat_node_csvs = pe.Node( interface=concat_csv_interface, name='concat_node_csvs') concat_global_csvs = pe.Node( interface=concat_csv_interface, name='concat_global_csvs') concat_matrix_csvs = pe.Node( interface=concat_csv_interface, name='concat_matrix_csvs') concat_fiber_csvs = 
pe.Node( interface=concat_csv_interface, name='concat_fiber_csvs') l2pipeline.connect( [(l2inputnode, concat_node_csvs, [('CSVnodal', 'in_files')])]) l2pipeline.connect( [(concat_node_csvs, AddCSVColumn_node, [('out_name', 'in_file')])]) l2pipeline.connect([( group_infosource, AddCSVColumn_node, [('group_id', 'extra_field')])]) l2pipeline.connect([( AddCSVColumn_node, l2datasink, [('csv_file', '@l2output.node_csv')])]) l2pipeline.connect( [(group_infosource, l2datasink, [('group_id', '@group_id')])]) l2pipeline.connect( [(l2inputnode, concat_global_csvs, [('CSVglobal', 'in_files')])]) l2pipeline.connect([( concat_global_csvs, AddCSVColumn_global, [('out_name', 'in_file')])]) l2pipeline.connect([(group_infosource, AddCSVColumn_global, [( 'group_id', 'extra_field')])]) l2pipeline.connect([(AddCSVColumn_global, l2datasink, [('csv_file', '@l2output.global_csv')])]) l2pipeline.connect( [(l2inputnode, concat_matrix_csvs, [('CSVmatrices', 'in_files')])]) l2pipeline.connect([(concat_matrix_csvs, AddCSVColumn_matrices, [( 'out_name', 'in_file')])]) l2pipeline.connect([(group_infosource, AddCSVColumn_matrices, [( 'group_id', 'extra_field')])]) l2pipeline.connect([(AddCSVColumn_matrices, l2datasink, [( 'csv_file', '@l2output.cmatrices_csv')])]) l2pipeline.connect( [(l2inputnode, concat_fiber_csvs, [('CSVmatrices', 'in_files')])]) l2pipeline.connect( [(concat_fiber_csvs, AddCSVColumn_fibers, [('out_name', 'in_file')])]) l2pipeline.connect([(group_infosource, AddCSVColumn_fibers, [( 'group_id', 'extra_field')])]) l2pipeline.connect([(AddCSVColumn_fibers, l2datasink, [('csv_file', '@l2output.fibers_csv')])]) return l2pipeline def create_merge_group_networks_workflow(group_list, data_dir, subjects_dir, output_dir, title='group'): """Creates a third-level pipeline to merge the Connectome File Format (CFF) outputs from each group and combines them into a single CFF file for each group. Example ------- >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork >>> from nipype.testing import example_data >>> subjects_dir = '.' >>> data_dir = '.' >>> output_dir = '.' 
>>> group_list = {} >>> group_list['group1'] = ['subj1', 'subj2'] >>> group_list['group2'] = ['subj3', 'subj4'] >>> l3pipeline = groupwork.create_merge_group_networks_workflow(group_list, data_dir, subjects_dir, output_dir) >>> l3pipeline.run() # doctest: +SKIP Inputs:: group_list: Dictionary of subject lists, keyed by group name data_dir: Path to the data directory subjects_dir: Path to the Freesurfer 'subjects' directory output_dir: Path for the output files title: String to use as a title for the output merged CFF file (default 'group') """ l3infosource = pe.Node(interface=util.IdentityInterface( fields=['group_id']), name='l3infosource') l3infosource.inputs.group_id = group_list.keys() l3source = pe.Node(nio.DataGrabber( infields=['group_id'], outfields=['CFFfiles']), name='l3source') l3source.inputs.template_args = dict(CFFfiles=[['group_id', 'group_id']]) l3source.inputs.template = op.join(output_dir, '%s/%s.cff') l3source.inputs.sort_filelist = True l3inputnode = pe.Node(interface=util.IdentityInterface( fields=['Group_CFFs']), name='l3inputnode') MergeCNetworks_grp = pe.Node( interface=cmtk.MergeCNetworks(), name="MergeCNetworks_grp") MergeCNetworks_grp.inputs.out_file = title l3datasink = pe.Node(interface=nio.DataSink(), name="l3datasink") l3datasink.inputs.base_directory = output_dir l3pipeline = pe.Workflow(name="l3output") l3pipeline.base_dir = output_dir l3pipeline.connect([ (l3infosource, l3source, [('group_id', 'group_id')]), (l3source, l3inputnode, [('CFFfiles', 'Group_CFFs')]), ]) l3pipeline.connect( [(l3inputnode, MergeCNetworks_grp, [('Group_CFFs', 'in_files')])]) l3pipeline.connect([( MergeCNetworks_grp, l3datasink, [('connectome_file', '@l3output')])]) return l3pipeline def create_merge_group_network_results_workflow(group_list, data_dir, subjects_dir, output_dir, title='group'): """Creates a third-level pipeline to merge the Connectome File Format (CFF) outputs from each group and combines them into a single CFF file for each group. This version of the third-level pipeline also concatenates the comma-separated value files for the NetworkX metrics and the connectivity matrices into single files. Example ------- >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork >>> from nipype.testing import example_data >>> subjects_dir = '.' >>> data_dir = '.' >>> output_dir = '.' 
>>> group_list = {} >>> group_list['group1'] = ['subj1', 'subj2'] >>> group_list['group2'] = ['subj3', 'subj4'] >>> l3pipeline = groupwork.create_merge_group_network_results_workflow(group_list, data_dir, subjects_dir, output_dir) >>> l3pipeline.run() # doctest: +SKIP Inputs:: group_list: Dictionary of subject lists, keyed by group name data_dir: Path to the data directory subjects_dir: Path to the Freesurfer 'subjects' directory output_dir: Path for the output files title: String to use as a title for the output merged CFF file (default 'group') """ l3infosource = pe.Node(interface=util.IdentityInterface( fields=['group_id']), name='l3infosource') l3infosource.inputs.group_id = group_list.keys() l3source = pe.Node(nio.DataGrabber(infields=['group_id'], outfields=['CFFfiles', 'CSVnodemetrics', 'CSVglobalmetrics', 'CSVmatrices']), name='l3source') l3source.inputs.template_args = dict(CFFfiles=[['group_id']], CSVnodemetrics=[['group_id']], CSVglobalmetrics=[['group_id']], CSVmatrices=[['group_id']]) l3source.inputs.template = op.join(output_dir, '%s/%s') l3source.inputs.sort_filelist = True l3source.inputs.field_template = dict( CFFfiles=op.join(output_dir, '%s/*.cff'), CSVnodemetrics=op.join(output_dir, '%s/node_csv/*.csv'), CSVglobalmetrics=op.join(output_dir, '%s/global_csv/*.csv'), CSVmatrices=op.join(output_dir, '%s/cmatrices_csv/*/*.csv')) l3inputnode = pe.Node(interface=util.IdentityInterface(fields=['Group_CFFs', 'Group_CSVnodemetrics', 'Group_CSVglobalmetrics', 'Group_CSVmatrices']), name='l3inputnode') MergeCNetworks_grp = pe.Node(interface=cmtk.MergeCNetworks(), name="MergeCNetworks_grp") MergeCNetworks_grp.inputs.out_file = title l3datasink = pe.Node(interface=nio.DataSink(), name="l3datasink") l3datasink.inputs.base_directory = output_dir l3pipeline = pe.Workflow(name="l3output") l3pipeline.base_dir = output_dir l3pipeline.connect([ (l3infosource, l3source, [('group_id', 'group_id')]), (l3source, l3inputnode, [('CFFfiles', 'Group_CFFs')]), (l3source, l3inputnode, [('CSVnodemetrics', 'Group_CSVnodemetrics')]), (l3source, l3inputnode, [('CSVglobalmetrics', 'Group_CSVglobalmetrics')]), (l3source, l3inputnode, [('CSVmatrices', 'Group_CSVmatrices')]), ]) l3pipeline.connect([(l3inputnode, MergeCNetworks_grp, [('Group_CFFs', 'in_files')])]) l3pipeline.connect([(MergeCNetworks_grp, l3datasink, [('connectome_file', '@l3output')])]) concat_csv_interface = Function(input_names=["in_files"], output_names=["out_name"], function=concatcsv) concat_node_csvs = pe.Node(interface=concat_csv_interface, name='concat_node_csvs') concat_global_csvs = pe.Node(interface=concat_csv_interface, name='concat_global_csvs') concat_matrix_csvs = pe.Node(interface=concat_csv_interface, name='concat_matrix_csvs') l3pipeline.connect([(l3inputnode, concat_node_csvs, [('Group_CSVnodemetrics', 'in_files')])]) l3pipeline.connect([(concat_node_csvs, l3datasink, [('out_name', '@l3output.nodal_csv')])]) l3pipeline.connect([(l3inputnode, concat_global_csvs, [('Group_CSVglobalmetrics', 'in_files')])]) l3pipeline.connect([(concat_global_csvs, l3datasink, [('out_name', '@l3output.global_csv')])]) l3pipeline.connect([(l3inputnode, concat_matrix_csvs, [('Group_CSVmatrices', 'in_files')])]) l3pipeline.connect([(concat_matrix_csvs, l3datasink, [('out_name', '@l3output.csvmatrices')])]) return l3pipeline def create_average_networks_by_group_workflow(group_list, data_dir, subjects_dir, output_dir, title='group_average'): """Creates a fourth-level pipeline to average the networks for two groups and merge them into a single CFF 
file. This pipeline will also output the average networks in .gexf format, for visualization in other graph viewers, such as Gephi. Example ------- >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork >>> from nipype.testing import example_data >>> subjects_dir = '.' >>> data_dir = '.' >>> output_dir = '.' >>> group_list = {} >>> group_list['group1'] = ['subj1', 'subj2'] >>> group_list['group2'] = ['subj3', 'subj4'] >>> l4pipeline = groupwork.create_average_networks_by_group_workflow(group_list, data_dir, subjects_dir, output_dir) >>> l4pipeline.run() # doctest: +SKIP Inputs:: group_list: Dictionary of subject lists, keyed by group name data_dir: Path to the data directory subjects_dir: Path to the Freesurfer 'subjects' directory output_dir: Path for the output files title: String to use as a title for the output merged CFF file (default 'group') """ l4infosource = pe.Node(interface=util.IdentityInterface(fields=['group_id1', 'group_id2']), name='l4infosource') try: l4infosource.inputs.group_id1 = group_list.keys()[0] l4infosource.inputs.group_id2 = group_list.keys()[1] except IndexError: print 'The create_average_networks_by_group_workflow requires 2 groups' raise Exception l4info = dict(networks=[['group_id', '']], CMatrices=[['group_id', '']], fibmean=[['group_id', 'mean_fiber_length']], fibdev=[['group_id', 'fiber_length_std']]) l4source_grp1 = pe.Node(nio.DataGrabber(infields=['group_id'], outfields=l4info.keys()), name='l4source_grp1') l4source_grp1.inputs.template = '%s/%s' l4source_grp1.inputs.field_template = dict(networks=op.join(output_dir, '%s/networks/*/*%s*intersections*.pck'), CMatrices=op.join(output_dir, '%s/cmatrix/*/*%s*.mat'), fibmean=op.join(output_dir, '%s/mean_fiber_length/*/*%s*.mat'), fibdev=op.join(output_dir, '%s/fiber_length_std/*/*%s*.mat')) l4source_grp1.inputs.base_directory = output_dir l4source_grp1.inputs.template_args = l4info l4source_grp1.inputs.sort_filelist = True l4source_grp2 = l4source_grp1.clone(name='l4source_grp2') l4inputnode = pe.Node(interface=util.IdentityInterface(fields=['networks_grp1', 'networks_grp2', 'CMatrices_grp1', 'CMatrices_grp2', 'fibmean_grp1', 'fibmean_grp2', 'fibdev_grp1', 'fibdev_grp2']), name='l4inputnode') average_networks_grp1 = pe.Node(interface=cmtk.AverageNetworks(), name='average_networks_grp1') average_networks_grp2 = average_networks_grp1.clone('average_networks_grp2') averagecff = pe.Node(interface=cmtk.CFFConverter(), name="averagecff") averagecff.inputs.out_file = title merge_gpickled_averages = pe.Node(interface=util.Merge(2), name='merge_gpickled_averages') merge_gexf_averages = merge_gpickled_averages.clone('merge_gexf_averages') l4datasink = pe.Node(interface=nio.DataSink(), name="l4datasink") l4datasink.inputs.base_directory = output_dir l4pipeline = pe.Workflow(name="l4output") l4pipeline.base_dir = output_dir l4pipeline.connect([ (l4infosource, l4source_grp1, [('group_id1', 'group_id')]), (l4infosource, l4source_grp2, [('group_id2', 'group_id')]), (l4source_grp1, l4inputnode, [('CMatrices', 'CMatrices_grp1')]), (l4source_grp2, l4inputnode, [('CMatrices', 'CMatrices_grp2')]), (l4source_grp1, l4inputnode, [('networks', 'networks_grp1')]), (l4source_grp2, l4inputnode, [('networks', 'networks_grp2')]), (l4source_grp1, l4inputnode, [('fibmean', 'fibmean_grp1')]), (l4source_grp2, l4inputnode, [('fibmean', 'fibmean_grp2')]), (l4source_grp1, l4inputnode, [('fibdev', 'fibdev_grp1')]), (l4source_grp2, l4inputnode, [('fibdev', 'fibdev_grp2')]), ]) l4pipeline.connect([(l4inputnode, 
average_networks_grp1, [('networks_grp1', 'in_files')])]) l4pipeline.connect([(l4infosource, average_networks_grp1, [('group_id1', 'group_id')])]) l4pipeline.connect([(l4inputnode, average_networks_grp2, [('networks_grp2', 'in_files')])]) l4pipeline.connect([(l4infosource, average_networks_grp2, [('group_id2', 'group_id')])]) l4pipeline.connect([(average_networks_grp1, merge_gpickled_averages, [('gpickled_groupavg', 'in1')])]) l4pipeline.connect([(average_networks_grp2, merge_gpickled_averages, [('gpickled_groupavg', 'in2')])]) l4pipeline.connect([(average_networks_grp1, merge_gexf_averages, [('gexf_groupavg', 'in1')])]) l4pipeline.connect([(average_networks_grp2, merge_gexf_averages, [('gexf_groupavg', 'in2')])]) l4pipeline.connect([(merge_gpickled_averages, l4datasink, [('out', '@l4output.gpickled')])]) l4pipeline.connect([(merge_gpickled_averages, averagecff, [('out', 'gpickled_networks')])]) l4pipeline.connect([(averagecff, l4datasink, [('connectome_file', '@l4output.averagecff')])]) l4pipeline.connect([(merge_gexf_averages, l4datasink, [('out', '@l4output.gexf')])]) return l4pipeline nipype-0.9.2/nipype/workflows/dmri/connectivity/nx.py000066400000000000000000000143461227300005300230370ustar00rootroot00000000000000import nipype.pipeline.engine as pe import nipype.interfaces.utility as util import nipype.interfaces.cmtk as cmtk import nipype.algorithms.misc as misc from .group_connectivity import pullnodeIDs from nipype.algorithms.misc import remove_identical_paths def add_global_to_filename(in_file): from nipype.utils.filemanip import split_filename path, name, ext = split_filename(in_file) return name + '_global' + ext def add_nodal_to_filename(in_file): from nipype.utils.filemanip import split_filename path, name, ext = split_filename(in_file) return name + '_nodal' + ext def create_networkx_pipeline(name="networkx", extra_column_heading="subject"): """Creates a workflow to calculate various graph measures (via NetworkX) on an input network. The output measures are then converted to comma-separated value text files, and an extra column / field is also added. Typically, the user would connect the subject name to this field. 
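In a group analysis the ``extra_field`` input is usually fed from the node that iterates over subjects, so that every row of the resulting CSV files is tagged with the subject it belongs to. A minimal sketch, assuming a hypothetical parent workflow ``groupflow`` with an identity node ``subj_infosource`` iterating over ``subject_id``::

    nx = create_networkx_pipeline("networkx", "subject_id")
    groupflow.connect([(subj_infosource, nx,
                        [("subject_id", "inputnode.extra_field")])])
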
Example ------- >>> from nipype.workflows.dmri.connectivity.nx import create_networkx_pipeline >>> nx = create_networkx_pipeline("networkx", "subject_id") >>> nx.inputs.inputnode.extra_field = 'subj1' >>> nx.inputs.inputnode.network_file = 'subj1.pck' >>> nx.run() # doctest: +SKIP Inputs:: inputnode.extra_field inputnode.network_file Outputs:: outputnode.network_files outputnode.csv_files outputnode.matlab_files """ inputnode = pe.Node(interface = util.IdentityInterface(fields=["extra_field", "network_file"]), name="inputnode") pipeline = pe.Workflow(name=name) ntwkMetrics = pe.Node(interface=cmtk.NetworkXMetrics(), name="NetworkXMetrics") Matlab2CSV_node = pe.Node(interface=misc.Matlab2CSV(), name="Matlab2CSV_node") MergeCSVFiles_node = pe.Node(interface=misc.MergeCSVFiles(), name="MergeCSVFiles_node") MergeCSVFiles_node.inputs.extra_column_heading = extra_column_heading Matlab2CSV_global = Matlab2CSV_node.clone(name="Matlab2CSV_global") MergeCSVFiles_global = MergeCSVFiles_node.clone(name="MergeCSVFiles_global") MergeCSVFiles_global.inputs.extra_column_heading = extra_column_heading mergeNetworks = pe.Node(interface=util.Merge(2), name="mergeNetworks") mergeCSVs = mergeNetworks.clone("mergeCSVs") pipeline.connect([(inputnode, ntwkMetrics,[("network_file","in_file")])]) pipeline.connect([(ntwkMetrics, Matlab2CSV_node,[("node_measures_matlab","in_file")])]) pipeline.connect([(ntwkMetrics, Matlab2CSV_global,[("global_measures_matlab","in_file")])]) pipeline.connect([(Matlab2CSV_node, MergeCSVFiles_node,[("csv_files","in_files")])]) pipeline.connect([(inputnode, MergeCSVFiles_node, [(("extra_field", add_nodal_to_filename), "out_file")])]) pipeline.connect([(inputnode, MergeCSVFiles_node,[("extra_field","extra_field")])]) pipeline.connect([(inputnode, MergeCSVFiles_node, [(("network_file", pullnodeIDs), "row_headings")])]) pipeline.connect([(Matlab2CSV_global, MergeCSVFiles_global,[("csv_files","in_files")])]) pipeline.connect([(Matlab2CSV_global, MergeCSVFiles_global, [(("csv_files", remove_identical_paths), "column_headings")])]) #MergeCSVFiles_global.inputs.row_heading_title = 'metric' #MergeCSVFiles_global.inputs.column_headings = ['average'] pipeline.connect([(inputnode, MergeCSVFiles_global, [(("extra_field", add_global_to_filename), "out_file")])]) pipeline.connect([(inputnode, MergeCSVFiles_global,[("extra_field","extra_field")])]) pipeline.connect([(inputnode, mergeNetworks,[("network_file","in1")])]) pipeline.connect([(ntwkMetrics, mergeNetworks,[("gpickled_network_files","in2")])]) outputnode = pe.Node(interface = util.IdentityInterface(fields=["network_files", "csv_files", "matlab_files", "node_csv", "global_csv"]), name="outputnode") pipeline.connect([(MergeCSVFiles_node, outputnode, [("csv_file", "node_csv")])]) pipeline.connect([(MergeCSVFiles_global, outputnode, [("csv_file", "global_csv")])]) pipeline.connect([(MergeCSVFiles_node, mergeCSVs, [("csv_file", "in1")])]) pipeline.connect([(MergeCSVFiles_global, mergeCSVs, [("csv_file", "in2")])]) pipeline.connect([(mergeNetworks, outputnode, [("out", "network_files")])]) pipeline.connect([(mergeCSVs, outputnode, [("out", "csv_files")])]) pipeline.connect([(ntwkMetrics, outputnode,[("matlab_matrix_files", "matlab_files")])]) return pipeline def create_cmats_to_csv_pipeline(name="cmats_to_csv", extra_column_heading="subject"): """Creates a workflow to convert the outputs from CreateMatrix into a single comma-separated value text file. An extra column / field is also added to the text file. 
Typically, the user would connect the subject name to this field. Example ------- >>> from nipype.workflows.dmri.connectivity.nx import create_cmats_to_csv_pipeline >>> csv = create_cmats_to_csv_pipeline("cmats_to_csv", "subject_id") >>> csv.inputs.inputnode.extra_field = 'subj1' >>> csv.inputs.inputnode.matlab_matrix_files = ['subj1_cmatrix.mat', 'subj1_mean_fiber_length.mat', 'subj1_median_fiber_length.mat', 'subj1_fiber_length_std.mat'] >>> csv.run() # doctest: +SKIP Inputs:: inputnode.extra_field inputnode.matlab_matrix_files Outputs:: outputnode.csv_file """ inputnode = pe.Node(interface = util.IdentityInterface(fields=["extra_field", "matlab_matrix_files"]), name="inputnode") pipeline = pe.Workflow(name=name) Matlab2CSV = pe.MapNode(interface=misc.Matlab2CSV(), name="Matlab2CSV", iterfield=["in_file"]) MergeCSVFiles = pe.Node(interface=misc.MergeCSVFiles(), name="MergeCSVFiles") MergeCSVFiles.inputs.extra_column_heading = extra_column_heading pipeline.connect([(inputnode, Matlab2CSV,[("matlab_matrix_files","in_file")])]) pipeline.connect([(Matlab2CSV, MergeCSVFiles,[("csv_files","in_files")])]) pipeline.connect([(inputnode, MergeCSVFiles,[("extra_field","extra_field")])]) outputnode = pe.Node(interface = util.IdentityInterface(fields=["csv_file"]), name="outputnode") pipeline.connect([(MergeCSVFiles, outputnode, [("csv_file", "csv_file")])]) return pipeline nipype-0.9.2/nipype/workflows/dmri/fsl/000077500000000000000000000000001227300005300200765ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/dmri/fsl/__init__.py000066400000000000000000000006151227300005300222110ustar00rootroot00000000000000from dti import create_bedpostx_pipeline from epi import (fieldmap_correction, topup_correction, create_eddy_correct_pipeline, create_epidewarp_pipeline, create_dmri_preprocessing) from tbss import (create_tbss_1_preproc, create_tbss_2_reg, create_tbss_3_postreg, create_tbss_4_prestats, create_tbss_all, create_tbss_non_FA) nipype-0.9.2/nipype/workflows/dmri/fsl/dti.py000066400000000000000000000173651227300005300212440ustar00rootroot00000000000000# coding: utf-8 import nipype.pipeline.engine as pe import nipype.interfaces.utility as util import nipype.interfaces.fsl as fsl import os #backwards compatibility from epi import create_eddy_correct_pipeline def transpose(samples_over_fibres): import numpy as np a = np.array(samples_over_fibres) if len(a.shape) == 1: a = a.reshape(-1, 1) return a.T.tolist() def create_bedpostx_pipeline(name="bedpostx"): """Creates a pipeline that does the same as bedpostx script from FSL - calculates diffusion model parameters (distributions not MLE) voxelwise for the whole volume (by splitting it slicewise). 
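Because the volume is split slicewise and each slice is fitted by a ``MapNode`` running FSL ``xfibres``, the workflow can be parallelized with any of the standard execution plugins. A hedged sketch (the plugin and its arguments are only examples and should be adapted to the local setup)::

    bpx = create_bedpostx_pipeline("nipype_bedpostx")
    # set the inputs as in the example below, then run in parallel
    bpx.run(plugin='MultiProc', plugin_args={'n_procs': 4})
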
Example ------- >>> nipype_bedpostx = create_bedpostx_pipeline("nipype_bedpostx") >>> nipype_bedpostx.inputs.inputnode.dwi = 'diffusion.nii' >>> nipype_bedpostx.inputs.inputnode.mask = 'mask.nii' >>> nipype_bedpostx.inputs.inputnode.bvecs = 'bvecs' >>> nipype_bedpostx.inputs.inputnode.bvals = 'bvals' >>> nipype_bedpostx.inputs.xfibres.n_fibres = 2 >>> nipype_bedpostx.inputs.xfibres.fudge = 1 >>> nipype_bedpostx.inputs.xfibres.burn_in = 1000 >>> nipype_bedpostx.inputs.xfibres.n_jumps = 1250 >>> nipype_bedpostx.inputs.xfibres.sample_every = 25 >>> nipype_bedpostx.run() # doctest: +SKIP Inputs:: inputnode.dwi inputnode.mask Outputs:: outputnode.thsamples outputnode.phsamples outputnode.fsamples outputnode.mean_thsamples outputnode.mean_phsamples outputnode.mean_fsamples outputnode.dyads outputnode.dyads_dispersion """ inputnode = pe.Node( interface=util.IdentityInterface(fields=["dwi", "mask"]), name="inputnode") mask_dwi = pe.Node(interface=fsl.ImageMaths(op_string="-mas"), name="mask_dwi") slice_dwi = pe.Node(interface=fsl.Split(dimension="z"), name="slice_dwi") slice_mask = pe.Node(interface=fsl.Split(dimension="z"), name="slice_mask") preproc = pe.Workflow(name="preproc") preproc.connect([(inputnode, mask_dwi, [('dwi', 'in_file')]), (inputnode, mask_dwi, [('mask', 'in_file2')]), (mask_dwi, slice_dwi, [('out_file', 'in_file')]), (inputnode, slice_mask, [('mask', 'in_file')]) ]) xfibres = pe.MapNode(interface=fsl.XFibres(), name="xfibres", iterfield=['dwi', 'mask']) # Normal set of parameters xfibres.inputs.n_fibres = 2 xfibres.inputs.fudge = 1 xfibres.inputs.burn_in = 1000 xfibres.inputs.n_jumps = 1250 xfibres.inputs.sample_every = 25 xfibres.inputs.model = 1 xfibres.inputs.non_linear = True xfibres.inputs.update_proposal_every = 24 inputnode = pe.Node(interface=util.IdentityInterface(fields=["thsamples", "phsamples", "fsamples", "dyads", "mean_dsamples", "mask"]), name="inputnode") merge_thsamples = pe.MapNode(fsl.Merge(dimension="z"), name="merge_thsamples", iterfield=['in_files']) merge_phsamples = pe.MapNode(fsl.Merge(dimension="z"), name="merge_phsamples", iterfield=['in_files']) merge_fsamples = pe.MapNode(fsl.Merge(dimension="z"), name="merge_fsamples", iterfield=['in_files']) merge_mean_dsamples = pe.Node(fsl.Merge(dimension="z"), name="merge_mean_dsamples") mean_thsamples = pe.MapNode(fsl.ImageMaths(op_string="-Tmean"), name="mean_thsamples", iterfield=['in_file']) mean_phsamples = pe.MapNode(fsl.ImageMaths(op_string="-Tmean"), name="mean_phsamples", iterfield=['in_file']) mean_fsamples = pe.MapNode(fsl.ImageMaths(op_string="-Tmean"), name="mean_fsamples", iterfield=['in_file']) make_dyads = pe.MapNode(fsl.MakeDyadicVectors(), name="make_dyads", iterfield=['theta_vol', 'phi_vol']) postproc = pe.Workflow(name="postproc") postproc.connect( [(inputnode, merge_thsamples, [(('thsamples', transpose), 'in_files')]), (inputnode, merge_phsamples, [(( 'phsamples', transpose), 'in_files')]), (inputnode, merge_fsamples, [(( 'fsamples', transpose), 'in_files')]), (inputnode, merge_mean_dsamples, [ ('mean_dsamples', 'in_files')]), (merge_thsamples, mean_thsamples, [ ('merged_file', 'in_file')]), (merge_phsamples, mean_phsamples, [ ('merged_file', 'in_file')]), (merge_fsamples, mean_fsamples, [ ('merged_file', 'in_file')]), (merge_thsamples, make_dyads, [ ('merged_file', 'theta_vol')]), (merge_phsamples, make_dyads, [ ('merged_file', 'phi_vol')]), (inputnode, make_dyads, [('mask', 'mask')]), ]) inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "mask", "bvecs", "bvals"]), 
name="inputnode") bedpostx = pe.Workflow(name=name) bedpostx.connect([(inputnode, preproc, [('mask', 'inputnode.mask')]), (inputnode, preproc, [('dwi', 'inputnode.dwi')]), (preproc, xfibres, [('slice_dwi.out_files', 'dwi'), ('slice_mask.out_files', 'mask')]), (inputnode, xfibres, [('bvals', 'bvals')]), (inputnode, xfibres, [('bvecs', 'bvecs')]), (inputnode, postproc, [('mask', 'inputnode.mask')]), (xfibres, postproc, [ ('thsamples', 'inputnode.thsamples'), ('phsamples', 'inputnode.phsamples'), ('fsamples', 'inputnode.fsamples'), ('dyads', 'inputnode.dyads'), ('mean_dsamples', 'inputnode.mean_dsamples')]), ]) outputnode = pe.Node( interface=util.IdentityInterface(fields=["thsamples", "phsamples", "fsamples", "mean_thsamples", "mean_phsamples", "mean_fsamples", "dyads", "dyads_dispersion"]), name="outputnode") bedpostx.connect( [(postproc, outputnode, [("merge_thsamples.merged_file", "thsamples"), ("merge_phsamples.merged_file", "phsamples"), ("merge_fsamples.merged_file", "fsamples"), ("mean_thsamples.out_file", "mean_thsamples"), ("mean_phsamples.out_file", "mean_phsamples"), ("mean_fsamples.out_file", "mean_fsamples"), ("make_dyads.dyads", "dyads"), ("make_dyads.dispersion", "dyads_dispersion")]) ]) return bedpostx nipype-0.9.2/nipype/workflows/dmri/fsl/epi.py000066400000000000000000001033651227300005300212350ustar00rootroot00000000000000# coding: utf-8 import nipype.pipeline.engine as pe import nipype.interfaces.utility as niu import nipype.interfaces.fsl as fsl import os def create_dmri_preprocessing(name='dMRI_preprocessing', use_fieldmap=True, fieldmap_registration=False): """Creates a workflow that chains the necessary pipelines to correct for motion, eddy currents, and, if selected, susceptibility artifacts in EPI dMRI sequences. .. warning:: IMPORTANT NOTICE: this workflow rotates the b-vectors, so please be adviced that not all the dicom converters ensure the consistency between the resulting nifti orientation and the b matrix table (e.g. dcm2nii checks it). Example ------- >>> nipype_dmri_preprocess = create_dmri_preprocessing('nipype_dmri_prep') >>> nipype_dmri_preprocess.inputs.inputnode.in_file = 'diffusion.nii' >>> nipype_dmri_preprocess.inputs.inputnode.in_bvec = 'diffusion.bvec' >>> nipype_dmri_preprocess.inputs.inputnode.ref_num = 0 >>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_mag = 'magnitude.nii' >>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_pha = 'phase.nii' >>> nipype_dmri_preprocess.inputs.inputnode.te_diff = 2.46 >>> nipype_dmri_preprocess.inputs.inputnode.epi_echospacing = 0.77 >>> nipype_dmri_preprocess.inputs.inputnode.epi_rev_encoding = False >>> nipype_dmri_preprocess.inputs.inputnode.pi_accel_factor = True >>> nipype_dmri_preprocess.run() # doctest: +SKIP Inputs:: inputnode.in_file - The diffusion data inputnode.in_bvec - The b-matrix file, in FSL format and consistent with the in_file orientation inputnode.ref_num - The reference volume (a b=0 volume in dMRI) inputnode.fieldmap_mag - The magnitude of the fieldmap inputnode.fieldmap_pha - The phase difference of the fieldmap inputnode.te_diff - TE increment used (in msec.) on the fieldmap acquisition (generally 2.46ms for 3T scanners) inputnode.epi_echospacing - The EPI EchoSpacing parameter (in msec.) inputnode.epi_rev_encoding - True if reverse encoding was used (generally False) inputnode.pi_accel_factor - Parallel imaging factor (aka GRAPPA acceleration factor) inputnode.vsm_sigma - Sigma (in mm.) 
of the gaussian kernel used for in-slice smoothing of the deformation field (voxel shift map, vsm) Outputs:: outputnode.dmri_corrected outputnode.bvec_rotated Optional arguments:: use_fieldmap - True if there are fieldmap files that should be used (default True) fieldmap_registration - True if registration to fieldmap should be performed (default False) """ pipeline = pe.Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface( fields=['in_file', 'in_bvec', 'ref_num', 'fieldmap_mag', 'fieldmap_pha', 'te_diff', 'epi_echospacing', 'epi_rev_encoding', 'pi_accel_factor', 'vsm_sigma']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['dmri_corrected', 'bvec_rotated']), name='outputnode') motion = create_motion_correct_pipeline() eddy = create_eddy_correct_pipeline() if use_fieldmap: # we have a fieldmap, so lets use it (yay!) susceptibility = create_epidewarp_pipeline( fieldmap_registration=fieldmap_registration) pipeline.connect([ (inputnode, motion, [('in_file', 'inputnode.in_file'), ('in_bvec', 'inputnode.in_bvec'), ('ref_num', 'inputnode.ref_num')]), (inputnode, eddy, [('ref_num', 'inputnode.ref_num')]), (motion, eddy, [('outputnode.motion_corrected', 'inputnode.in_file')]), (eddy, susceptibility, [('outputnode.eddy_corrected', 'inputnode.in_file')]), (inputnode, susceptibility, [('ref_num', 'inputnode.ref_num'), ('fieldmap_mag', 'inputnode.fieldmap_mag'), ('fieldmap_pha', 'inputnode.fieldmap_pha'), ('te_diff', 'inputnode.te_diff'), ('epi_echospacing', 'inputnode.epi_echospacing'), ('epi_rev_encoding', 'inputnode.epi_rev_encoding'), ('pi_accel_factor', 'inputnode.pi_accel_factor'), ('vsm_sigma', 'inputnode.vsm_sigma')]), (motion, outputnode, [('outputnode.out_bvec', 'bvec_rotated')]), (susceptibility, outputnode, [('outputnode.epi_corrected', 'dmri_corrected')]) ]) else: # we don't have a fieldmap, so we just carry on without it :( pipeline.connect([ (inputnode, motion, [('in_file', 'inputnode.in_file'), ('in_bvec', 'inputnode.in_bvec'), ('ref_num', 'inputnode.ref_num')]), (inputnode, eddy, [('ref_num', 'inputnode.ref_num')]), (motion, eddy, [('outputnode.motion_corrected', 'inputnode.in_file')]), (motion, outputnode, [('outputnode.out_bvec', 'bvec_rotated')]), (eddy, outputnode, [('outputnode.eddy_corrected', 'dmri_corrected')]) ]) return pipeline def create_motion_correct_pipeline(name='motion_correct'): """Creates a pipeline that corrects for motion artifact in dMRI sequences. It takes a series of diffusion weighted images and rigidly co-registers them to one reference image. Finally, the b-matrix is rotated accordingly (Leemans et al. 2009 - http://www.ncbi.nlm.nih.gov/pubmed/19319973), making use of the rotation matrix obtained by FLIRT. .. warning:: IMPORTANT NOTICE: this workflow rotates the b-vectors, so please be adviced that not all the dicom converters ensure the consistency between the resulting nifti orientation and the b matrix table (e.g. dcm2nii checks it). 
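Conceptually, the b-matrix rotation applies the rotational part of each FLIRT transform to the corresponding gradient direction. The snippet below is only a sketch of that idea and is not the ``_rotate_bvecs`` helper used by the node (which also parses the FSL ``.mat`` files and handles all volumes at once)::

    import numpy as np

    def rotate_bvec(bvec, flirt_affine):
        # with a rigid (6 dof) registration the upper-left 3x3 block of the
        # affine is a pure rotation, so it can be applied to the gradient
        rot = np.array(flirt_affine)[:3, :3]
        new = rot.dot(np.array(bvec, dtype=float))
        norm = np.linalg.norm(new)
        # renormalize to guard against numerical drift; keep b=0 vectors as-is
        return new / norm if norm > 0 else new
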
Example ------- >>> nipype_motioncorrect = create_motion_correct_pipeline('nipype_motioncorrect') >>> nipype_motioncorrect.inputs.inputnode.in_file = 'diffusion.nii' >>> nipype_motioncorrect.inputs.inputnode.in_bvec = 'diffusion.bvec' >>> nipype_motioncorrect.inputs.inputnode.ref_num = 0 >>> nipype_motioncorrect.run() # doctest: +SKIP Inputs:: inputnode.in_file inputnode.ref_num inputnode.in_bvec Outputs:: outputnode.motion_corrected outputnode.out_bvec """ inputnode = pe.Node( niu.IdentityInterface( fields=['in_file', 'ref_num', 'in_bvec']), name='inputnode') pipeline = pe.Workflow(name=name) split = pe.Node(fsl.Split(dimension='t'), name='split') pick_ref = pe.Node(niu.Select(), name='pick_ref') coregistration = pe.MapNode(fsl.FLIRT(no_search=True, interp='spline', padding_size=1, dof=6), name='coregistration', iterfield=['in_file']) rotate_bvecs = pe.Node(niu.Function(input_names=['in_bvec', 'in_matrix'], output_names=[ 'out_file'], function=_rotate_bvecs), name='rotate_b_matrix') merge = pe.Node(fsl.Merge(dimension='t'), name='merge') outputnode = pe.Node( niu.IdentityInterface( fields=['motion_corrected', 'out_bvec']), name='outputnode') pipeline.connect([ (inputnode, split, [('in_file', 'in_file')]) ,(split, pick_ref, [('out_files', 'inlist')]) ,(inputnode, pick_ref, [('ref_num', 'index')]) ,(split, coregistration, [('out_files', 'in_file')]) ,(inputnode, rotate_bvecs, [('in_bvec', 'in_bvec')]) ,(coregistration, rotate_bvecs, [('out_matrix_file', 'in_matrix')]) ,(pick_ref, coregistration, [('out', 'reference')]) ,(coregistration, merge, [('out_file', 'in_files')]) ,(merge, outputnode, [('merged_file', 'motion_corrected')]) ,(rotate_bvecs, outputnode, [('out_file', 'out_bvec')]) ]) return pipeline def create_eddy_correct_pipeline(name='eddy_correct'): """Creates a pipeline that replaces eddy_correct script in FSL. It takes a series of diffusion weighted images and linearly co-registers them to one reference image. No rotation of the B-matrix is performed, so this pipeline should be executed after the motion correction pipeline. Example ------- >>> nipype_eddycorrect = create_eddy_correct_pipeline('nipype_eddycorrect') >>> nipype_eddycorrect.inputs.inputnode.in_file = 'diffusion.nii' >>> nipype_eddycorrect.inputs.inputnode.ref_num = 0 >>> nipype_eddycorrect.run() # doctest: +SKIP Inputs:: inputnode.in_file inputnode.ref_num Outputs:: outputnode.eddy_corrected """ inputnode = pe.Node( niu.IdentityInterface(fields=['in_file', 'ref_num']), name='inputnode') pipeline = pe.Workflow(name=name) split = pe.Node(fsl.Split(dimension='t'), name='split') pick_ref = pe.Node(niu.Select(), name='pick_ref') coregistration = pe.MapNode(fsl.FLIRT(no_search=True, padding_size=1, dof=12, interp='spline'), name='coregistration', iterfield=['in_file']) merge = pe.Node(fsl.Merge(dimension='t'), name='merge') outputnode = pe.Node( niu.IdentityInterface(fields=['eddy_corrected']), name='outputnode') pipeline.connect([ (inputnode, split, [('in_file', 'in_file')]) ,(split, pick_ref, [('out_files', 'inlist')]) ,(inputnode, pick_ref, [('ref_num', 'index')]) ,(split, coregistration, [('out_files', 'in_file')]) ,(pick_ref, coregistration, [('out', 'reference')]) ,(coregistration, merge, [('out_file', 'in_files')]) ,(merge, outputnode, [('merged_file', 'eddy_corrected')]) ]) return pipeline def fieldmap_correction(name='fieldmap_correction', nocheck=False): """ Fieldmap-based retrospective correction of EPI images for the susceptibility distortion artifact (Jezzard et al., 1995). 
Fieldmap images are assumed to be already registered to EPI data, and a brain mask is required. Replaces the former workflow, still available as create_epidewarp_pipeline(). The difference with respect the epidewarp pipeline is that now the workflow uses the new fsl_prepare_fieldmap available as of FSL 5.0. Example ------- >>> nipype_epicorrect = fieldmap_correction('nipype_epidewarp') >>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii' >>> nipype_epicorrect.inputs.inputnode.in_mask = 'brainmask.nii' >>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii' >>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii' >>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46 >>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77 >>> nipype_epicorrect.inputs.inputnode.encoding_direction = 'y' >>> nipype_epicorrect.run() # doctest: +SKIP Inputs:: inputnode.in_file - The volume acquired with EPI sequence inputnode.in_mask - A brain mask inputnode.fieldmap_pha - The phase difference map from the fieldmapping, registered to in_file inputnode.fieldmap_mag - The magnitud maps (usually 4D, one magnitude per GRE scan) from the fieldmapping, registered to in_file inputnode.te_diff - Time difference in msec. between TE in ms of the fieldmapping (usually a GRE sequence). inputnode.epi_echospacing - The effective echo spacing (aka dwell time) in msec. of the EPI sequence. If EPI was acquired with parallel imaging, then the effective echo spacing is eff_es = es / acc_factor. inputnode.encoding_direction - The phase encoding direction in EPI acquisition (default y) inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map) Outputs:: outputnode.epi_corrected outputnode.out_vsm """ inputnode = pe.Node(niu.IdentityInterface( fields=['in_file', 'in_mask', 'fieldmap_pha', 'fieldmap_mag', 'te_diff', 'epi_echospacing', 'vsm_sigma', 'encoding_direction' ]), name='inputnode' ) pipeline = pe.Workflow(name=name) # Keep first frame from magnitude select_mag = pe.Node(fsl.utils.ExtractROI( t_size=1, t_min=0), name='select_magnitude') # Mask magnitude (it is required by PreparedFieldMap) mask_mag = pe.Node( fsl.maths.ApplyMask(), name='mask_magnitude' ) # Run fsl_prepare_fieldmap fslprep = pe.Node( fsl.PrepareFieldmap(), name='prepare_fieldmap' ) if nocheck: fslprep.inputs.nocheck = True # Use FUGUE to generate the voxel shift map (vsm) vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm') # VSM demean is not anymore present in the epi_reg script #vsm_mean = pe.Node(niu.Function(input_names=['in_file', 'mask_file', 'in_unwarped'], output_names=[ # 'out_file'], function=_vsm_remove_mean), name='vsm_mean_shift') # fugue_epi dwi_split = pe.Node(niu.Function(input_names=[ 'in_file'], output_names=['out_files'], function=_split_dwi), name='dwi_split') # 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name ) dwi_applyxfm = pe.MapNode(fsl.FUGUE( icorr=True, save_shift=False), iterfield=['in_file'], name='dwi_fugue') # Merge back all volumes dwi_merge = pe.Node(fsl.utils.Merge( dimension='t'), name='dwi_merge') outputnode = pe.Node( niu.IdentityInterface(fields=['epi_corrected','out_vsm']), name='outputnode') pipeline.connect([ (inputnode, select_mag, [('fieldmap_mag', 'in_file')]) ,(inputnode, fslprep, [('fieldmap_pha', 'in_phase'),('te_diff', 'delta_TE') ]) ,(inputnode, mask_mag, [('in_mask', 'mask_file' )]) ,(select_mag, mask_mag, [('roi_file', 'in_file')]) ,(mask_mag, fslprep, [('out_file', 
'in_magnitude')]) ,(fslprep, vsm, [('out_fieldmap', 'phasemap_file')]) ,(inputnode, vsm, [('fieldmap_mag', 'in_file'), ('encoding_direction','unwarp_direction'), (('te_diff', _ms2sec), 'asym_se_time'), ('vsm_sigma', 'smooth2d'), (('epi_echospacing', _ms2sec), 'dwell_time')]) ,(mask_mag, vsm, [('out_file', 'mask_file')]) ,(inputnode, dwi_split, [('in_file', 'in_file')]) ,(dwi_split, dwi_applyxfm, [('out_files', 'in_file')]) ,(mask_mag, dwi_applyxfm, [('out_file', 'mask_file')]) ,(vsm, dwi_applyxfm, [('shift_out_file', 'shift_in_file')]) ,(inputnode, dwi_applyxfm, [('encoding_direction','unwarp_direction')]) ,(dwi_applyxfm, dwi_merge, [('unwarped_file', 'in_files')]) ,(dwi_merge, outputnode, [('merged_file', 'epi_corrected')]) ,(vsm, outputnode, [('shift_out_file','out_vsm') ]) ]) return pipeline def topup_correction( name='topup_correction' ): """ Corrects for susceptibilty distortion of EPI images when one reverse encoding dataset has been acquired Example ------- >>> nipype_epicorrect = topup_correction('nipype_topup') >>> nipype_epicorrect.inputs.inputnode.in_file_dir = 'epi.nii' >>> nipype_epicorrect.inputs.inputnode.in_file_rev = 'epi_rev.nii' >>> nipype_epicorrect.inputs.inputnode.encoding_direction = 'y' >>> nipype_epicorrect.inputs.inputnode.ref_num = 0 >>> nipype_epicorrect.run() # doctest: +SKIP Inputs:: inputnode.in_file_dir - EPI volume acquired in 'forward' phase encoding inputnode.in_file_rev - EPI volume acquired in 'reversed' phase encoding inputnode.encoding_direction - Direction encoding of in_file_dir inputnode.ref_num - Identifier of the reference volumes (usually B0 volume) Outputs:: outputnode.epi_corrected """ pipeline = pe.Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface( fields=['in_file_dir', 'in_file_rev', 'encoding_direction', 'readout_times', 'ref_num' ]), name='inputnode' ) outputnode = pe.Node( niu.IdentityInterface( fields=['out_fieldcoef', 'out_movpar', 'out_topup', 'out_enc_file', 'epi_corrected' ]), name='outputnode' ) b0_dir = pe.Node( fsl.ExtractROI( t_size=1 ), name='b0_1' ) b0_rev = pe.Node( fsl.ExtractROI( t_size=1 ), name='b0_2' ) combin = pe.Node( niu.Merge(2), name='merge' ) combin2 = pe.Node( niu.Merge(2), name='merge2' ) merged = pe.Node( fsl.Merge( dimension='t' ), name='b0_comb' ) topup = pe.Node( fsl.TOPUP(), name='topup' ) applytopup = pe.Node( fsl.ApplyTOPUP(in_index=[1,2] ), name='applytopup' ) pipeline.connect([ (inputnode, b0_dir, [('in_file_dir','in_file'),('ref_num','t_min')] ) ,(inputnode, b0_rev, [('in_file_rev','in_file'),('ref_num','t_min')] ) ,(inputnode, combin2, [('in_file_dir','in1'),('in_file_rev','in2') ] ) ,(b0_dir, combin, [('roi_file','in1')] ) ,(b0_rev, combin, [('roi_file','in2')] ) ,(combin, merged, [('out', 'in_files')] ) ,(merged, topup, [('merged_file','in_file')]) ,(inputnode, topup, [('encoding_direction','encoding_direction'),('readout_times','readout_times') ]) ,(topup, applytopup, [('out_topup','in_topup'),('out_enc_file','encoding_file')]) ,(combin2, applytopup, [('out','in_files')] ) ,(topup, outputnode, [('out_fieldcoef','out_fieldcoef'),('out_movpar','out_movpar'), ('out_topup','out_topup'),('out_enc_file','out_enc_file') ]) ,(applytopup,outputnode, [('out_corrected','epi_corrected')]) ]) return pipeline def create_epidewarp_pipeline(name='epidewarp', fieldmap_registration=False): """ Replaces the epidewarp.fsl script (http://www.nmr.mgh.harvard.edu/~greve/fbirn/b0/epidewarp.fsl) for susceptibility distortion correction of dMRI & fMRI acquired with EPI sequences and the fieldmap information 
(Jezzard et al., 1995) using FSL's FUGUE. The registration to the (warped) fieldmap (strictly following the original script) is available using fieldmap_registration=True. Example ------- >>> nipype_epicorrect = create_epidewarp_pipeline('nipype_epidewarp', fieldmap_registration=False) >>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii' >>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii' >>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii' >>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46 >>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77 >>> nipype_epicorrect.inputs.inputnode.epi_rev_encoding = False >>> nipype_epicorrect.inputs.inputnode.ref_num = 0 >>> nipype_epicorrect.inputs.inputnode.pi_accel_factor = 1.0 >>> nipype_epicorrect.run() # doctest: +SKIP Inputs:: inputnode.in_file - The volume acquired with EPI sequence inputnode.fieldmap_mag - The magnitude of the fieldmap inputnode.fieldmap_pha - The phase difference of the fieldmap inputnode.te_diff - Time difference between TE in ms. inputnode.epi_echospacing - The echo spacing (aka dwell time) in the EPI sequence inputnode.epi_ph_encoding_dir - The phase encoding direction in EPI acquisition (default y) inputnode.epi_rev_encoding - True if it is acquired with reverse encoding inputnode.pi_accel_factor - Acceleration factor used for EPI parallel imaging (GRAPPA) inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map) inputnode.ref_num - The reference volume (B=0 in dMRI or a central frame in fMRI) Outputs:: outputnode.epi_corrected Optional arguments:: fieldmap_registration - True if registration to fieldmap should be done (default False) """ inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'fieldmap_mag', 'fieldmap_pha', 'te_diff', 'epi_echospacing', 'epi_ph_encoding_dir', 'epi_rev_encoding', 'pi_accel_factor', 'vsm_sigma', 'ref_num', 'unwarp_direction' ]), name='inputnode') pipeline = pe.Workflow(name=name) # Keep first frame from magnitude select_mag = pe.Node(fsl.utils.ExtractROI( t_size=1, t_min=0), name='select_magnitude') # mask_brain mask_mag = pe.Node(fsl.BET(mask=True), name='mask_magnitude') mask_mag_dil = pe.Node(niu.Function(input_names=[ 'in_file'], output_names=['out_file'], function=_dilate_mask), name='mask_dilate') # Compute dwell time dwell_time = pe.Node(niu.Function(input_names=['dwell_time', 'pi_factor', 'is_reverse_encoding'], output_names=[ 'dwell_time'], function=_compute_dwelltime), name='dwell_time') # Normalize phase diff to be [-pi, pi) norm_pha = pe.Node(niu.Function(input_names=['in_file'], output_names=[ 'out_file'], function=_prepare_phasediff), name='normalize_phasediff') # Execute FSL PRELUDE: prelude -p %s -a %s -o %s -f -v -m %s prelude = pe.Node(fsl.PRELUDE( process3d=True), name='phase_unwrap') fill_phase = pe.Node(niu.Function(input_names=['in_file'], output_names=[ 'out_file'], function=_fill_phase), name='fill_phasediff') # to assure that vsm is same dimension as mag. The input only affects the output dimension. # The content of the input has no effect on the vsm. 
The de-warped mag volume is # meaningless and will be thrown away # fugue -i %s -u %s -p %s --dwell=%s --asym=%s --mask=%s --saveshift=%s % # ( mag_name, magdw_name, ph_name, esp, tediff, mask_name, vsmmag_name) vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm') vsm_mean = pe.Node(niu.Function(input_names=['in_file', 'mask_file', 'in_unwarped'], output_names=[ 'out_file'], function=_vsm_remove_mean), name='vsm_mean_shift') # fugue_epi dwi_split = pe.Node(niu.Function(input_names=[ 'in_file'], output_names=['out_files'], function=_split_dwi), name='dwi_split') # 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name ) dwi_applyxfm = pe.MapNode(fsl.FUGUE( icorr=True, save_shift=False), iterfield=['in_file'], name='dwi_fugue') # Merge back all volumes dwi_merge = pe.Node(fsl.utils.Merge( dimension='t'), name='dwi_merge') outputnode = pe.Node( niu.IdentityInterface(fields=['epi_corrected']), name='outputnode') pipeline.connect([ (inputnode, dwell_time, [('epi_echospacing', 'dwell_time'), ('pi_accel_factor', 'pi_factor'), ('epi_rev_encoding', 'is_reverse_encoding')]) ,(inputnode, select_mag, [('fieldmap_mag', 'in_file')]) ,(inputnode, norm_pha, [('fieldmap_pha', 'in_file')]) ,(select_mag, mask_mag, [('roi_file', 'in_file')]) ,(mask_mag, mask_mag_dil, [('mask_file', 'in_file')]) ,(select_mag, prelude, [('roi_file', 'magnitude_file')]) ,(norm_pha, prelude, [('out_file', 'phase_file')]) ,(mask_mag_dil, prelude, [('out_file', 'mask_file')]) ,(prelude, fill_phase, [('unwrapped_phase_file', 'in_file')]) ,(inputnode, vsm, [('fieldmap_mag', 'in_file')]) ,(fill_phase, vsm, [('out_file', 'phasemap_file')]) ,(inputnode, vsm, [(('te_diff', _ms2sec), 'asym_se_time'), ('vsm_sigma', 'smooth2d')]) ,(dwell_time, vsm, [(('dwell_time', _ms2sec), 'dwell_time')]) ,(mask_mag_dil, vsm, [('out_file', 'mask_file')]) ,(mask_mag_dil, vsm_mean, [('out_file', 'mask_file')]) ,(vsm, vsm_mean, [('unwarped_file', 'in_unwarped'), ('shift_out_file', 'in_file')]) ,(inputnode, dwi_split, [('in_file', 'in_file')]) ,(dwi_split, dwi_applyxfm, [('out_files', 'in_file')]) ,(dwi_applyxfm, dwi_merge, [('unwarped_file', 'in_files')]) ,(dwi_merge, outputnode, [('merged_file', 'epi_corrected')]) ]) if fieldmap_registration: """ Register magfw to example epi. There are some parameters here that may need to be tweaked. Should probably strip the mag Pre-condition: forward warp the mag in order to reg with func. What does mask do here? 
""" # Select reference volume from EPI (B0 in dMRI and a middle frame in # fMRI) select_epi = pe.Node(fsl.utils.ExtractROI( t_size=1), name='select_epi') # fugue -i %s -w %s --loadshift=%s --mask=%s % ( mag_name, magfw_name, # vsmmag_name, mask_name ), log ) # Forward Map vsm_fwd = pe.Node(fsl.FUGUE( save_warped=True), name='vsm_fwd') vsm_reg = pe.Node(fsl.FLIRT(bins=256, cost='corratio', dof=6, interp='spline', searchr_x=[ -10, 10], searchr_y=[-10, 10], searchr_z=[-10, 10]), name='vsm_registration') # 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( vsmmag_name, ref_epi, vsmmag_name, magfw_mat_out ) vsm_applyxfm = pe.Node(fsl.ApplyXfm( interp='spline'), name='vsm_apply_xfm') # 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( mask_name, ref_epi, mask_name, magfw_mat_out ) msk_applyxfm = pe.Node(fsl.ApplyXfm( interp='nearestneighbour'), name='msk_apply_xfm') pipeline.connect([ (inputnode, select_epi, [('in_file', 'in_file'), ('ref_num', 't_min')]) ,(select_epi, vsm_reg, [('roi_file', 'reference')]) ,(vsm, vsm_fwd, [('shift_out_file', 'shift_in_file')]) ,(mask_mag_dil, vsm_fwd, [('out_file', 'mask_file')]) ,(inputnode, vsm_fwd, [('fieldmap_mag', 'in_file')]) ,(vsm_fwd, vsm_reg, [('warped_file', 'in_file')]) ,(vsm_reg, msk_applyxfm, [('out_matrix_file', 'in_matrix_file')]) ,(select_epi, msk_applyxfm, [('roi_file', 'reference')]) ,(mask_mag_dil, msk_applyxfm, [('out_file', 'in_file')]) ,(vsm_reg, vsm_applyxfm, [('out_matrix_file', 'in_matrix_file')]) ,(select_epi, vsm_applyxfm, [('roi_file', 'reference')]) ,(vsm_mean, vsm_applyxfm, [('out_file', 'in_file')]) ,(msk_applyxfm, dwi_applyxfm, [('out_file', 'mask_file')]) ,(vsm_applyxfm, dwi_applyxfm, [('out_file', 'shift_in_file')]) ]) else: pipeline.connect([ (mask_mag_dil, dwi_applyxfm, [('out_file', 'mask_file')]) ,( vsm_mean, dwi_applyxfm, [('out_file', 'shift_in_file')]) ]) return pipeline def _rotate_bvecs(in_bvec, in_matrix): import os import numpy as np name, fext = os.path.splitext(os.path.basename(in_bvec)) if fext == '.gz': name, _ = os.path.splitext(name) out_file = os.path.abspath('./%s_rotated.bvec' % name) bvecs = np.loadtxt(in_bvec) new_bvecs = np.zeros(shape=bvecs.T.shape) #pre-initialise array, 3 col format for i, vol_matrix in enumerate(in_matrix[0::]): #start index at 0 bvec = np.matrix(bvecs[:, i]) rot = np.matrix(np.loadtxt(vol_matrix)[0:3, 0:3]) new_bvecs[i] = (np.array(rot * bvec.T).T)[0] #fill each volume with x,y,z as we go along np.savetxt(out_file, np.array(new_bvecs).T, fmt='%0.15f') return out_file def _cat_logs(in_files): import shutil import os name, fext = os.path.splitext(os.path.basename(in_files[0])) if fext == '.gz': name, _ = os.path.splitext(name) out_file = os.path.abspath('./%s_ecclog.log' % name) out_str = '' with open(out_file, 'wb') as totallog: for i, fname in enumerate(in_files): totallog.write('\n\npreprocessing %d\n' % i) with open(fname) as inlog: for line in inlog: totallog.write(line) return out_file def _compute_dwelltime(dwell_time=0.68, pi_factor=1.0, is_reverse_encoding=False): dwell_time *= (1.0/pi_factor) if is_reverse_encoding: dwell_time *= -1.0 return dwell_time def _effective_echospacing( dwell_time, pi_factor=1.0 ): dwelltime = 1.0e-3 * dwell_time * ( 1.0/pi_factor ) return dwelltime def _prepare_phasediff(in_file): import nibabel as nib import os import numpy as np img = nib.load(in_file) max_diff = np.max(img.get_data().reshape(-1)) min_diff = np.min(img.get_data().reshape(-1)) A = (2.0 * np.pi)/(max_diff-min_diff) B = np.pi - (A * max_diff) diff_norm = img.get_data() * A + B 
name, fext = os.path.splitext(os.path.basename(in_file)) if fext == '.gz': name, _ = os.path.splitext(name) out_file = os.path.abspath('./%s_2pi.nii.gz' % name) nib.save(nib.Nifti1Image( diff_norm, img.get_affine(), img.get_header()), out_file) return out_file def _dilate_mask(in_file, iterations=4): import nibabel as nib import scipy.ndimage as ndimage import os img = nib.load(in_file) img._data = ndimage.binary_dilation(img.get_data(), iterations=iterations) name, fext = os.path.splitext(os.path.basename(in_file)) if fext == '.gz': name, _ = os.path.splitext(name) out_file = os.path.abspath('./%s_dil.nii.gz' % name) nib.save(img, out_file) return out_file def _fill_phase(in_file): import nibabel as nib import os import numpy as np img = nib.load(in_file) dumb_img = nib.Nifti1Image(np.zeros( img.get_shape()), img.get_affine(), img.get_header()) out_nii = nib.funcs.concat_images((img, dumb_img)) name, fext = os.path.splitext(os.path.basename(in_file)) if fext == '.gz': name, _ = os.path.splitext(name) out_file = os.path.abspath('./%s_fill.nii.gz' % name) nib.save(out_nii, out_file) return out_file def _vsm_remove_mean(in_file, mask_file, in_unwarped): import nibabel as nib import os import numpy as np import numpy.ma as ma img = nib.load(in_file) msk = nib.load(mask_file).get_data() img_data = img.get_data() img_data[msk == 0] = 0 vsmmag_masked = ma.masked_values(img_data.reshape(-1), 0.0) vsmmag_masked = vsmmag_masked - vsmmag_masked.mean() img._data = vsmmag_masked.reshape(img.get_shape()) name, fext = os.path.splitext(os.path.basename(in_file)) if fext == '.gz': name, _ = os.path.splitext(name) out_file = os.path.abspath('./%s_demeaned.nii.gz' % name) nib.save(img, out_file) return out_file def _ms2sec(val): return val*1e-3; def _split_dwi(in_file): import nibabel as nib import os out_files = [] frames = nib.funcs.four_to_three(nib.load(in_file)) name, fext = os.path.splitext(os.path.basename(in_file)) if fext == '.gz': name, _ = os.path.splitext(name) for i, frame in enumerate(frames): out_file = os.path.abspath('./%s_%03d.nii.gz' % (name, i)) nib.save(frame, out_file) out_files.append(out_file) return out_files nipype-0.9.2/nipype/workflows/dmri/fsl/setup.py000066400000000000000000000006461227300005300216160ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fsl', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/dmri/fsl/tbss.py000066400000000000000000000562711227300005300214360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from warnings import warn import nipype.pipeline.engine as pe import nipype.interfaces.utility as util import nipype.interfaces.fsl as fsl def tbss1_op_string(in_files): import nibabel as nib op_strings = [] for infile in in_files: img = nib.load(infile) dimtup = tuple([d - 2 for d in img.get_shape()]) op_str = '-min 1 -ero -roi 1 %d 1 %d 1 %d 0 1' % dimtup op_strings.append(op_str) return op_strings def create_tbss_1_preproc(name='tbss_1_preproc'): """Preprocess FA data for TBSS: erodes a little and zero end slicers and creates masks(for use in FLIRT & FNIRT from FSL). 
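Each FA image is passed through ``fslmaths`` with an operation string built by
``tbss1_op_string`` above; for example, an FA volume of shape (128, 128, 60)
yields ``-min 1 -ero -roi 1 126 1 126 1 58 0 1``, which caps values at 1,
erodes once and zeroes the end slices in each dimension.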
A pipeline that does the same as tbss_1_preproc script in FSL Example ------- >>> from nipype.workflows.dmri.fsl import tbss >>> tbss1 = tbss.create_tbss_1_preproc() >>> tbss1.inputs.inputnode.fa_list = ['s1_FA.nii', 's2_FA.nii', 's3_FA.nii'] Inputs:: inputnode.fa_list Outputs:: outputnode.fa_list outputnode.mask_list outputnode.slices """ # Define the inputnode inputnode = pe.Node(interface=util.IdentityInterface(fields=["fa_list"]), name="inputnode") # Prep the FA images prepfa = pe.MapNode(fsl.ImageMaths(suffix="_prep"), name="prepfa", iterfield=['in_file', 'op_string']) # Slicer slicer = pe.MapNode(fsl.Slicer(all_axial=True, image_width=1280), name='slicer', iterfield=['in_file']) # Create a mask getmask1 = pe.MapNode(fsl.ImageMaths(op_string="-bin", suffix="_mask"), name="getmask1", iterfield=['in_file']) getmask2 = pe.MapNode(fsl.MultiImageMaths(op_string="-dilD -dilD -sub 1 -abs -add %s"), name="getmask2", iterfield=['in_file', 'operand_files']) # $FSLDIR/bin/fslmaths FA/${f}_FA_mask -dilD -dilD -sub 1 -abs -add FA/${f}_FA_mask FA/${f}_FA_mask -odt char # Define the tbss1 workflow tbss1 = pe.Workflow(name=name) tbss1.connect([ (inputnode, prepfa, [("fa_list", "in_file")]), (inputnode, prepfa, [(("fa_list", tbss1_op_string), "op_string")]), (prepfa, getmask1, [("out_file", "in_file")]), (getmask1, getmask2, [("out_file", "in_file"), ("out_file", "operand_files")]), (prepfa, slicer, [('out_file', 'in_file')]), ]) # Define the outputnode outputnode = pe.Node(interface=util.IdentityInterface(fields=["fa_list", "mask_list", "slices"]), name="outputnode") tbss1.connect([ (prepfa, outputnode, [("out_file", "fa_list")]), (getmask2, outputnode, [("out_file", "mask_list")]), (slicer, outputnode, [('out_file', 'slices')]) ]) return tbss1 def create_tbss_2_reg(name="tbss_2_reg"): """TBSS nonlinear registration: A pipeline that does the same as 'tbss_2_reg -t' script in FSL. '-n' option is not supported at the moment. 
Example ------- >>> from nipype.workflows.dmri.fsl import tbss >>> tbss2 = create_tbss_2_reg(name="tbss2") >>> tbss2.inputs.inputnode.target = fsl.Info.standard_image("FMRIB58_FA_1mm.nii.gz") # doctest: +SKIP >>> tbss2.inputs.inputnode.fa_list = ['s1_FA.nii', 's2_FA.nii', 's3_FA.nii'] >>> tbss2.inputs.inputnode.mask_list = ['s1_mask.nii', 's2_mask.nii', 's3_mask.nii'] Inputs:: inputnode.fa_list inputnode.mask_list inputnode.target Outputs:: outputnode.field_list """ # Define the inputnode inputnode = pe.Node(interface=util.IdentityInterface(fields=["fa_list", "mask_list", "target"]), name="inputnode") # Flirt the FA image to the target flirt = pe.MapNode(interface=fsl.FLIRT(dof=12), iterfield=['in_file', 'in_weight'], name="flirt") fnirt = pe.MapNode(interface=fsl.FNIRT(fieldcoeff_file=True), iterfield=['in_file', 'inmask_file', 'affine_file'], name="fnirt") # Fnirt the FA image to the target if fsl.no_fsl(): warn('NO FSL found') else: config_file = os.path.join(os.environ["FSLDIR"], "etc/flirtsch/FA_2_FMRIB58_1mm.cnf") fnirt.inputs.config_file=config_file # Define the registration workflow tbss2 = pe.Workflow(name=name) # Connect up the registration workflow tbss2.connect([ (inputnode, flirt, [("fa_list", "in_file"), ("target", "reference"), ("mask_list", "in_weight")]), (inputnode, fnirt, [("fa_list", "in_file"), ("mask_list", "inmask_file"), ("target", "ref_file")]), (flirt, fnirt, [("out_matrix_file", "affine_file")]), ]) # Define the outputnode outputnode = pe.Node(interface=util.IdentityInterface(fields=['field_list']), name="outputnode") tbss2.connect([ (fnirt, outputnode, [('fieldcoeff_file', 'field_list')]) ]) return tbss2 def create_tbss_3_postreg(name='tbss_3_postreg', estimate_skeleton=True): """Post-registration processing: derive mean_FA and mean_FA_skeleton from mean of all subjects in study. Target is assumed to be FMRIB58_FA_1mm. A pipeline that does the same as 'tbss_3_postreg -S' script from FSL Setting 'estimate_skeleton to False will use precomputed FMRIB58_FA-skeleton_1mm skeleton (same as 'tbss_3_postreg -T'). 
Example ------- >>> from nipype.workflows.dmri.fsl import tbss >>> tbss3 = tbss.create_tbss_3_postreg() >>> tbss3.inputs.inputnode.fa_list = ['s1_wrapped_FA.nii', 's2_wrapped_FA.nii', 's3_wrapped_FA.nii'] Inputs:: inputnode.field_list inputnode.fa_list Outputs:: outputnode.groupmask outputnode.skeleton_file outputnode.meanfa_file outputnode.mergefa_file """ # Create the inputnode inputnode = pe.Node(interface=util.IdentityInterface(fields=['field_list', 'fa_list']), name='inputnode') # Apply the warpfield to the masked FA image applywarp = pe.MapNode(interface=fsl.ApplyWarp(), iterfield=['in_file', 'field_file'], name="applywarp") if fsl.no_fsl(): warn('NO FSL found') else: applywarp.inputs.ref_file = fsl.Info.standard_image("FMRIB58_FA_1mm.nii.gz") # Merge the FA files into a 4D file mergefa = pe.Node(fsl.Merge(dimension="t"), name="mergefa") # Get a group mask groupmask = pe.Node(fsl.ImageMaths(op_string="-max 0 -Tmin -bin", out_data_type="char", suffix="_mask"), name="groupmask") maskgroup = pe.Node(fsl.ImageMaths(op_string="-mas", suffix="_masked"), name="maskgroup") tbss3 = pe.Workflow(name=name) tbss3.connect([ (inputnode, applywarp, [("fa_list", "in_file"), ("field_list", "field_file")]), (applywarp, mergefa, [("out_file", "in_files")]), (mergefa, groupmask, [("merged_file", "in_file")]), (mergefa, maskgroup, [("merged_file", "in_file")]), (groupmask, maskgroup, [("out_file", "in_file2")]), ]) # Create outputnode outputnode = pe.Node(interface=util.IdentityInterface(fields=['groupmask', 'skeleton_file', 'meanfa_file', 'mergefa_file']), name='outputnode') if estimate_skeleton: # Take the mean over the fourth dimension meanfa = pe.Node(fsl.ImageMaths(op_string="-Tmean", suffix="_mean"), name="meanfa") # Use the mean FA volume to generate a tract skeleton makeskeleton = pe.Node(fsl.TractSkeleton(skeleton_file=True), name="makeskeleton") tbss3.connect([ (maskgroup, meanfa, [("out_file", "in_file")]), (meanfa, makeskeleton, [("out_file", "in_file")]), (groupmask, outputnode, [('out_file', 'groupmask')]), (makeskeleton, outputnode, [('skeleton_file', 'skeleton_file')]), (meanfa, outputnode, [('out_file', 'meanfa_file')]), (maskgroup, outputnode, [('out_file', 'mergefa_file')]) ]) else: #$FSLDIR/bin/fslmaths $FSLDIR/data/standard/FMRIB58_FA_1mm -mas mean_FA_mask mean_FA maskstd = pe.Node(fsl.ImageMaths(op_string="-mas", suffix="_masked"), name="maskstd") maskstd.inputs.in_file = fsl.Info.standard_image("FMRIB58_FA_1mm.nii.gz") #$FSLDIR/bin/fslmaths mean_FA -bin mean_FA_mask binmaskstd = pe.Node(fsl.ImageMaths(op_string="-bin"), name="binmaskstd") #$FSLDIR/bin/fslmaths all_FA -mas mean_FA_mask all_FA maskgroup2 = pe.Node(fsl.ImageMaths(op_string="-mas", suffix="_masked"), name="maskgroup2") tbss3.connect([ (groupmask, maskstd, [("out_file", "in_file2")]), (maskstd, binmaskstd, [("out_file", "in_file")]), (maskgroup, maskgroup2, [("out_file", "in_file")]), (binmaskstd, maskgroup2, [("out_file", "in_file2")]) ]) outputnode.inputs.skeleton_file = fsl.Info.standard_image("FMRIB58_FA-skeleton_1mm.nii.gz") tbss3.connect([ (binmaskstd, outputnode, [('out_file', 'groupmask')]), (maskstd, outputnode, [('out_file', 'meanfa_file')]), (maskgroup2, outputnode, [('out_file', 'mergefa_file')]) ]) return tbss3 def tbss4_op_string(skeleton_thresh): op_string = "-thr %.1f -bin" % skeleton_thresh return op_string def create_tbss_4_prestats(name='tbss_4_prestats'): """Post-registration processing:Creating skeleton mask using a threshold projecting all FA data onto skeleton. 
A pipeline that does the same as tbss_4_prestats script from FSL Example ------- >>> from nipype.workflows.dmri.fsl import tbss >>> tbss4 = tbss.create_tbss_4_prestats(name='tbss4') >>> tbss4.inputs.inputnode.skeleton_thresh = 0.2 Inputs:: inputnode.skeleton_thresh inputnode.groupmask inputnode.skeleton_file inputnode.meanfa_file inputnode.mergefa_file Outputs:: outputnode.all_FA_skeletonised outputnode.mean_FA_skeleton_mask outputnode.distance_map outputnode.skeleton_file """ # Create inputnode inputnode = pe.Node(interface=util.IdentityInterface(fields=['groupmask', 'skeleton_file', 'meanfa_file', 'mergefa_file', 'skeleton_thresh']), name='inputnode') # Mask the skeleton at the threshold skeletonmask = pe.Node(fsl.ImageMaths( suffix="_mask"), name="skeletonmask") # Invert the brainmask then add in the tract skeleton invertmask = pe.Node(fsl.ImageMaths(suffix="_inv", op_string="-mul -1 -add 1 -add"), name="invertmask") # Generate a distance map with the tract skeleton distancemap = pe.Node(fsl.DistanceMap(), name="distancemap") # Project the FA values onto the skeleton projectfa = pe.Node(fsl.TractSkeleton(project_data=True, skeleton_file=True, use_cingulum_mask=True), name="projectfa") # Create tbss4 workflow tbss4 = pe.Workflow(name=name) tbss4.connect([ (inputnode, invertmask, [("groupmask", "in_file")]), (inputnode, skeletonmask, [("skeleton_file", "in_file"), (('skeleton_thresh', tbss4_op_string), 'op_string')]), (inputnode, projectfa, [('skeleton_thresh', 'threshold'), ("meanfa_file", "in_file"), ("mergefa_file", "data_file")]), (skeletonmask, invertmask, [("out_file", "in_file2")]), (invertmask, distancemap, [("out_file", "in_file")]), (distancemap, projectfa, [("distance_map", "distance_map")]), ]) # Create the outputnode outputnode = pe.Node(interface=util.IdentityInterface(fields=['projectedfa_file', 'skeleton_mask', 'distance_map', 'skeleton_file']), name='outputnode') tbss4.connect([ (projectfa, outputnode, [('projected_data', 'projectedfa_file'), ('skeleton_file', 'skeleton_file') ]), (distancemap, outputnode, [('distance_map', 'distance_map')]), (skeletonmask, outputnode, [('out_file', 'skeleton_mask')]) ]) return tbss4 def create_tbss_all(name='tbss_all', estimate_skeleton=True): """Create a pipeline that combines create_tbss_* pipelines Example ------- >>> from nipype.workflows.dmri.fsl import tbss >>> tbss = tbss.create_tbss_all('tbss') >>> tbss.inputs.inputnode.skeleton_thresh = 0.2 Inputs:: inputnode.fa_list inputnode.skeleton_thresh Outputs:: outputnode.meanfa_file outputnode.projectedfa_file outputnode.skeleton_file outputnode.skeleton_mask """ # Define the inputnode inputnode = pe.Node(interface=util.IdentityInterface(fields=['fa_list', 'skeleton_thresh']), name='inputnode') tbss1 = create_tbss_1_preproc(name='tbss1') tbss2 = create_tbss_2_reg(name='tbss2') if fsl.no_fsl(): warn('NO FSL found') else: tbss2.inputs.inputnode.target = fsl.Info.standard_image("FMRIB58_FA_1mm.nii.gz") tbss3 = create_tbss_3_postreg(name='tbss3', estimate_skeleton=estimate_skeleton) tbss4 = create_tbss_4_prestats(name='tbss4') tbss_all = pe.Workflow(name=name) tbss_all.connect([ (inputnode, tbss1, [('fa_list', 'inputnode.fa_list')]), (inputnode, tbss4, [('skeleton_thresh', 'inputnode.skeleton_thresh')]), (tbss1, tbss2, [('outputnode.fa_list', 'inputnode.fa_list'), ('outputnode.mask_list', 'inputnode.mask_list')]), (tbss1, tbss3, [('outputnode.fa_list', 'inputnode.fa_list')]), (tbss2, tbss3, [('outputnode.field_list', 'inputnode.field_list')]), (tbss3, tbss4, [ ('outputnode.groupmask', 
'inputnode.groupmask'), ('outputnode.skeleton_file', 'inputnode.skeleton_file'), ('outputnode.meanfa_file', 'inputnode.meanfa_file'), ('outputnode.mergefa_file', 'inputnode.mergefa_file') ]) ]) # Define the outputnode outputnode = pe.Node(interface=util.IdentityInterface(fields=['groupmask', 'skeleton_file3', 'meanfa_file', 'mergefa_file', 'projectedfa_file', 'skeleton_file4', 'skeleton_mask', 'distance_map']), name='outputnode') outputall_node = pe.Node(interface=util.IdentityInterface( fields=['fa_list1', 'mask_list1', 'field_list2', 'groupmask3', 'skeleton_file3', 'meanfa_file3', 'mergefa_file3', 'projectedfa_file4', 'skeleton_mask4', 'distance_map4']), name='outputall_node') tbss_all.connect([ (tbss3, outputnode, [('outputnode.meanfa_file', 'meanfa_file'), ('outputnode.mergefa_file', 'mergefa_file'), ('outputnode.groupmask', 'groupmask'), ('outputnode.skeleton_file', 'skeleton_file3'), ]), (tbss4, outputnode, [('outputnode.projectedfa_file', 'projectedfa_file'), ('outputnode.skeleton_file', 'skeleton_file4'), ('outputnode.skeleton_mask', 'skeleton_mask'), ('outputnode.distance_map', 'distance_map'), ]), (tbss1, outputall_node, [('outputnode.fa_list', 'fa_list1'), ('outputnode.mask_list', 'mask_list1'), ]), (tbss2, outputall_node, [('outputnode.field_list', 'field_list2'), ]), (tbss3, outputall_node, [ ('outputnode.meanfa_file', 'meanfa_file3'), ('outputnode.mergefa_file', 'mergefa_file3'), ('outputnode.groupmask', 'groupmask3'), ('outputnode.skeleton_file', 'skeleton_file3'), ]), (tbss4, outputall_node, [ ('outputnode.projectedfa_file', 'projectedfa_file4'), ('outputnode.skeleton_mask', 'skeleton_mask4'), ('outputnode.distance_map', 'distance_map4'), ]), ]) return tbss_all def create_tbss_non_FA(name='tbss_non_FA'): """ A pipeline that implement tbss_non_FA in FSL Example ------- >>> from nipype.workflows.dmri.fsl import tbss >>> tbss_MD = tbss.create_tbss_non_FA() >>> tbss_MD.inputs.inputnode.file_list = [] >>> tbss_MD.inputs.inputnode.field_list = [] >>> tbss_MD.inputs.inputnode.skeleton_thresh = 0.2 >>> tbss_MD.inputs.inputnode.groupmask = './xxx' >>> tbss_MD.inputs.inputnode.meanfa_file = './xxx' >>> tbss_MD.inputs.inputnode.distance_map = [] Inputs:: inputnode.file_list inputnode.field_list inputnode.skeleton_thresh inputnode.groupmask inputnode.meanfa_file inputnode.distance_map Outputs:: outputnode.projected_nonFA_file """ # Define the inputnode inputnode = pe.Node(interface=util.IdentityInterface(fields=['file_list', 'field_list', 'skeleton_thresh', 'groupmask', 'meanfa_file', 'distance_map']), name='inputnode') # Apply the warpfield to the non FA image applywarp = pe.MapNode(interface=fsl.ApplyWarp(), iterfield=['in_file', 'field_file'], name="applywarp") if fsl.no_fsl(): warn('NO FSL found') else: applywarp.inputs.ref_file = fsl.Info.standard_image("FMRIB58_FA_1mm.nii.gz") # Merge the non FA files into a 4D file merge = pe.Node(fsl.Merge(dimension="t"), name="merge") #merged_file="all_FA.nii.gz" maskgroup = pe.Node(fsl.ImageMaths(op_string="-mas", suffix="_masked"), name="maskgroup") projectfa = pe.Node(fsl.TractSkeleton(project_data=True, #projected_data = 'test.nii.gz', use_cingulum_mask=True ), name="projectfa") tbss_non_FA = pe.Workflow(name=name) tbss_non_FA.connect([ (inputnode, applywarp, [('file_list', 'in_file'), ('field_list', 'field_file'), ]), (applywarp, merge, [("out_file", "in_files")]), (merge, maskgroup, [("merged_file", "in_file")]), (inputnode, maskgroup, [('groupmask', 'in_file2')]), (maskgroup, projectfa, [('out_file', 'data_file')]), (inputnode, 
projectfa, [('skeleton_thresh', 'threshold'), ("meanfa_file", "in_file"), ("distance_map", "distance_map"), ]), ]) # Define the outputnode outputnode = pe.Node(interface=util.IdentityInterface( fields=['projected_nonFA_file']), name='outputnode') tbss_non_FA.connect([ (projectfa, outputnode, [('projected_data', 'projected_nonFA_file'), ]), ]) return tbss_non_FA nipype-0.9.2/nipype/workflows/dmri/fsl/tests/000077500000000000000000000000001227300005300212405ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/dmri/fsl/tests/__init__.py000066400000000000000000000001621227300005300233500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: nipype-0.9.2/nipype/workflows/dmri/fsl/tests/test_dti.py000066400000000000000000000074701227300005300234410ustar00rootroot00000000000000import os from nipype.testing import (skipif) import nipype.workflows.fmri.fsl as fsl_wf import nipype.interfaces.fsl as fsl import nipype.interfaces.utility as util from nipype.interfaces.fsl import no_fsl, no_fsl_course_data import nipype.pipeline.engine as pe import warnings import tempfile import shutil from nipype.workflows.dmri.fsl.dti import create_bedpostx_pipeline @skipif(no_fsl) @skipif(no_fsl_course_data) def test_create_bedpostx_pipeline(): fsl_course_dir = os.path.abspath('fsl_course_data') mask_file = os.path.join(fsl_course_dir, "fdt/subj1.bedpostX/nodif_brain_mask.nii.gz") bvecs_file = os.path.join(fsl_course_dir, "fdt/subj1/bvecs") bvals_file = os.path.join(fsl_course_dir, "fdt/subj1/bvals") dwi_file = os.path.join(fsl_course_dir, "fdt/subj1/data.nii.gz") nipype_bedpostx = create_bedpostx_pipeline("nipype_bedpostx") nipype_bedpostx.inputs.inputnode.dwi = dwi_file nipype_bedpostx.inputs.inputnode.mask = mask_file nipype_bedpostx.inputs.inputnode.bvecs = bvecs_file nipype_bedpostx.inputs.inputnode.bvals = bvals_file nipype_bedpostx.inputs.xfibres.n_fibres = 2 nipype_bedpostx.inputs.xfibres.fudge = 1 nipype_bedpostx.inputs.xfibres.burn_in = 1000 nipype_bedpostx.inputs.xfibres.n_jumps = 1250 nipype_bedpostx.inputs.xfibres.sample_every = 25 with warnings.catch_warnings(): warnings.simplefilter("ignore") original_bedpostx = pe.Node(interface=fsl.BEDPOSTX(), name="original_bedpostx") original_bedpostx.inputs.dwi = dwi_file original_bedpostx.inputs.mask = mask_file original_bedpostx.inputs.bvecs = bvecs_file original_bedpostx.inputs.bvals = bvals_file original_bedpostx.inputs.environ['FSLPARALLEL'] = "" original_bedpostx.inputs.fibres = 2 original_bedpostx.inputs.weight = 1 original_bedpostx.inputs.burn_period = 1000 original_bedpostx.inputs.jumps = 1250 original_bedpostx.inputs.sampling = 25 test_f1 = pe.Node(util.AssertEqual(), name="mean_f1_test") test_f2 = pe.Node(util.AssertEqual(), name="mean_f2_test") test_th1 = pe.Node(util.AssertEqual(), name="mean_th1_test") test_th2 = pe.Node(util.AssertEqual(), name="mean_th2_test") test_ph1 = pe.Node(util.AssertEqual(), name="mean_ph1_test") test_ph2 = pe.Node(util.AssertEqual(), name="mean_ph2_test") pipeline = pe.Workflow(name="test_bedpostx") pipeline.base_dir = tempfile.mkdtemp(prefix="nipype_test_bedpostx_") def pickFirst(l): return l[0] def pickSecond(l): return l[1] pipeline.connect([(nipype_bedpostx, test_f1, [(("outputnode.mean_fsamples", pickFirst), "volume1")]), (nipype_bedpostx, test_f2, [(("outputnode.mean_fsamples", pickSecond), "volume1")]), (nipype_bedpostx, test_th1, [(("outputnode.mean_thsamples", pickFirst), "volume1")]), (nipype_bedpostx, test_th2, 
[(("outputnode.mean_thsamples", pickSecond), "volume1")]), (nipype_bedpostx, test_ph1, [(("outputnode.mean_phsamples", pickFirst), "volume1")]), (nipype_bedpostx, test_ph2, [(("outputnode.mean_phsamples", pickSecond), "volume1")]), (original_bedpostx, test_f1, [(("mean_fsamples", pickFirst), "volume2")]), (original_bedpostx, test_f2, [(("mean_fsamples", pickSecond), "volume2")]), (original_bedpostx, test_th1, [(("mean_thsamples", pickFirst), "volume2")]), (original_bedpostx, test_th2, [(("mean_thsamples", pickSecond), "volume2")]), (original_bedpostx, test_ph1, [(("mean_phsamples", pickFirst), "volume2")]), (original_bedpostx, test_ph2, [(("mean_phsamples", pickSecond), "volume2")]) ]) pipeline.run(plugin='Linear') shutil.rmtree(pipeline.base_dir) nipype-0.9.2/nipype/workflows/dmri/fsl/tests/test_epi.py000066400000000000000000000027731227300005300234370ustar00rootroot00000000000000import os from nipype.testing import (skipif) import nipype.workflows.fmri.fsl as fsl_wf import nipype.interfaces.fsl as fsl import nipype.interfaces.utility as util from nipype.interfaces.fsl import no_fsl, no_fsl_course_data import nipype.pipeline.engine as pe import warnings import tempfile import shutil from nipype.workflows.dmri.fsl.epi import create_eddy_correct_pipeline @skipif(no_fsl) @skipif(no_fsl_course_data) def test_create_eddy_correct_pipeline(): fsl_course_dir = os.path.abspath('fsl_course_data') dwi_file = os.path.join(fsl_course_dir, "fdt/subj1/data.nii.gz") nipype_eddycorrect = create_eddy_correct_pipeline("nipype_eddycorrect") nipype_eddycorrect.inputs.inputnode.in_file = dwi_file nipype_eddycorrect.inputs.inputnode.ref_num = 0 with warnings.catch_warnings(): warnings.simplefilter("ignore") original_eddycorrect = pe.Node(interface=fsl.EddyCorrect(), name="original_eddycorrect") original_eddycorrect.inputs.in_file = dwi_file original_eddycorrect.inputs.ref_num = 0 test = pe.Node(util.AssertEqual(), name="eddy_corrected_dwi_test") pipeline = pe.Workflow(name="test_eddycorrect") pipeline.base_dir = tempfile.mkdtemp(prefix="nipype_test_eddycorrect_") pipeline.connect([(nipype_eddycorrect, test, [("outputnode.eddy_corrected", "volume1")]), (original_eddycorrect, test, [("eddy_corrected", "volume2")]), ]) pipeline.run(plugin='Linear') shutil.rmtree(pipeline.base_dir) nipype-0.9.2/nipype/workflows/dmri/fsl/tests/test_tbss.py000066400000000000000000000156171227300005300236360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from nipype.interfaces.fsl.base import no_fsl, no_fsl_course_data import nipype.pipeline.engine as pe import nipype.interfaces.utility as util from nipype.testing import skipif import tempfile import shutil from subprocess import call from nipype.workflows.dmri.fsl.tbss import create_tbss_all import nipype.interfaces.io as nio from nipype.interfaces import fsl def _tbss_test_helper(estimate_skeleton): fsl_course_dir = os.path.abspath('fsl_course_data') fsl.FSLCommand.set_default_output_type('NIFTI_GZ') test_dir = tempfile.mkdtemp(prefix="nipype_test_tbss_") tbss_orig_dir = os.path.join(test_dir, "tbss_all_original") os.mkdir(tbss_orig_dir) old_dir = os.getcwd() os.chdir(tbss_orig_dir) subjects = ['1260', '1549', '1636', '1651', '2078', '2378'] FA_list = [os.path.join(fsl_course_dir, 'tbss', subject_id + '.nii.gz') for subject_id in subjects] for f in FA_list: shutil.copy(f, os.getcwd()) call(['tbss_1_preproc'] + [subject_id + '.nii.gz' for subject_id in subjects], 
env=os.environ.update({'FSLOUTPUTTYPE': 'NIFTI_GZ'})) tbss1_orig_dir = os.path.join(test_dir, "tbss1_original") shutil.copytree(tbss_orig_dir, tbss1_orig_dir) call(['tbss_2_reg', '-T'], env=os.environ.update({'FSLOUTPUTTYPE': 'NIFTI_GZ'})) tbss2_orig_dir = os.path.join(test_dir, "tbss2_original") shutil.copytree(tbss_orig_dir, tbss2_orig_dir) if estimate_skeleton: call(['tbss_3_postreg', '-S'], env=os.environ.update({'FSLOUTPUTTYPE': 'NIFTI_GZ'})) else: call(['tbss_3_postreg', '-T'], env=os.environ.update({'FSLOUTPUTTYPE': 'NIFTI_GZ'})) tbss3_orig_dir = os.path.join(test_dir, "tbss3_original") shutil.copytree(tbss_orig_dir, tbss3_orig_dir) call(['tbss_4_prestats', '0.2'], env=os.environ.update({'FSLOUTPUTTYPE': 'NIFTI_GZ'})) tbss4_orig_dir = os.path.join(test_dir, "tbss4_original") shutil.copytree(tbss_orig_dir, tbss4_orig_dir) pipeline = pe.Workflow(name="test_tbss") pipeline.base_dir = os.path.join(test_dir, "tbss_nipype") tbss = create_tbss_all(estimate_skeleton=estimate_skeleton) tbss.inputs.inputnode.fa_list = FA_list tbss.inputs.inputnode.skeleton_thresh = 0.2 tbss1_original_datasource = pe.Node(nio.DataGrabber(outfields=['fa_list', 'mask_list']), name='tbss1_original_datasource') tbss1_original_datasource.inputs.base_directory = tbss1_orig_dir tbss1_original_datasource.inputs.template = 'FA/%s_FA%s.nii.gz' tbss1_original_datasource.inputs.template_args = dict(fa_list=[[subjects, '']], mask_list=[[subjects, '_mask']]) tbss1_test_fa = pe.MapNode(util.AssertEqual(), name="tbss1_fa_test", iterfield=['volume1', 'volume2']) tbss1_test_mask = pe.MapNode(util.AssertEqual(), name="tbss1_mask_test", iterfield=['volume1', 'volume2']) pipeline.connect(tbss, 'tbss1.outputnode.fa_list', tbss1_test_fa, 'volume1') pipeline.connect(tbss, 'tbss1.outputnode.mask_list', tbss1_test_mask, 'volume1') pipeline.connect(tbss1_original_datasource, 'fa_list', tbss1_test_fa, 'volume2') pipeline.connect(tbss1_original_datasource, 'mask_list', tbss1_test_mask, 'volume2') tbss2_original_datasource = pe.Node(nio.DataGrabber(outfields=['field_list']), name='tbss2_original_datasource') tbss2_original_datasource.inputs.base_directory = tbss2_orig_dir tbss2_original_datasource.inputs.template = 'FA/%s_FA%s.nii.gz' tbss2_original_datasource.inputs.template_args = dict(field_list=[[subjects, '_to_target_warp']]) tbss2_test_field = pe.MapNode(util.AssertEqual(), name="tbss2_test_field", iterfield=['volume1', 'volume2']) pipeline.connect(tbss, 'tbss2.outputnode.field_list', tbss2_test_field, 'volume1') pipeline.connect(tbss2_original_datasource, 'field_list', tbss2_test_field, 'volume2') tbss3_original_datasource = pe.Node(nio.DataGrabber(outfields=['groupmask', 'skeleton_file', 'meanfa_file', 'mergefa_file']), name='tbss3_original_datasource') tbss3_original_datasource.inputs.base_directory = tbss3_orig_dir tbss3_original_datasource.inputs.template = 'stats/%s.nii.gz' tbss3_original_datasource.inputs.template_args = dict(groupmask=[['mean_FA_mask']], skeleton_file=[['mean_FA_skeleton']], meanfa_file=[['mean_FA']], mergefa_file=[['all_FA']]) tbss3_test_groupmask = pe.Node(util.AssertEqual(), name="tbss3_test_groupmask") tbss3_test_skeleton_file = pe.Node(util.AssertEqual(), name="tbss3_test_skeleton_file") tbss3_test_meanfa_file = pe.Node(util.AssertEqual(), name="tbss3_test_meanfa_file") tbss3_test_mergefa_file = pe.Node(util.AssertEqual(), name="tbss3_test_mergefa_file") pipeline.connect(tbss, 'tbss3.outputnode.groupmask', tbss3_test_groupmask, 'volume1') pipeline.connect(tbss3_original_datasource, 'groupmask', 
tbss3_test_groupmask, 'volume2') pipeline.connect(tbss, 'tbss3.outputnode.skeleton_file', tbss3_test_skeleton_file, 'volume1') pipeline.connect(tbss3_original_datasource, 'skeleton_file', tbss3_test_skeleton_file, 'volume2') pipeline.connect(tbss, 'tbss3.outputnode.meanfa_file', tbss3_test_meanfa_file, 'volume1') pipeline.connect(tbss3_original_datasource, 'meanfa_file', tbss3_test_meanfa_file, 'volume2') pipeline.connect(tbss, 'tbss3.outputnode.mergefa_file', tbss3_test_mergefa_file, 'volume1') pipeline.connect(tbss3_original_datasource, 'mergefa_file', tbss3_test_mergefa_file, 'volume2') tbss4_original_datasource = pe.Node(nio.DataGrabber(outfields=['all_FA_skeletonised', 'mean_FA_skeleton_mask']), name='tbss4_original_datasource') tbss4_original_datasource.inputs.base_directory = tbss4_orig_dir tbss4_original_datasource.inputs.template = 'stats/%s.nii.gz' tbss4_original_datasource.inputs.template_args = dict(all_FA_skeletonised=[['all_FA_skeletonised']], mean_FA_skeleton_mask=[['mean_FA_skeleton_mask']]) tbss4_test_all_FA_skeletonised = pe.Node(util.AssertEqual(), name="tbss4_test_all_FA_skeletonised") tbss4_test_mean_FA_skeleton_mask = pe.Node(util.AssertEqual(), name="tbss4_test_mean_FA_skeleton_mask") pipeline.connect(tbss, 'tbss4.outputnode.projectedfa_file', tbss4_test_all_FA_skeletonised, 'volume1') pipeline.connect(tbss4_original_datasource, 'all_FA_skeletonised', tbss4_test_all_FA_skeletonised, 'volume2') pipeline.connect(tbss, 'tbss4.outputnode.skeleton_mask', tbss4_test_mean_FA_skeleton_mask, 'volume1') pipeline.connect(tbss4_original_datasource, 'mean_FA_skeleton_mask', tbss4_test_mean_FA_skeleton_mask, 'volume2') pipeline.run(plugin='Linear') os.chdir(old_dir) shutil.rmtree(test_dir) @skipif(no_fsl) @skipif(no_fsl_course_data) def test_tbss_est_skeleton(): _tbss_test_helper(True) @skipif(no_fsl) @skipif(no_fsl_course_data) def test_tbss_est_skeleton_use_precomputed_skeleton(): _tbss_test_helper(False) nipype-0.9.2/nipype/workflows/dmri/mrtrix/000077500000000000000000000000001227300005300206375ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/dmri/mrtrix/__init__.py000066400000000000000000000002631227300005300227510ustar00rootroot00000000000000from diffusion import create_mrtrix_dti_pipeline from connectivity_mapping import create_connectivity_pipeline from group_connectivity import (create_group_connectivity_pipeline) nipype-0.9.2/nipype/workflows/dmri/mrtrix/connectivity_mapping.py000066400000000000000000000702301227300005300254440ustar00rootroot00000000000000import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.fsl as fsl import nipype.interfaces.freesurfer as fs # freesurfer import nipype.interfaces.mrtrix as mrtrix import nipype.interfaces.cmtk as cmtk import nipype.interfaces.dipy as dipy import nipype.algorithms.misc as misc import inspect import os, os.path as op # system functions from ..fsl.epi import create_eddy_correct_pipeline from ..connectivity.nx import create_networkx_pipeline, create_cmats_to_csv_pipeline from nipype.interfaces.utility import Function from ...misc.utils import select_aparc_annot def create_connectivity_pipeline(name="connectivity", parcellation_name='scale500'): """Creates a pipeline that does the same connectivity processing as in the :ref:`example_dmri_connectivity_advanced` example script. 
Given a subject id (and completed Freesurfer reconstruction) diffusion-weighted image, b-values, and b-vectors, the workflow will return the subject's connectome as a Connectome File Format (CFF) file for use in Connectome Viewer (http://www.cmtk.org). Example ------- >>> from nipype.workflows.dmri.mrtrix.connectivity_mapping import create_connectivity_pipeline >>> conmapper = create_connectivity_pipeline("nipype_conmap") >>> conmapper.inputs.inputnode.subjects_dir = '.' >>> conmapper.inputs.inputnode.subject_id = 'subj1' >>> conmapper.inputs.inputnode.dwi = 'data.nii.gz' >>> conmapper.inputs.inputnode.bvecs = 'bvecs' >>> conmapper.inputs.inputnode.bvals = 'bvals' >>> conmapper.run() # doctest: +SKIP Inputs:: inputnode.subject_id inputnode.subjects_dir inputnode.dwi inputnode.bvecs inputnode.bvals inputnode.resolution_network_file Outputs:: outputnode.connectome outputnode.cmatrix outputnode.networks outputnode.fa outputnode.struct outputnode.tracts outputnode.rois outputnode.odfs outputnode.filtered_tractography outputnode.tdi outputnode.nxstatscff outputnode.nxcsv outputnode.cmatrices_csv outputnode.mean_fiber_length outputnode.median_fiber_length outputnode.fiber_length_std """ inputnode_within = pe.Node(util.IdentityInterface(fields=["subject_id", "dwi", "bvecs", "bvals", "subjects_dir", "resolution_network_file"]), name="inputnode_within") FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource') FreeSurferSourceLH = pe.Node(interface=nio.FreeSurferSource(), name='fssourceLH') FreeSurferSourceLH.inputs.hemi = 'lh' FreeSurferSourceRH = pe.Node(interface=nio.FreeSurferSource(), name='fssourceRH') FreeSurferSourceRH.inputs.hemi = 'rh' """ Creating the workflow's nodes ============================= """ """ Conversion nodes ---------------- """ """ A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject. Nodes are used to convert the following: * Original structural image to NIFTI * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres are converted to GIFTI for visualization in ConnectomeViewer * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI """ mri_convert_Brain = pe.Node(interface=fs.MRIConvert(), name='mri_convert_Brain') mri_convert_Brain.inputs.out_type = 'nii' mri_convert_ROI_scale500 = mri_convert_Brain.clone('mri_convert_ROI_scale500') mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH') mris_convertLH.inputs.out_datatype = 'gii' mris_convertRH = mris_convertLH.clone('mris_convertRH') mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite') mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite') mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated') mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated') mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere') mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere') mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels') mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels') """ Diffusion processing nodes -------------------------- .. seealso:: dmri_mrtrix_dti.py Tutorial that focuses solely on the MRtrix diffusion processing http://www.brain.org.au/software/mrtrix/index.html MRtrix's online documentation """ """ b-values and b-vectors stored in FSL's format are converted into a single encoding file for MRTrix. 
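The same conversion can also be run on its own; a minimal sketch (the file
names are placeholders)::

    import nipype.interfaces.mrtrix as mrtrix
    fsl2mrtrix = mrtrix.FSL2MRTrix()
    fsl2mrtrix.inputs.bval_file = 'bvals'
    fsl2mrtrix.inputs.bvec_file = 'bvecs'
    fsl2mrtrix.run()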
""" fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(),name='fsl2mrtrix') """ Distortions induced by eddy currents are corrected prior to fitting the tensors. The first image is used as a reference for which to warp the others. """ eddycorrect = create_eddy_correct_pipeline(name='eddycorrect') eddycorrect.inputs.inputnode.ref_num = 1 """ Tensors are fitted to each voxel in the diffusion-weighted image and from these three maps are created: * Major eigenvector in each voxel * Apparent diffusion coefficient * Fractional anisotropy """ dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(),name='dwi2tensor') tensor2vector = pe.Node(interface=mrtrix.Tensor2Vector(),name='tensor2vector') tensor2adc = pe.Node(interface=mrtrix.Tensor2ApparentDiffusion(),name='tensor2adc') tensor2fa = pe.Node(interface=mrtrix.Tensor2FractionalAnisotropy(),name='tensor2fa') MRconvert_fa = pe.Node(interface=mrtrix.MRConvert(),name='MRconvert_fa') MRconvert_fa.inputs.extension = 'nii' """ These nodes are used to create a rough brain mask from the b0 image. The b0 image is extracted from the original diffusion-weighted image, put through a simple thresholding routine, and smoothed using a 3x3 median filter. """ MRconvert = pe.Node(interface=mrtrix.MRConvert(),name='MRconvert') MRconvert.inputs.extract_at_axis = 3 MRconvert.inputs.extract_at_coordinate = [0] threshold_b0 = pe.Node(interface=mrtrix.Threshold(),name='threshold_b0') median3d = pe.Node(interface=mrtrix.MedianFilter3D(),name='median3d') """ The brain mask is also used to help identify single-fiber voxels. This is done by passing the brain mask through two erosion steps, multiplying the remaining mask with the fractional anisotropy map, and thresholding the result to obtain some highly anisotropic within-brain voxels. """ erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(),name='erode_mask_firstpass') erode_mask_secondpass = pe.Node(interface=mrtrix.Erode(),name='erode_mask_secondpass') MRmultiply = pe.Node(interface=mrtrix.MRMultiply(),name='MRmultiply') MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge') threshold_FA = pe.Node(interface=mrtrix.Threshold(),name='threshold_FA') threshold_FA.inputs.absolute_threshold_value = 0.7 """ For whole-brain tracking we also require a broad white-matter seed mask. This is created by generating a white matter mask, given a brainmask, and thresholding it at a reasonably high level. """ bet = pe.Node(interface=fsl.BET(mask = True), name = 'bet_b0') gen_WM_mask = pe.Node(interface=mrtrix.GenerateWhiteMatterMask(),name='gen_WM_mask') threshold_wmmask = pe.Node(interface=mrtrix.Threshold(),name='threshold_wmmask') threshold_wmmask.inputs.absolute_threshold_value = 0.4 """ The spherical deconvolution step depends on the estimate of the response function in the highly anisotropic voxels we obtained above. .. warning:: For damaged or pathological brains one should take care to lower the maximum harmonic order of these steps. """ estimateresponse = pe.Node(interface=mrtrix.EstimateResponseForSH(),name='estimateresponse') estimateresponse.inputs.maximum_harmonic_order = 6 csdeconv = pe.Node(interface=mrtrix.ConstrainedSphericalDeconvolution(),name='csdeconv') csdeconv.inputs.maximum_harmonic_order = 6 """ Finally, we track probabilistically using the orientation distribution functions obtained earlier. The tracts are then used to generate a tract-density image, and they are also converted to TrackVis format. 
""" probCSDstreamtrack = pe.Node(interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(),name='probCSDstreamtrack') probCSDstreamtrack.inputs.inputmodel = 'SD_PROB' probCSDstreamtrack.inputs.desired_number_of_tracks = 150000 tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(),name='tracks2prob') tracks2prob.inputs.colour = True MRconvert_tracks2prob = MRconvert_fa.clone(name='MRconvert_tracks2prob') tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(),name='tck2trk') trk2tdi = pe.Node(interface=dipy.TrackDensityMap(),name='trk2tdi') """ Structural segmentation nodes ----------------------------- """ """ The following node identifies the transformation between the diffusion-weighted image and the structural image. This transformation is then applied to the tracts so that they are in the same space as the regions of interest. """ coregister = pe.Node(interface=fsl.FLIRT(dof=6), name = 'coregister') coregister.inputs.cost = ('normmi') """ Parcellation is performed given the aparc+aseg image from Freesurfer. The CMTK Parcellation step subdivides these regions to return a higher-resolution parcellation scheme. The parcellation used here is entitled "scale500" and returns 1015 regions. """ parcellate = pe.Node(interface=cmtk.Parcellate(), name="Parcellate") parcellate.inputs.parcellation_name = parcellation_name """ The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts and outputs a number of different files. The most important of which is the connectivity network itself, which is stored as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). Also outputted are various NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the specific tracts that connect between user-selected regions. Here we choose the Lausanne2008 parcellation scheme, since we are incorporating the CMTK parcellation step. """ creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix") creatematrix.inputs.count_region_intersections = True """ Next we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file. The inspect.getfile command is used to package this script into the resulting CFF file, so that it is easy to look back at the processing parameters that were used. """ CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter") CFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe())) giftiSurfaces = pe.Node(interface=util.Merge(8), name="GiftiSurfaces") giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels") niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes") fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays") """ We also create a node to calculate several network metrics on our resulting file, and another CFF converter which will be used to package these networks into a single file. 
""" networkx = create_networkx_pipeline(name='networkx') cmats_to_csv = create_cmats_to_csv_pipeline(name='cmats_to_csv') nfibs_to_csv = pe.Node(interface=misc.Matlab2CSV(), name='nfibs_to_csv') merge_nfib_csvs = pe.Node(interface=misc.MergeCSVFiles(), name='merge_nfib_csvs') merge_nfib_csvs.inputs.extra_column_heading = 'Subject' merge_nfib_csvs.inputs.out_file = 'fibers.csv' NxStatsCFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="NxStatsCFFConverter") NxStatsCFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe())) """ Connecting the workflow ======================= Here we connect our processing pipeline. """ """ Connecting the inputs, FreeSurfer nodes, and conversions -------------------------------------------------------- """ mapping = pe.Workflow(name='mapping') """ First, we connect the input node to the FreeSurfer input nodes. """ mapping.connect([(inputnode_within, FreeSurferSource,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode_within, FreeSurferSource,[("subject_id","subject_id")])]) mapping.connect([(inputnode_within, FreeSurferSourceLH,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode_within, FreeSurferSourceLH,[("subject_id","subject_id")])]) mapping.connect([(inputnode_within, FreeSurferSourceRH,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode_within, FreeSurferSourceRH,[("subject_id","subject_id")])]) mapping.connect([(inputnode_within, parcellate,[("subjects_dir","subjects_dir")])]) mapping.connect([(inputnode_within, parcellate,[("subject_id","subject_id")])]) mapping.connect([(parcellate, mri_convert_ROI_scale500,[('roi_file','in_file')])]) """ Nifti conversion for subject's stripped brain image from Freesurfer: """ mapping.connect([(FreeSurferSource, mri_convert_Brain,[('brain','in_file')])]) """ Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres) """ mapping.connect([(FreeSurferSourceLH, mris_convertLH,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRH,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite,[('white','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite,[('white','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated,[('inflated','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated,[('inflated','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere,[('sphere','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere,[('sphere','in_file')])]) """ The annotation files are converted using the pial surface as a map via the MRIsConvert interface. One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files specifically (rather than e.g. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource. 
""" mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels,[('pial','in_file')])]) mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, [(('annot', select_aparc_annot), 'annot_file')])]) mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, [(('annot', select_aparc_annot), 'annot_file')])]) """ Diffusion Processing -------------------- Now we connect the tensor computations: """ mapping.connect([(inputnode_within, fsl2mrtrix, [("bvecs", "bvec_file"), ("bvals", "bval_file")])]) mapping.connect([(inputnode_within, eddycorrect,[("dwi","inputnode.in_file")])]) mapping.connect([(eddycorrect, dwi2tensor,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(fsl2mrtrix, dwi2tensor,[("encoding_file","encoding_file")])]) mapping.connect([(dwi2tensor, tensor2vector,[['tensor','in_file']]), (dwi2tensor, tensor2adc,[['tensor','in_file']]), (dwi2tensor, tensor2fa,[['tensor','in_file']]), ]) mapping.connect([(tensor2fa, MRmult_merge,[("FA","in1")])]) mapping.connect([(tensor2fa, MRconvert_fa,[("FA","in_file")])]) """ This block creates the rough brain mask to be multiplied, mulitplies it with the fractional anisotropy image, and thresholds it to get the single-fiber voxels. """ mapping.connect([(eddycorrect, MRconvert,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(MRconvert, threshold_b0,[("converted","in_file")])]) mapping.connect([(threshold_b0, median3d,[("out_file","in_file")])]) mapping.connect([(median3d, erode_mask_firstpass,[("out_file","in_file")])]) mapping.connect([(erode_mask_firstpass, erode_mask_secondpass,[("out_file","in_file")])]) mapping.connect([(erode_mask_secondpass, MRmult_merge,[("out_file","in2")])]) mapping.connect([(MRmult_merge, MRmultiply,[("out","in_files")])]) mapping.connect([(MRmultiply, threshold_FA,[("out_file","in_file")])]) """ Here the thresholded white matter mask is created for seeding the tractography. """ mapping.connect([(eddycorrect, bet,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(eddycorrect, gen_WM_mask,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(bet, gen_WM_mask,[("mask_file","binary_mask")])]) mapping.connect([(fsl2mrtrix, gen_WM_mask,[("encoding_file","encoding_file")])]) mapping.connect([(gen_WM_mask, threshold_wmmask,[("WMprobabilitymap","in_file")])]) """ Next we estimate the fiber response distribution. """ mapping.connect([(eddycorrect, estimateresponse,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(fsl2mrtrix, estimateresponse,[("encoding_file","encoding_file")])]) mapping.connect([(threshold_FA, estimateresponse,[("out_file","mask_image")])]) """ Run constrained spherical deconvolution. """ mapping.connect([(eddycorrect, csdeconv,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(gen_WM_mask, csdeconv,[("WMprobabilitymap","mask_image")])]) mapping.connect([(estimateresponse, csdeconv,[("response","response_file")])]) mapping.connect([(fsl2mrtrix, csdeconv,[("encoding_file","encoding_file")])]) """ Connect the tractography and compute the tract density image. 
""" mapping.connect([(threshold_wmmask, probCSDstreamtrack,[("out_file","seed_file")])]) mapping.connect([(csdeconv, probCSDstreamtrack,[("spherical_harmonics_image","in_file")])]) mapping.connect([(probCSDstreamtrack, tracks2prob,[("tracked","in_file")])]) mapping.connect([(eddycorrect, tracks2prob,[("outputnode.eddy_corrected","template_file")])]) mapping.connect([(tracks2prob, MRconvert_tracks2prob,[("tract_image","in_file")])]) """ Structural Processing --------------------- First, we coregister the diffusion image to the structural image """ mapping.connect([(eddycorrect, coregister,[("outputnode.eddy_corrected","in_file")])]) mapping.connect([(mri_convert_Brain, coregister,[('out_file','reference')])]) """ The MRtrix-tracked fibers are converted to TrackVis format (with voxel and data dimensions grabbed from the DWI). The connectivity matrix is created with the transformed .trk fibers and the parcellation file. """ mapping.connect([(eddycorrect, tck2trk,[("outputnode.eddy_corrected","image_file")])]) mapping.connect([(mri_convert_Brain, tck2trk,[("out_file","registration_image_file")])]) mapping.connect([(coregister, tck2trk,[("out_matrix_file","matrix_file")])]) mapping.connect([(probCSDstreamtrack, tck2trk,[("tracked","in_file")])]) mapping.connect([(tck2trk, creatematrix,[("out_file","tract_file")])]) mapping.connect([(tck2trk, trk2tdi,[("out_file","in_file")])]) mapping.connect(inputnode_within, 'resolution_network_file', creatematrix, 'resolution_network_file') mapping.connect([(inputnode_within, creatematrix,[("subject_id","out_matrix_file")])]) mapping.connect([(inputnode_within, creatematrix,[("subject_id","out_matrix_mat_file")])]) mapping.connect([(parcellate, creatematrix,[("roi_file","roi_file")])]) """ The merge nodes defined earlier are used here to create lists of the files which are destined for the CFFConverter. """ mapping.connect([(mris_convertLH, giftiSurfaces,[("converted","in1")])]) mapping.connect([(mris_convertRH, giftiSurfaces,[("converted","in2")])]) mapping.connect([(mris_convertLHwhite, giftiSurfaces,[("converted","in3")])]) mapping.connect([(mris_convertRHwhite, giftiSurfaces,[("converted","in4")])]) mapping.connect([(mris_convertLHinflated, giftiSurfaces,[("converted","in5")])]) mapping.connect([(mris_convertRHinflated, giftiSurfaces,[("converted","in6")])]) mapping.connect([(mris_convertLHsphere, giftiSurfaces,[("converted","in7")])]) mapping.connect([(mris_convertRHsphere, giftiSurfaces,[("converted","in8")])]) mapping.connect([(mris_convertLHlabels, giftiLabels,[("converted","in1")])]) mapping.connect([(mris_convertRHlabels, giftiLabels,[("converted","in2")])]) mapping.connect([(parcellate, niftiVolumes,[("roi_file","in1")])]) mapping.connect([(eddycorrect, niftiVolumes,[("outputnode.eddy_corrected","in2")])]) mapping.connect([(mri_convert_Brain, niftiVolumes,[("out_file","in3")])]) mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file","in1")])]) mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file_mm","in2")])]) mapping.connect([(creatematrix, fiberDataArrays,[("fiber_length_file","in3")])]) mapping.connect([(creatematrix, fiberDataArrays,[("fiber_label_file","in4")])]) """ This block actually connects the merged lists to the CFF converter. We pass the surfaces and volumes that are to be included, as well as the tracts and the network itself. The currently running pipeline (dmri_connectivity_advanced.py) is also scraped and included in the CFF file. 
This makes it easy for the user to examine the entire processing pathway used to generate the end product. """ mapping.connect([(giftiSurfaces, CFFConverter,[("out","gifti_surfaces")])]) mapping.connect([(giftiLabels, CFFConverter,[("out","gifti_labels")])]) mapping.connect([(creatematrix, CFFConverter,[("matrix_files","gpickled_networks")])]) mapping.connect([(niftiVolumes, CFFConverter,[("out","nifti_volumes")])]) mapping.connect([(fiberDataArrays, CFFConverter,[("out","data_files")])]) mapping.connect([(creatematrix, CFFConverter,[("filtered_tractography","tract_files")])]) mapping.connect([(inputnode_within, CFFConverter,[("subject_id","title")])]) """ The graph theoretical metrics which have been generated are placed into another CFF file. """ mapping.connect([(inputnode_within, networkx,[("subject_id","inputnode.extra_field")])]) mapping.connect([(creatematrix, networkx,[("intersection_matrix_file","inputnode.network_file")])]) mapping.connect([(networkx, NxStatsCFFConverter,[("outputnode.network_files","gpickled_networks")])]) mapping.connect([(giftiSurfaces, NxStatsCFFConverter,[("out","gifti_surfaces")])]) mapping.connect([(giftiLabels, NxStatsCFFConverter,[("out","gifti_labels")])]) mapping.connect([(niftiVolumes, NxStatsCFFConverter,[("out","nifti_volumes")])]) mapping.connect([(fiberDataArrays, NxStatsCFFConverter,[("out","data_files")])]) mapping.connect([(inputnode_within, NxStatsCFFConverter,[("subject_id","title")])]) mapping.connect([(inputnode_within, cmats_to_csv,[("subject_id","inputnode.extra_field")])]) mapping.connect([(creatematrix, cmats_to_csv,[("matlab_matrix_files","inputnode.matlab_matrix_files")])]) mapping.connect([(creatematrix, nfibs_to_csv,[("stats_file","in_file")])]) mapping.connect([(nfibs_to_csv, merge_nfib_csvs,[("csv_files","in_files")])]) mapping.connect([(inputnode_within, merge_nfib_csvs,[("subject_id","extra_field")])]) """ Create a higher-level workflow -------------------------------------- Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding their names to the subject list and their data to the proper folders. 
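As a rough usage sketch (not executed here), the returned workflow could be instantiated and run on a single subject along these lines, with all paths and file names as placeholders::

    conn = create_connectivity_pipeline("nipype_conmap")
    conn.inputs.inputnode.subjects_dir = '/path/to/freesurfer/subjects'
    conn.inputs.inputnode.subject_id = 'subj1'
    conn.inputs.inputnode.dwi = 'dwi.nii'
    conn.inputs.inputnode.bvecs = 'bvecs'
    conn.inputs.inputnode.bvals = 'bvals'
    # the parcellation's resolution_network_file must also be supplied, as shown
    # in the group_connectivity example elsewhere in this package
    conn.run()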
""" inputnode = pe.Node(interface=util.IdentityInterface(fields=["subject_id", "dwi", "bvecs", "bvals", "subjects_dir"]), name="inputnode") outputnode = pe.Node(interface = util.IdentityInterface(fields=["fa", "struct", "tracts", "tracks2prob", "connectome", "nxstatscff", "nxmatlab", "nxcsv", "fiber_csv", "cmatrices_csv", "nxmergedcsv", "cmatrix", "networks", "filtered_tracts", "rois", "odfs", "tdi", "mean_fiber_length", "median_fiber_length", "fiber_length_std"]), name="outputnode") connectivity = pe.Workflow(name="connectivity") connectivity.base_output_dir=name connectivity.base_dir=name connectivity.connect([(inputnode, mapping, [("dwi", "inputnode_within.dwi"), ("bvals", "inputnode_within.bvals"), ("bvecs", "inputnode_within.bvecs"), ("subject_id", "inputnode_within.subject_id"), ("subjects_dir", "inputnode_within.subjects_dir")]) ]) connectivity.connect([(mapping, outputnode, [("tck2trk.out_file", "tracts"), ("CFFConverter.connectome_file", "connectome"), ("NxStatsCFFConverter.connectome_file", "nxstatscff"), ("CreateMatrix.matrix_mat_file", "cmatrix"), ("CreateMatrix.mean_fiber_length_matrix_mat_file", "mean_fiber_length"), ("CreateMatrix.median_fiber_length_matrix_mat_file", "median_fiber_length"), ("CreateMatrix.fiber_length_std_matrix_mat_file", "fiber_length_std"), ("CreateMatrix.matrix_files", "networks"), ("CreateMatrix.filtered_tractographies", "filtered_tracts"), ("merge_nfib_csvs.csv_file", "fiber_csv"), ("mri_convert_ROI_scale500.out_file", "rois"), ("trk2tdi.out_file", "tdi"), ("csdeconv.spherical_harmonics_image", "odfs"), ("mri_convert_Brain.out_file", "struct"), ("MRconvert_fa.converted", "fa"), ("MRconvert_tracks2prob.converted", "tracks2prob")]) ]) connectivity.connect([(cmats_to_csv, outputnode,[("outputnode.csv_file","cmatrices_csv")])]) connectivity.connect([(networkx, outputnode,[("outputnode.csv_files","nxcsv")])]) return connectivity nipype-0.9.2/nipype/workflows/dmri/mrtrix/diffusion.py000066400000000000000000000166401227300005300232060ustar00rootroot00000000000000import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.fsl as fsl import nipype.interfaces.mrtrix as mrtrix def create_mrtrix_dti_pipeline(name="dtiproc", tractography_type = 'probabilistic'): """Creates a pipeline that does the same diffusion processing as in the :doc:`../../users/examples/dmri_mrtrix_dti` example script. 
Given a diffusion-weighted image, b-values, and b-vectors, the workflow will return the tractography computed from spherical deconvolution and probabilistic streamline tractography Example ------- >>> dti = create_mrtrix_dti_pipeline("mrtrix_dti") >>> dti.inputs.inputnode.dwi = 'data.nii' >>> dti.inputs.inputnode.bvals = 'bvals' >>> dti.inputs.inputnode.bvecs = 'bvecs' >>> dti.run() # doctest: +SKIP Inputs:: inputnode.dwi inputnode.bvecs inputnode.bvals Outputs:: outputnode.fa outputnode.tdi outputnode.tracts_tck outputnode.tracts_trk outputnode.csdeconv """ inputnode = pe.Node(interface = util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode") bet = pe.Node(interface=fsl.BET(), name="bet") bet.inputs.mask = True fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(),name='fsl2mrtrix') fsl2mrtrix.inputs.invert_y = True dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(),name='dwi2tensor') tensor2vector = pe.Node(interface=mrtrix.Tensor2Vector(), name='tensor2vector') tensor2adc = pe.Node(interface=mrtrix.Tensor2ApparentDiffusion(), name='tensor2adc') tensor2fa = pe.Node(interface=mrtrix.Tensor2FractionalAnisotropy(), name='tensor2fa') erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(), name='erode_mask_firstpass') erode_mask_secondpass = pe.Node(interface=mrtrix.Erode(), name='erode_mask_secondpass') threshold_b0 = pe.Node(interface=mrtrix.Threshold(),name='threshold_b0') threshold_FA = pe.Node(interface=mrtrix.Threshold(),name='threshold_FA') threshold_FA.inputs.absolute_threshold_value = 0.7 threshold_wmmask = pe.Node(interface=mrtrix.Threshold(), name='threshold_wmmask') threshold_wmmask.inputs.absolute_threshold_value = 0.4 MRmultiply = pe.Node(interface=mrtrix.MRMultiply(),name='MRmultiply') MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge') median3d = pe.Node(interface=mrtrix.MedianFilter3D(),name='median3D') MRconvert = pe.Node(interface=mrtrix.MRConvert(),name='MRconvert') MRconvert.inputs.extract_at_axis = 3 MRconvert.inputs.extract_at_coordinate = [0] csdeconv = pe.Node(interface=mrtrix.ConstrainedSphericalDeconvolution(), name='csdeconv') gen_WM_mask = pe.Node(interface=mrtrix.GenerateWhiteMatterMask(), name='gen_WM_mask') estimateresponse = pe.Node(interface=mrtrix.EstimateResponseForSH(), name='estimateresponse') if tractography_type == 'probabilistic': CSDstreamtrack = pe.Node(interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(), name='CSDstreamtrack') else: CSDstreamtrack = pe.Node(interface=mrtrix.SphericallyDeconvolutedStreamlineTrack(), name='CSDstreamtrack') CSDstreamtrack.inputs.desired_number_of_tracks = 15000 tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(),name='tracks2prob') tracks2prob.inputs.colour = True tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(),name='tck2trk') workflow = pe.Workflow(name=name) workflow.base_output_dir=name workflow.connect([(inputnode, fsl2mrtrix, [("bvecs", "bvec_file"), ("bvals", "bval_file")])]) workflow.connect([(inputnode, dwi2tensor,[("dwi","in_file")])]) workflow.connect([(fsl2mrtrix, dwi2tensor,[("encoding_file","encoding_file")])]) workflow.connect([(dwi2tensor, tensor2vector,[['tensor','in_file']]), (dwi2tensor, tensor2adc,[['tensor','in_file']]), (dwi2tensor, tensor2fa,[['tensor','in_file']]), ]) workflow.connect([(inputnode, MRconvert,[("dwi","in_file")])]) workflow.connect([(MRconvert, threshold_b0,[("converted","in_file")])]) workflow.connect([(threshold_b0, median3d,[("out_file","in_file")])]) workflow.connect([(median3d, 
erode_mask_firstpass,[("out_file","in_file")])]) workflow.connect([(erode_mask_firstpass, erode_mask_secondpass,[("out_file","in_file")])]) workflow.connect([(tensor2fa, MRmult_merge,[("FA","in1")])]) workflow.connect([(erode_mask_secondpass, MRmult_merge,[("out_file","in2")])]) workflow.connect([(MRmult_merge, MRmultiply,[("out","in_files")])]) workflow.connect([(MRmultiply, threshold_FA,[("out_file","in_file")])]) workflow.connect([(threshold_FA, estimateresponse,[("out_file","mask_image")])]) workflow.connect([(inputnode, bet,[("dwi","in_file")])]) workflow.connect([(inputnode, gen_WM_mask,[("dwi","in_file")])]) workflow.connect([(bet, gen_WM_mask,[("mask_file","binary_mask")])]) workflow.connect([(fsl2mrtrix, gen_WM_mask,[("encoding_file","encoding_file")])]) workflow.connect([(inputnode, estimateresponse,[("dwi","in_file")])]) workflow.connect([(fsl2mrtrix, estimateresponse,[("encoding_file","encoding_file")])]) workflow.connect([(inputnode, csdeconv,[("dwi","in_file")])]) workflow.connect([(gen_WM_mask, csdeconv,[("WMprobabilitymap","mask_image")])]) workflow.connect([(estimateresponse, csdeconv,[("response","response_file")])]) workflow.connect([(fsl2mrtrix, csdeconv,[("encoding_file","encoding_file")])]) workflow.connect([(gen_WM_mask, threshold_wmmask,[("WMprobabilitymap","in_file")])]) workflow.connect([(threshold_wmmask, CSDstreamtrack,[("out_file","seed_file")])]) workflow.connect([(csdeconv, CSDstreamtrack,[("spherical_harmonics_image","in_file")])]) if tractography_type == 'probabilistic': workflow.connect([(CSDstreamtrack, tracks2prob,[("tracked","in_file")])]) workflow.connect([(inputnode, tracks2prob,[("dwi","template_file")])]) workflow.connect([(CSDstreamtrack, tck2trk,[("tracked","in_file")])]) workflow.connect([(inputnode, tck2trk,[("dwi","image_file")])]) output_fields = ["fa", "tracts_trk", "csdeconv", "tracts_tck"] if tractography_type == 'probabilistic': output_fields.append("tdi") outputnode = pe.Node(interface = util.IdentityInterface(fields=output_fields), name="outputnode") workflow.connect([(CSDstreamtrack, outputnode, [("tracked", "tracts_tck")]), (csdeconv, outputnode, [("spherical_harmonics_image", "csdeconv")]), (tensor2fa, outputnode, [("FA", "fa")]), (tck2trk, outputnode, [("out_file", "tracts_trk")]) ]) if tractography_type == 'probabilistic': workflow.connect([(tracks2prob, outputnode, [("tract_image", "tdi")])]) return workflow nipype-0.9.2/nipype/workflows/dmri/mrtrix/group_connectivity.py000066400000000000000000000146431227300005300251530ustar00rootroot00000000000000import os.path as op import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.utility as util # utility import nipype.interfaces.cmtk as cmtk import nipype.algorithms.misc as misc import nipype.pipeline.engine as pe # pypeline engine from .connectivity_mapping import create_connectivity_pipeline from nipype.utils.misc import package_check import warnings try: package_check('cmp') except Exception, e: warnings.warn('cmp not installed') else: import cmp def create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, template_args_dict=0): """Creates a pipeline that performs MRtrix structural connectivity processing on groups of subjects. Given a diffusion-weighted image, and text files containing the associated b-values and b-vectors, the workflow will return each subjects' connectomes in a Connectome File Format (CFF) file, for use in Connectome Viewer (http://www.cmtk.org). 
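With the default template arguments, the DataGrabber in this pipeline expects each subject's files to live in a folder named after the subject inside ``data_dir``; a sketch of the implied layout, assuming those defaults::

    data_dir/subj1/dwi.nii
    data_dir/subj1/bvecs
    data_dir/subj1/bvals

Pass your own ``template_args_dict`` (see Inputs below) to override this layout.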
Example ------- >>> import nipype.interfaces.freesurfer as fs >>> import nipype.workflows.dmri.mrtrix.group_connectivity as groupwork >>> import cmp # doctest: +SKIP >>> from nipype.testing import example_data >>> subjects_dir = '.' >>> data_dir = '.' >>> output_dir = '.' >>> fs.FSCommand.set_default_subjects_dir(subjects_dir) >>> group_list = {} >>> group_list['group1'] = ['subj1', 'subj2'] >>> group_list['group2'] = ['subj3', 'subj4'] >>> template_args = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id', 'bvecs']], bvals=[['subject_id', 'bvals']]) >>> group_id = 'group1' >>> l1pipeline = groupwork.create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, template_args) >>> parcellation_name = 'scale500' >>> l1pipeline.inputs.connectivity.mapping.Parcellate.parcellation_name = parcellation_name >>> cmp_config = cmp.configuration.PipelineConfiguration() # doctest: +SKIP >>> cmp_config.parcellation_scheme = "Lausanne2008" # doctest: +SKIP >>> l1pipeline.inputs.connectivity.mapping.inputnode_within.resolution_network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml'] # doctest: +SKIP >>> l1pipeline.run() # doctest: +SKIP Inputs:: group_list: Dictionary of subject lists, keyed by group name group_id: String containing the group name data_dir: Path to the data directory subjects_dir: Path to the Freesurfer 'subjects' directory output_dir: Path for the output files template_args_dict: Dictionary of template arguments for the connectivity pipeline datasource e.g. info = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id','bvecs']], bvals=[['subject_id','bvals']]) """ group_infosource = pe.Node(interface=util.IdentityInterface(fields=['group_id']), name="group_infosource") group_infosource.inputs.group_id = group_id subject_list = group_list[group_id] subj_infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="subj_infosource") subj_infosource.iterables = ('subject_id', subject_list) if template_args_dict == 0: info = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id','bvecs']], bvals=[['subject_id','bvals']]) else: info = template_args_dict datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=info.keys()), name = 'datasource') datasource.inputs.template = "%s/%s" datasource.inputs.base_directory = data_dir datasource.inputs.field_template = dict(dwi='%s/%s.nii') datasource.inputs.template_args = info datasource.inputs.sort_filelist = True """ Create a connectivity mapping workflow """ conmapper = create_connectivity_pipeline("nipype_conmap") conmapper.inputs.inputnode.subjects_dir = subjects_dir conmapper.base_dir = op.abspath('conmapper') datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = output_dir datasink.inputs.container = group_id l1pipeline = pe.Workflow(name="l1pipeline_"+group_id) l1pipeline.base_dir = output_dir l1pipeline.base_output_dir = group_id l1pipeline.connect([(subj_infosource, conmapper,[('subject_id', 'inputnode.subject_id')])]) l1pipeline.connect([(subj_infosource, datasource,[('subject_id', 'subject_id')])]) l1pipeline.connect([(datasource, conmapper, [("dwi", "inputnode.dwi"), ("bvals", "inputnode.bvals"), ("bvecs", "inputnode.bvecs"), ])]) l1pipeline.connect([(conmapper, datasink, [("outputnode.connectome", "@l1output.cff"), ("outputnode.nxstatscff", "@l1output.nxstatscff"), ("outputnode.nxmatlab", "@l1output.nxmatlab"), ("outputnode.nxcsv", "@l1output.nxcsv"), 
("outputnode.fiber_csv", "@l1output.fiber_csv"), ("outputnode.cmatrices_csv", "@l1output.cmatrices_csv"), ("outputnode.fa", "@l1output.fa"), ("outputnode.filtered_tracts", "@l1output.filtered_tracts"), ("outputnode.cmatrix", "@l1output.cmatrix"), ("outputnode.rois", "@l1output.rois"), ("outputnode.odfs", "@l1output.odfs"), ("outputnode.struct", "@l1output.struct"), ("outputnode.networks", "@l1output.networks"), ("outputnode.mean_fiber_length", "@l1output.mean_fiber_length"), ("outputnode.fiber_length_std", "@l1output.fiber_length_std"), ])]) l1pipeline.connect([(group_infosource, datasink,[('group_id','@group_id')])]) return l1pipeline nipype-0.9.2/nipype/workflows/dmri/mrtrix/setup.py000066400000000000000000000006511227300005300223530ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('mrtrix', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/dmri/setup.py000066400000000000000000000010731227300005300210250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('dmri', parent_package, top_path) config.add_subpackage('camino') config.add_subpackage('mrtrix') config.add_subpackage('fsl') config.add_subpackage('connectivity') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/fmri/000077500000000000000000000000001227300005300173145ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/fmri/__init__.py000066400000000000000000000000271227300005300214240ustar00rootroot00000000000000from . import fsl, spm nipype-0.9.2/nipype/workflows/fmri/fsl/000077500000000000000000000000001227300005300201005ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/fmri/fsl/__init__.py000066400000000000000000000005351227300005300222140ustar00rootroot00000000000000from .preprocess import (create_susan_smooth, create_fsl_fs_preproc, create_parallelfeat_preproc, create_featreg_preproc, create_reg_workflow) from .estimate import create_modelfit_workflow, create_fixed_effects_flow #backwards compatibility from ...rsfmri.fsl.resting import create_resting_preprocnipype-0.9.2/nipype/workflows/fmri/fsl/estimate.py000066400000000000000000000246321227300005300222740ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine def create_modelfit_workflow(name='modelfit'): """Create an FSL individual modelfitting workflow Example ------- >>> modelfit = create_modelfit_workflow() >>> modelfit.base_dir = '.' >>> info = dict() >>> modelfit.inputs.inputspec.session_info = info >>> modelfit.inputs.inputspec.interscan_interval = 3. 
>>> modelfit.inputs.inputspec.film_threshold = 1000 >>> modelfit.run() #doctest: +SKIP Inputs:: inputspec.session_info : info generated by modelgen.SpecifyModel inputspec.interscan_interval : interscan interval inputspec.contrasts : list of contrasts inputspec.film_threshold : image threshold for FILM estimation Outputs:: outputspec.realignment_parameters : realignment parameter files outputspec.smoothed_files : smoothed functional files outputspec.outlier_files : list of outliers outputspec.outlier_stats : statistics of outliers outputspec.outlier_plots : images of outliers outputspec.mask_file : binary mask file in reference image space outputspec.reg_file : registration file that maps reference image to freesurfer space outputspec.reg_cost : cost of registration (useful for detecting misalignment) """ modelfit = pe.Workflow(name=name) """ Create the nodes """ inputspec = pe.Node(util.IdentityInterface(fields=['session_info', 'interscan_interval', 'contrasts', 'film_threshold', 'functional_data', 'bases', 'model_serial_correlations']), name='inputspec') level1design = pe.Node(interface=fsl.Level1Design(), name="level1design") modelgen = pe.MapNode(interface=fsl.FEATModel(), name='modelgen', iterfield=['fsf_file', 'ev_files']) modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5), name='modelestimate', iterfield=['design_file', 'in_file']) conestimate = pe.MapNode(interface=fsl.ContrastMgr(), name='conestimate', iterfield=['tcon_file', 'param_estimates', 'sigmasquareds', 'corrections', 'dof_file']) ztopval = pe.MapNode(interface=fsl.ImageMaths(op_string='-ztop', suffix='_pval'), name='ztop', iterfield=['in_file']) outputspec = pe.Node(util.IdentityInterface(fields=['copes', 'varcopes', 'dof_file', 'pfiles', 'parameter_estimates']), name='outputspec') """ Utility function """ pop_lambda = lambda x: x[0] """ Setup the connections """ modelfit.connect([ (inputspec, level1design, [('interscan_interval', 'interscan_interval'), ('session_info', 'session_info'), ('contrasts', 'contrasts'), ('bases', 'bases'), ('model_serial_correlations', 'model_serial_correlations')]), (inputspec, modelestimate, [('film_threshold', 'threshold'), ('functional_data', 'in_file')]), (level1design, modelgen, [('fsf_files', 'fsf_file'), ('ev_files', 'ev_files')]), (modelgen, modelestimate, [('design_file', 'design_file')]), (modelgen, conestimate, [('con_file', 'tcon_file')]), (modelestimate, conestimate, [('param_estimates', 'param_estimates'), ('sigmasquareds', 'sigmasquareds'), ('corrections', 'corrections'), ('dof_file', 'dof_file')]), (conestimate, ztopval, [(('zstats', pop_lambda), 'in_file')]), (ztopval, outputspec, [('out_file', 'pfiles')]), (modelestimate, outputspec, [('param_estimates', 'parameter_estimates'), ('dof_file', 'dof_file')]), (conestimate, outputspec, [('copes', 'copes'), ('varcopes', 'varcopes')]), ]) return modelfit def create_overlay_workflow(name='overlay'): """Setup overlay workflow """ overlay = pe.Workflow(name='overlay') overlaystats = pe.MapNode(interface=fsl.Overlay(), name="overlaystats", iterfield=['stat_image']) overlaystats.inputs.show_negative_stats = True overlaystats.inputs.auto_thresh_bg = True slicestats = pe.MapNode(interface=fsl.Slicer(), name="slicestats", iterfield=['in_file']) slicestats.inputs.all_axial = True slicestats.inputs.image_width = 512 overlay.connect(overlaystats, 'out_file', slicestats, 'in_file') return overlay def create_fixed_effects_flow(name='fixedfx'): """Create a fixed-effects workflow This workflow is used to combine 
registered copes and varcopes across runs for an individual subject Example ------- >>> fixedfx = create_fixed_effects_flow() >>> fixedfx.base_dir = '.' >>> fixedfx.inputs.inputspec.copes = [['cope1run1.nii.gz', 'cope1run2.nii.gz'], ['cope2run1.nii.gz', 'cope2run2.nii.gz']] # per contrast >>> fixedfx.inputs.inputspec.varcopes = [['varcope1run1.nii.gz', 'varcope1run2.nii.gz'], ['varcope2run1.nii.gz', 'varcope2run2.nii.gz']] # per contrast >>> fixedfx.inputs.inputspec.dof_files = ['dofrun1', 'dofrun2'] # per run >>> fixedfx.run() #doctest: +SKIP Inputs:: inputspec.copes : list of list of cope files (one list per contrast) inputspec.varcopes : list of list of varcope files (one list per contrast) inputspec.dof_files : degrees of freedom files for each run Outputs:: outputspec.res4d : 4d residual time series outputspec.copes : contrast parameter estimates outputspec.varcopes : variance of contrast parameter estimates outputspec.zstats : z statistics of contrasts outputspec.tstats : t statistics of contrasts """ fixed_fx = pe.Workflow(name=name) inputspec = pe.Node(util.IdentityInterface(fields=['copes', 'varcopes', 'dof_files' ]), name='inputspec') """ Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and varcopes for each condition """ copemerge = pe.MapNode(interface=fsl.Merge(dimension='t'), iterfield=['in_files'], name="copemerge") varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'), iterfield=['in_files'], name="varcopemerge") """ Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition specific level 2 model design files """ level2model = pe.Node(interface=fsl.L2Model(), name='l2model') """ Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model """ flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe'), name="flameo", iterfield=['cope_file', 'var_cope_file']) def get_dofvolumes(dof_files, cope_files): import os import nibabel as nb import numpy as np img = nb.load(cope_files[0]) out_data = np.zeros(img.get_shape()) for i in range(out_data.shape[-1]): dof = np.loadtxt(dof_files[i]) out_data[:, :, :, i] = dof filename = os.path.join(os.getcwd(), 'dof_file.nii.gz') newimg = nb.Nifti1Image(out_data, None, img.get_header()) newimg.to_filename(filename) return filename gendof = pe.Node(util.Function(input_names=['dof_files', 'cope_files'], output_names=['dof_volume'], function=get_dofvolumes), name='gendofvolume') outputspec = pe.Node(util.IdentityInterface(fields=['res4d', 'copes', 'varcopes', 'zstats', 'tstats']), name='outputspec') fixed_fx.connect([(inputspec, copemerge, [('copes', 'in_files')]), (inputspec, varcopemerge, [('varcopes', 'in_files')]), (inputspec, gendof, [('dof_files', 'dof_files')]), (copemerge, gendof, [('merged_file', 'cope_files')]), (copemerge, flameo, [('merged_file', 'cope_file')]), (varcopemerge, flameo, [('merged_file', 'var_cope_file')]), (level2model, flameo, [('design_mat', 'design_file'), ('design_con', 't_con_file'), ('design_grp', 'cov_split_file')]), (gendof, flameo, [('dof_volume', 'dof_var_cope_file')]), (flameo, outputspec, [('res4d', 'res4d'), ('copes', 'copes'), ('var_copes', 'varcopes'), ('zstats', 'zstats'), ('tstats', 'tstats') ]) ]) return fixed_fx nipype-0.9.2/nipype/workflows/fmri/fsl/preprocess.py000066400000000000000000001372741227300005300226550ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import nipype.interfaces.fsl as fsl # fsl import nipype.interfaces.utility as util # utility 
import nipype.pipeline.engine as pe # pypeline engine import nipype.interfaces.freesurfer as fs # freesurfer import nipype.interfaces.spm as spm from ...smri.freesurfer.utils import create_getmask_flow def getthreshop(thresh): return ['-thr %.10f -Tmin -bin'%(0.1*val[1]) for val in thresh] def pickfirst(files): if isinstance(files, list): return files[0] else: return files def pickmiddle(files): from nibabel import load import numpy as np middlevol = [] for f in files: middlevol.append(int(np.ceil(load(f).get_shape()[3]/2))) return middlevol def pickvol(filenames, fileidx, which): from nibabel import load import numpy as np if which.lower() == 'first': idx = 0 elif which.lower() == 'middle': idx = int(np.ceil(load(filenames[fileidx]).get_shape()[3]/2)) else: raise Exception('unknown value for volume selection : %s'%which) return idx def getbtthresh(medianvals): return [0.75*val for val in medianvals] def chooseindex(fwhm): if fwhm<1: return [0] else: return [1] def getmeanscale(medianvals): return ['-mul %.10f'%(10000./val) for val in medianvals] def getusans(x): return [[tuple([val[0],0.75*val[1]])] for val in x] tolist = lambda x: [x] highpass_operand = lambda x:'-bptf %.10f -1'%x def create_parallelfeat_preproc(name='featpreproc', highpass=True): """Preprocess each run with FSL independently of the others Parameters ---------- :: name : name of workflow (default: featpreproc) highpass : boolean (default: True) Inputs:: inputspec.func : functional runs (filename or list of filenames) inputspec.fwhm : fwhm for smoothing with SUSAN inputspec.highpass : HWHM in TRs (if created with highpass=True) Outputs:: outputspec.reference : volume to which runs are realigned outputspec.motion_parameters : motion correction parameters outputspec.realigned_files : motion corrected files outputspec.motion_plots : plots of motion correction parameters outputspec.mask : mask file used to mask the brain outputspec.smoothed_files : smoothed functional data outputspec.highpassed_files : highpassed functional data (if highpass=True) outputspec.mean : mean file Example ------- >>> preproc = create_parallelfeat_preproc() >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii'] >>> preproc.inputs.inputspec.fwhm = 5 >>> preproc.inputs.inputspec.highpass = 128./(2*2.5) >>> preproc.base_dir = '/tmp' >>> preproc.run() # doctest: +SKIP >>> preproc = create_parallelfeat_preproc(highpass=False) >>> preproc.inputs.inputspec.func = 'f3.nii' >>> preproc.inputs.inputspec.fwhm = 5 >>> preproc.base_dir = '/tmp' >>> preproc.run() # doctest: +SKIP """ featpreproc = pe.Workflow(name=name) """ Set up a node to define all inputs required for the preprocessing workflow """ if highpass: inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', 'fwhm', 'highpass']), name='inputspec') outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference', 'motion_parameters', 'realigned_files', 'motion_plots', 'mask', 'smoothed_files', 'highpassed_files', 'mean']), name='outputspec') else: inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', 'fwhm']), name='inputspec') outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference', 'motion_parameters', 'realigned_files', 'motion_plots', 'mask', 'smoothed_files', 'mean']), name='outputspec') """ Set up a node to define outputs for the preprocessing workflow """ """ Convert functional images to float representation. Since there can be more than one functional run we use a MapNode to convert each run. 
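A MapNode runs its interface once per element of the list given to the input(s) named in ``iterfield``. As a stand-alone illustration of the idea, mirroring the conversion node defined just below (the file names are placeholders and this node is not part of the workflow)::

    import nipype.pipeline.engine as pe
    import nipype.interfaces.fsl as fsl

    tofloat = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
                                                  op_string='', suffix='_dtype'),
                         iterfield=['in_file'], name='tofloat')
    tofloat.inputs.in_file = ['run1.nii', 'run2.nii']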
""" img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float', op_string = '', suffix='_dtype'), iterfield=['in_file'], name='img2float') featpreproc.connect(inputnode, 'func', img2float, 'in_file') """ Extract the first volume of the first run as the reference """ extract_ref = pe.MapNode(interface=fsl.ExtractROI(t_size=1), iterfield=['in_file', 't_min'], name = 'extractref') featpreproc.connect(img2float, 'out_file', extract_ref, 'in_file') featpreproc.connect(img2float, ('out_file', pickmiddle), extract_ref, 't_min') featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference') """ Realign the functional runs to the reference (1st volume of first run) """ motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats = True, save_plots = True), name='realign', iterfield = ['in_file', 'ref_file']) featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file') featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file') featpreproc.connect(motion_correct, 'par_file', outputnode, 'motion_parameters') featpreproc.connect(motion_correct, 'out_file', outputnode, 'realigned_files') """ Plot the estimated motion parameters """ plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'), name='plot_motion', iterfield=['in_file']) plot_motion.iterables = ('plot_type', ['rotations', 'translations']) featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file') featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots') """ Extract the mean volume of the first functional run """ meanfunc = pe.MapNode(interface=fsl.ImageMaths(op_string = '-Tmean', suffix='_mean'), iterfield=['in_file'], name='meanfunc') featpreproc.connect(motion_correct, 'out_file', meanfunc, 'in_file') """ Strip the skull from the mean functional to generate a mask """ meanfuncmask = pe.MapNode(interface=fsl.BET(mask = True, no_output=True, frac = 0.3), iterfield=['in_file'], name = 'meanfuncmask') featpreproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file') """ Mask the functional runs with the extracted mask """ maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), iterfield=['in_file', 'in_file2'], name = 'maskfunc') featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file') featpreproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2') """ Determine the 2nd and 98th percentile intensities of each functional run """ getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'), iterfield = ['in_file'], name='getthreshold') featpreproc.connect(maskfunc, 'out_file', getthresh, 'in_file') """ Threshold the first run of the functional data at 10% of the 98th percentile """ threshold = pe.MapNode(interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'), iterfield=['in_file', 'op_string'], name='threshold') featpreproc.connect(maskfunc, 'out_file', threshold, 'in_file') """ Define a function to get 10% of the intensity """ featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string') """ Determine the median value of the functional runs using the mask """ medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'), iterfield = ['in_file', 'mask_file'], name='medianval') featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file') featpreproc.connect(threshold, 'out_file', medianval, 'mask_file') """ Dilate the mask """ dilatemask = pe.MapNode(interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'), iterfield=['in_file'], name='dilatemask') 
featpreproc.connect(threshold, 'out_file', dilatemask, 'in_file') featpreproc.connect(dilatemask, 'out_file', outputnode, 'mask') """ Mask the motion corrected functional runs with the dilated mask """ maskfunc2 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file', 'in_file2'], name='maskfunc2') featpreproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file') featpreproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2') """ Smooth each run using SUSAN with the brightness threshold set to 75% of the median value for each run and a mask consituting the mean functional """ smooth = create_susan_smooth() featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm') featpreproc.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files') featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file') """ Mask the smoothed data with the dilated mask """ maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file', 'in_file2'], name='maskfunc3') featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, 'in_file') featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2') concatnode = pe.Node(interface=util.Merge(2), name='concat') featpreproc.connect(maskfunc2,('out_file', tolist), concatnode, 'in1') featpreproc.connect(maskfunc3,('out_file', tolist), concatnode, 'in2') """ The following nodes select smooth or unsmoothed data depending on the fwhm. This is because SUSAN defaults to smoothing the data with about the voxel size of the input data if the fwhm parameter is less than 1/3 of the voxel size. """ selectnode = pe.Node(interface=util.Select(),name='select') featpreproc.connect(concatnode, 'out', selectnode, 'inlist') featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index') featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files') """ Scale the median value of the run is set to 10000 """ meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'), iterfield=['in_file','op_string'], name='meanscale') featpreproc.connect(selectnode, 'out', meanscale, 'in_file') """ Define a function to get the scaling factor for intensity normalization """ featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string') """ Perform temporal highpass filtering on the data """ if highpass: highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'), iterfield=['in_file'], name='highpass') featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass, 'op_string') featpreproc.connect(meanscale, 'out_file', highpass, 'in_file') featpreproc.connect(highpass, 'out_file', outputnode, 'highpassed_files') """ Generate a mean functional image from the first run """ meanfunc3 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), iterfield=['in_file'], name='meanfunc3') if highpass: featpreproc.connect(highpass, 'out_file', meanfunc3, 'in_file') else: featpreproc.connect(meanscale, 'out_file', meanfunc3, 'in_file') featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean') return featpreproc def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle'): """Create a FEAT preprocessing workflow with registration to one volume of the first run Parameters ---------- :: name : name of workflow (default: featpreproc) highpass : boolean (default: True) whichvol : which volume of the first run to register to ('first', 'middle', 'mean') Inputs:: inputspec.func : functional runs (filename or list of 
filenames) inputspec.fwhm : fwhm for smoothing with SUSAN inputspec.highpass : HWHM in TRs (if created with highpass=True) Outputs:: outputspec.reference : volume to which runs are realigned outputspec.motion_parameters : motion correction parameters outputspec.realigned_files : motion corrected files outputspec.motion_plots : plots of motion correction parameters outputspec.mask : mask file used to mask the brain outputspec.smoothed_files : smoothed functional data outputspec.highpassed_files : highpassed functional data (if highpass=True) outputspec.mean : mean file Example ------- >>> preproc = create_featreg_preproc() >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii'] >>> preproc.inputs.inputspec.fwhm = 5 >>> preproc.inputs.inputspec.highpass = 128./(2*2.5) >>> preproc.base_dir = '/tmp' >>> preproc.run() # doctest: +SKIP >>> preproc = create_featreg_preproc(highpass=False, whichvol='mean') >>> preproc.inputs.inputspec.func = 'f3.nii' >>> preproc.inputs.inputspec.fwhm = 5 >>> preproc.base_dir = '/tmp' >>> preproc.run() # doctest: +SKIP """ featpreproc = pe.Workflow(name=name) """ Set up a node to define all inputs required for the preprocessing workflow """ if highpass: inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', 'fwhm', 'highpass']), name='inputspec') outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference', 'motion_parameters', 'realigned_files', 'motion_plots', 'mask', 'smoothed_files', 'highpassed_files', 'mean']), name='outputspec') else: inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', 'fwhm']), name='inputspec') outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference', 'motion_parameters', 'realigned_files', 'motion_plots', 'mask', 'smoothed_files', 'mean']), name='outputspec') """ Set up a node to define outputs for the preprocessing workflow """ """ Convert functional images to float representation. Since there can be more than one functional run we use a MapNode to convert each run. 
""" img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float', op_string = '', suffix='_dtype'), iterfield=['in_file'], name='img2float') featpreproc.connect(inputnode, 'func', img2float, 'in_file') """ Extract the first volume of the first run as the reference """ if whichvol != 'mean': extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1), iterfield=['in_file'], name = 'extractref') featpreproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file') featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol), extract_ref, 't_min') featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference') """ Realign the functional runs to the reference (1st volume of first run) """ motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats = True, save_plots = True, interpolation = 'spline'), name='realign', iterfield = ['in_file']) featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file') if whichvol != 'mean': featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file') else: motion_correct.inputs.mean_vol = True featpreproc.connect(motion_correct, ('mean_img', pickfirst), outputnode, 'reference') featpreproc.connect(motion_correct, 'par_file', outputnode, 'motion_parameters') featpreproc.connect(motion_correct, 'out_file', outputnode, 'realigned_files') """ Plot the estimated motion parameters """ plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'), name='plot_motion', iterfield=['in_file']) plot_motion.iterables = ('plot_type', ['rotations', 'translations']) featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file') featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots') """ Extract the mean volume of the first functional run """ meanfunc = pe.Node(interface=fsl.ImageMaths(op_string = '-Tmean', suffix='_mean'), name='meanfunc') featpreproc.connect(motion_correct, ('out_file', pickfirst), meanfunc, 'in_file') """ Strip the skull from the mean functional to generate a mask """ meanfuncmask = pe.Node(interface=fsl.BET(mask = True, no_output=True, frac = 0.3), name = 'meanfuncmask') featpreproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file') """ Mask the functional runs with the extracted mask """ maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), iterfield=['in_file'], name = 'maskfunc') featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file') featpreproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2') """ Determine the 2nd and 98th percentile intensities of each functional run """ getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'), iterfield = ['in_file'], name='getthreshold') featpreproc.connect(maskfunc, 'out_file', getthresh, 'in_file') """ Threshold the first run of the functional data at 10% of the 98th percentile """ threshold = pe.MapNode(interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'), iterfield=['in_file', 'op_string'], name='threshold') featpreproc.connect(maskfunc, 'out_file', threshold, 'in_file') """ Define a function to get 10% of the intensity """ featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string') """ Determine the median value of the functional runs using the mask """ medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'), iterfield = ['in_file', 'mask_file'], name='medianval') featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file') featpreproc.connect(threshold, 'out_file', medianval, 'mask_file') """ Dilate 
the mask """ dilatemask = pe.MapNode(interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'), iterfield=['in_file'], name='dilatemask') featpreproc.connect(threshold, 'out_file', dilatemask, 'in_file') featpreproc.connect(dilatemask, 'out_file', outputnode, 'mask') """ Mask the motion corrected functional runs with the dilated mask """ maskfunc2 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file', 'in_file2'], name='maskfunc2') featpreproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file') featpreproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2') """ Smooth each run using SUSAN with the brightness threshold set to 75% of the median value for each run and a mask consituting the mean functional """ smooth = create_susan_smooth() featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm') featpreproc.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files') featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file') """ Mask the smoothed data with the dilated mask """ maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file', 'in_file2'], name='maskfunc3') featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, 'in_file') featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2') concatnode = pe.Node(interface=util.Merge(2), name='concat') featpreproc.connect(maskfunc2,('out_file', tolist), concatnode, 'in1') featpreproc.connect(maskfunc3,('out_file', tolist), concatnode, 'in2') """ The following nodes select smooth or unsmoothed data depending on the fwhm. This is because SUSAN defaults to smoothing the data with about the voxel size of the input data if the fwhm parameter is less than 1/3 of the voxel size. 
""" selectnode = pe.Node(interface=util.Select(),name='select') featpreproc.connect(concatnode, 'out', selectnode, 'inlist') featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index') featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files') """ Scale the median value of the run is set to 10000 """ meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'), iterfield=['in_file','op_string'], name='meanscale') featpreproc.connect(selectnode, 'out', meanscale, 'in_file') """ Define a function to get the scaling factor for intensity normalization """ featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string') """ Perform temporal highpass filtering on the data """ if highpass: highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'), iterfield=['in_file'], name='highpass') featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass, 'op_string') featpreproc.connect(meanscale, 'out_file', highpass, 'in_file') featpreproc.connect(highpass, 'out_file', outputnode, 'highpassed_files') """ Generate a mean functional image from the first run """ meanfunc3 = pe.Node(interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), iterfield=['in_file'], name='meanfunc3') if highpass: featpreproc.connect(highpass, ('out_file', pickfirst), meanfunc3, 'in_file') else: featpreproc.connect(meanscale, ('out_file', pickfirst), meanfunc3, 'in_file') featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean') return featpreproc def create_susan_smooth(name="susan_smooth", separate_masks=True): """Create a SUSAN smoothing workflow Parameters ---------- :: name : name of workflow (default: susan_smooth) separate_masks : separate masks for each run Inputs:: inputnode.in_files : functional runs (filename or list of filenames) inputnode.fwhm : fwhm for smoothing with SUSAN inputnode.mask_file : mask used for estimating SUSAN thresholds (but not for smoothing) Outputs:: outputnode.smoothed_files : functional runs (filename or list of filenames) Example ------- >>> smooth = create_susan_smooth() >>> smooth.inputs.inputnode.in_files = 'f3.nii' >>> smooth.inputs.inputnode.fwhm = 5 >>> smooth.inputs.inputnode.mask_file = 'mask.nii' >>> smooth.run() # doctest: +SKIP """ susan_smooth = pe.Workflow(name=name) """ Set up a node to define all inputs required for the preprocessing workflow """ inputnode = pe.Node(interface=util.IdentityInterface(fields=['in_files', 'fwhm', 'mask_file']), name='inputnode') """ Smooth each run using SUSAN with the brightness threshold set to 75% of the median value for each run and a mask consituting the mean functional """ smooth = pe.MapNode(interface=fsl.SUSAN(), iterfield=['in_file', 'brightness_threshold','usans'], name='smooth') """ Determine the median value of the functional runs using the mask """ if separate_masks: median = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'), iterfield = ['in_file', 'mask_file'], name='median') else: median = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'), iterfield = ['in_file'], name='median') susan_smooth.connect(inputnode, 'in_files', median, 'in_file') susan_smooth.connect(inputnode, 'mask_file', median, 'mask_file') """ Mask the motion corrected functional runs with the dilated mask """ if separate_masks: mask = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file', 'in_file2'], name='mask') else: mask = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file'], name='mask') 
susan_smooth.connect(inputnode, 'in_files', mask, 'in_file') susan_smooth.connect(inputnode, 'mask_file', mask, 'in_file2') """ Determine the mean image from each functional run """ meanfunc = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), iterfield=['in_file'], name='meanfunc2') susan_smooth.connect(mask, 'out_file', meanfunc, 'in_file') """ Merge the median values with the mean functional images into a coupled list """ merge = pe.Node(interface=util.Merge(2, axis='hstack'), name='merge') susan_smooth.connect(meanfunc,'out_file', merge, 'in1') susan_smooth.connect(median,'out_stat', merge, 'in2') """ Define a function to get the brightness threshold for SUSAN """ susan_smooth.connect(inputnode, 'fwhm', smooth, 'fwhm') susan_smooth.connect(inputnode, 'in_files', smooth, 'in_file') susan_smooth.connect(median, ('out_stat', getbtthresh), smooth, 'brightness_threshold') susan_smooth.connect(merge, ('out', getusans), smooth, 'usans') outputnode = pe.Node(interface=util.IdentityInterface(fields=['smoothed_files']), name='outputnode') susan_smooth.connect(smooth, 'smoothed_file', outputnode, 'smoothed_files') return susan_smooth def create_fsl_fs_preproc(name='preproc', highpass=True, whichvol='middle'): """Create a FEAT preprocessing workflow together with freesurfer Parameters ---------- :: name : name of workflow (default: preproc) highpass : boolean (default: True) whichvol : which volume of the first run to register to ('first', 'middle', 'mean') Inputs:: inputspec.func : functional runs (filename or list of filenames) inputspec.fwhm : fwhm for smoothing with SUSAN inputspec.highpass : HWHM in TRs (if created with highpass=True) inputspec.subject_id : freesurfer subject id inputspec.subjects_dir : freesurfer subjects dir Outputs:: outputspec.reference : volume to which runs are realigned outputspec.motion_parameters : motion correction parameters outputspec.realigned_files : motion corrected files outputspec.motion_plots : plots of motion correction parameters outputspec.mask_file : mask file used to mask the brain outputspec.smoothed_files : smoothed functional data outputspec.highpassed_files : highpassed functional data (if highpass=True) outputspec.reg_file : bbregister registration files outputspec.reg_cost : bbregister registration cost files Example ------- >>> preproc = create_fsl_fs_preproc(whichvol='first') >>> preproc.inputs.inputspec.highpass = 128./(2*2.5) >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii'] >>> preproc.inputs.inputspec.subjects_dir = '.' 
>>> preproc.inputs.inputspec.subject_id = 's1' >>> preproc.inputs.inputspec.fwhm = 6 >>> preproc.run() # doctest: +SKIP """ featpreproc = pe.Workflow(name=name) """ Set up a node to define all inputs required for the preprocessing workflow """ if highpass: inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', 'fwhm', 'subject_id', 'subjects_dir', 'highpass']), name='inputspec') outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference', 'motion_parameters', 'realigned_files', 'motion_plots', 'mask_file', 'smoothed_files', 'highpassed_files', 'reg_file', 'reg_cost' ]), name='outputspec') else: inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', 'fwhm', 'subject_id', 'subjects_dir' ]), name='inputspec') outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference', 'motion_parameters', 'realigned_files', 'motion_plots', 'mask_file', 'smoothed_files', 'reg_file', 'reg_cost' ]), name='outputspec') """ Set up a node to define outputs for the preprocessing workflow """ """ Convert functional images to float representation. Since there can be more than one functional run we use a MapNode to convert each run. """ img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float', op_string = '', suffix='_dtype'), iterfield=['in_file'], name='img2float') featpreproc.connect(inputnode, 'func', img2float, 'in_file') """ Extract the first volume of the first run as the reference """ if whichvol != 'mean': extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1), iterfield=['in_file'], name = 'extractref') featpreproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file') featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol), extract_ref, 't_min') featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference') """ Realign the functional runs to the reference (1st volume of first run) """ motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats = True, save_plots = True, interpolation = 'sinc'), name='realign', iterfield = ['in_file']) featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file') if whichvol != 'mean': featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file') else: motion_correct.inputs.mean_vol = True featpreproc.connect(motion_correct, 'mean_img', outputnode, 'reference') featpreproc.connect(motion_correct, 'par_file', outputnode, 'motion_parameters') featpreproc.connect(motion_correct, 'out_file', outputnode, 'realigned_files') """ Plot the estimated motion parameters """ plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'), name='plot_motion', iterfield=['in_file']) plot_motion.iterables = ('plot_type', ['rotations', 'translations']) featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file') featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots') """Get the mask from subject for each run """ maskflow = create_getmask_flow() featpreproc.connect([(inputnode, maskflow, [('subject_id','inputspec.subject_id'), ('subjects_dir', 'inputspec.subjects_dir')])]) maskflow.inputs.inputspec.contrast_type = 't2' if whichvol != 'mean': featpreproc.connect(extract_ref, 'roi_file', maskflow, 'inputspec.source_file') else: featpreproc.connect(motion_correct, ('mean_img', pickfirst), maskflow, 'inputspec.source_file') """ Mask the functional runs with the extracted mask """ maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), iterfield=['in_file'], name = 'maskfunc') featpreproc.connect(motion_correct, 
'out_file', maskfunc, 'in_file') featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), maskfunc, 'in_file2') """ Smooth each run using SUSAN with the brightness threshold set to 75% of the median value for each run and a mask consituting the mean functional """ smooth = create_susan_smooth(separate_masks=False) featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm') featpreproc.connect(maskfunc, 'out_file', smooth, 'inputnode.in_files') featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), smooth, 'inputnode.mask_file') """ Mask the smoothed data with the dilated mask """ maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), iterfield=['in_file'], name='maskfunc3') featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, 'in_file') featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), maskfunc3, 'in_file2') concatnode = pe.Node(interface=util.Merge(2), name='concat') featpreproc.connect(maskfunc, ('out_file', tolist), concatnode, 'in1') featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2') """ The following nodes select smooth or unsmoothed data depending on the fwhm. This is because SUSAN defaults to smoothing the data with about the voxel size of the input data if the fwhm parameter is less than 1/3 of the voxel size. """ selectnode = pe.Node(interface=util.Select(),name='select') featpreproc.connect(concatnode, 'out', selectnode, 'inlist') featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index') featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files') """ Scale the median value of the run is set to 10000 """ meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'), iterfield=['in_file','op_string'], name='meanscale') featpreproc.connect(selectnode, 'out', meanscale, 'in_file') """ Determine the median value of the functional runs using the mask """ medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'), iterfield = ['in_file'], name='medianval') featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file') featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), medianval, 'mask_file') """ Define a function to get the scaling factor for intensity normalization """ featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string') """ Perform temporal highpass filtering on the data """ if highpass: highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'), iterfield=['in_file'], name='highpass') featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass, 'op_string') featpreproc.connect(meanscale, 'out_file', highpass, 'in_file') featpreproc.connect(highpass, 'out_file', outputnode, 'highpassed_files') featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), outputnode, 'mask_file') featpreproc.connect(maskflow, 'outputspec.reg_file', outputnode, 'reg_file') featpreproc.connect(maskflow, 'outputspec.reg_cost', outputnode, 'reg_cost') return featpreproc def create_reg_workflow(name='registration'): """Create a FEAT preprocessing workflow together with freesurfer Parameters ---------- :: name : name of workflow (default: 'registration') Inputs:: inputspec.source_files : files (filename or list of filenames to register) inputspec.mean_image : reference image to use inputspec.anatomical_image : anatomical image to coregister to inputspec.target_image : registration target Outputs:: outputspec.func2anat_transform : FLIRT transform outputspec.anat2target_transform : 
FLIRT+FNIRT transform outputspec.transformed_files : transformed files in target space outputspec.transformed_mean : mean image in target space Example ------- """ register = pe.Workflow(name=name) inputnode = pe.Node(interface=util.IdentityInterface(fields=['source_files', 'mean_image', 'anatomical_image', 'target_image']), name='inputspec') outputnode = pe.Node(interface=util.IdentityInterface(fields=['func2anat_transform', 'anat2target_transform', 'transformed_files', 'transformed_mean', ]), name='outputspec') """ Estimate the tissue classes from the anatomical image. But use spm's segment as FSL appears to be breaking. """ stripper = pe.Node(fsl.BET(), name='stripper') register.connect(inputnode, 'anatomical_image', stripper, 'in_file') fast = pe.Node(fsl.FAST(), name='fast') register.connect(stripper, 'out_file', fast, 'in_files') """ Binarize the segmentation """ binarize = pe.Node(fsl.ImageMaths(op_string='-nan -thr 0.5 -bin'), name='binarize') pickindex = lambda x, i: x[i] register.connect(fast, ('partial_volume_files', pickindex, 2), binarize, 'in_file') """ Calculate rigid transform from mean image to anatomical image """ mean2anat = pe.Node(fsl.FLIRT(), name='mean2anat') mean2anat.inputs.dof = 6 register.connect(inputnode, 'mean_image', mean2anat, 'in_file') register.connect(inputnode, 'anatomical_image', mean2anat, 'reference') """ Now use bbr cost function to improve the transform """ mean2anatbbr = pe.Node(fsl.FLIRT(), name='mean2anatbbr') mean2anatbbr.inputs.dof = 6 mean2anatbbr.inputs.cost = 'bbr' mean2anatbbr.inputs.schedule = os.path.join(os.getenv('FSLDIR'), 'etc/flirtsch/bbr.sch') register.connect(inputnode, 'mean_image', mean2anatbbr, 'in_file') register.connect(binarize, 'out_file', mean2anatbbr, 'wm_seg') register.connect(inputnode, 'anatomical_image', mean2anatbbr, 'reference') register.connect(mean2anat, 'out_matrix_file', mean2anatbbr, 'in_matrix_file') """ Calculate affine transform from anatomical to target """ anat2target_affine = pe.Node(fsl.FLIRT(), name='anat2target_linear') register.connect(inputnode, 'anatomical_image', anat2target_affine, 'in_file') register.connect(inputnode, 'target_image', anat2target_affine, 'reference') """ Calculate nonlinear transform from anatomical to target """ anat2target_nonlinear = pe.Node(fsl.FNIRT(), name='anat2target_nonlinear') register.connect(anat2target_affine, 'out_matrix_file', anat2target_nonlinear, 'affine_file') anat2target_nonlinear.inputs.warp_resolution = (8, 8, 8) register.connect(inputnode, 'anatomical_image', anat2target_nonlinear, 'in_file') register.connect(inputnode, 'target_image', anat2target_nonlinear, 'ref_file') """ Transform the mean image. First to anatomical and then to target """ warp2anat = pe.Node(fsl.ApplyWarp(interp='spline'), name='warp2anat') register.connect(inputnode, 'mean_image', warp2anat, 'in_file') register.connect(inputnode, 'anatomical_image', warp2anat, 'ref_file') register.connect(mean2anatbbr, 'out_matrix_file', warp2anat, 'premat') warpmean = warp2anat.clone(name='warpmean') register.connect(warp2anat, 'out_file', warpmean, 'in_file') register.connect(inputnode, 'target_image', warpmean, 'ref_file') register.connect(anat2target_affine, 'out_matrix_file', warpmean, 'premat') register.connect(anat2target_nonlinear, 'field_file', warpmean, 'field_file') """ Transform the remaining images. 
First to anatomical and then to target """ warpall2anat = pe.MapNode(fsl.ApplyWarp(interp='spline'), iterfield=['in_file'], name='warpall2anat') register.connect(inputnode, 'source_files', warpall2anat, 'in_file') register.connect(inputnode, 'anatomical_image', warpall2anat, 'ref_file') register.connect(mean2anatbbr, 'out_matrix_file', warpall2anat, 'premat') warpall = warpall2anat.clone(name='warpall') register.connect(warpall2anat, 'out_file', warpall, 'in_file') register.connect(inputnode, 'target_image', warpall, 'ref_file') register.connect(anat2target_affine, 'out_matrix_file', warpall, 'premat') register.connect(anat2target_nonlinear, 'field_file', warpall, 'field_file') """ Assign all the output files """ register.connect(warpmean, 'out_file', outputnode, 'transformed_mean') register.connect(warpall, 'out_file', outputnode, 'transformed_files') register.connect(mean2anatbbr, 'out_matrix_file', outputnode, 'func2anat_transform') register.connect(anat2target_nonlinear, 'field_file', outputnode, 'anat2target_transform') return register nipype-0.9.2/nipype/workflows/fmri/fsl/setup.py000066400000000000000000000006461227300005300216200ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fsl', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/fmri/setup.py000066400000000000000000000007521227300005300210320ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fmri', parent_package, top_path) config.add_subpackage('fsl') config.add_subpackage('spm') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/fmri/spm/000077500000000000000000000000001227300005300201135ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/fmri/spm/__init__.py000066400000000000000000000001611227300005300222220ustar00rootroot00000000000000from .preprocess import (create_spm_preproc, create_vbm_preproc, create_DARTEL_template)nipype-0.9.2/nipype/workflows/fmri/spm/estimate.py000066400000000000000000000001621227300005300222770ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: nipype-0.9.2/nipype/workflows/fmri/spm/preprocess.py000066400000000000000000000306411227300005300226560ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import nipype.algorithms.rapidart as ra import nipype.interfaces.spm as spm import nipype.interfaces.utility as niu import nipype.pipeline.engine as pe logger = pe.logger from ....interfaces.matlab import no_matlab from ...smri.freesurfer.utils import create_getmask_flow def create_spm_preproc(name='preproc'): """Create an spm preprocessing workflow with freesurfer registration and artifact detection. The workflow realigns and smooths and registers the functional images with the subject's freesurfer space. 
Example ------- >>> preproc = create_spm_preproc() >>> preproc.base_dir = '.' >>> preproc.inputs.inputspec.fwhm = 6 >>> preproc.inputs.inputspec.subject_id = 's1' >>> preproc.inputs.inputspec.subjects_dir = '.' >>> preproc.inputs.inputspec.functionals = ['f3.nii', 'f5.nii'] >>> preproc.inputs.inputspec.norm_threshold = 1 >>> preproc.inputs.inputspec.zintensity_threshold = 3 Inputs:: inputspec.functionals : functional runs use 4d nifti inputspec.subject_id : freesurfer subject id inputspec.subjects_dir : freesurfer subjects dir inputspec.fwhm : smoothing fwhm inputspec.norm_threshold : norm threshold for outliers inputspec.zintensity_threshold : intensity threshold in z-score Outputs:: outputspec.realignment_parameters : realignment parameter files outputspec.smoothed_files : smoothed functional files outputspec.outlier_files : list of outliers outputspec.outlier_stats : statistics of outliers outputspec.outlier_plots : images of outliers outputspec.mask_file : binary mask file in reference image space outputspec.reg_file : registration file that maps reference image to freesurfer space outputspec.reg_cost : cost of registration (useful for detecting misalignment) """ """ Initialize the workflow """ workflow = pe.Workflow(name=name) """ Define the inputs to this workflow """ inputnode = pe.Node(niu.IdentityInterface(fields=['functionals', 'subject_id', 'subjects_dir', 'fwhm', 'norm_threshold', 'zintensity_threshold']), name='inputspec') """ Setup the processing nodes and create the mask generation and coregistration workflow """ poplist = lambda x: x.pop() realign = pe.Node(spm.Realign(), name='realign') workflow.connect(inputnode, 'functionals', realign, 'in_files') maskflow = create_getmask_flow() workflow.connect([(inputnode, maskflow, [('subject_id','inputspec.subject_id'), ('subjects_dir', 'inputspec.subjects_dir')])]) maskflow.inputs.inputspec.contrast_type = 't2' workflow.connect(realign, 'mean_image', maskflow, 'inputspec.source_file') smooth = pe.Node(spm.Smooth(), name='smooth') workflow.connect(inputnode, 'fwhm', smooth, 'fwhm') workflow.connect(realign, 'realigned_files', smooth, 'in_files') artdetect = pe.Node(ra.ArtifactDetect(mask_type='file', parameter_source='SPM', use_differences=[True,False], use_norm=True, save_plot=True), name='artdetect') workflow.connect([(inputnode, artdetect,[('norm_threshold', 'norm_threshold'), ('zintensity_threshold', 'zintensity_threshold')])]) workflow.connect([(realign, artdetect, [('realigned_files', 'realigned_files'), ('realignment_parameters', 'realignment_parameters')])]) workflow.connect(maskflow, ('outputspec.mask_file', poplist), artdetect, 'mask_file') """ Define the outputs of the workflow and connect the nodes to the outputnode """ outputnode = pe.Node(niu.IdentityInterface(fields=["realignment_parameters", "smoothed_files", "mask_file", "reg_file", "reg_cost", 'outlier_files', 'outlier_stats', 'outlier_plots' ]), name="outputspec") workflow.connect([ (maskflow, outputnode, [("outputspec.reg_file", "reg_file")]), (maskflow, outputnode, [("outputspec.reg_cost", "reg_cost")]), (maskflow, outputnode, [(("outputspec.mask_file", poplist), "mask_file")]), (realign, outputnode, [('realignment_parameters', 'realignment_parameters')]), (smooth, outputnode, [('smoothed_files', 'smoothed_files')]), (artdetect, outputnode,[('outlier_files', 'outlier_files'), ('statistic_files','outlier_stats'), ('plot_files','outlier_plots')]) ]) return workflow def create_vbm_preproc(name='vbmpreproc'): """Create a vbm workflow that generates DARTEL-based 
warps to MNI space Based on: http://www.fil.ion.ucl.ac.uk/~john/misc/VBMclass10.pdf Example ------- >>> preproc = create_vbm_preproc() >>> preproc.inputs.inputspec.fwhm = 8 >>> preproc.inputs.inputspec.structural_files = [os.path.abspath('s1.nii'), os.path.abspath('s3.nii')] >>> preproc.inputs.inputspec.template_prefix = 'Template' >>> preproc.run() # doctest: +SKIP Inputs:: inputspec.structural_files : structural data to be used to create templates inputspec.fwhm: single of triplet for smoothing when normalizing to MNI space inputspec.template_prefix : prefix for dartel template Outputs:: outputspec.normalized_files : normalized gray matter files outputspec.template_file : DARTEL template outputspec.icv : intracranial volume (cc - assuming dimensions in mm) """ workflow = pe.Workflow(name=name) """ Define the inputs to this workflow """ inputnode = pe.Node(niu.IdentityInterface(fields=['structural_files', 'fwhm', 'template_prefix']), name='inputspec') dartel_template = create_DARTEL_template() workflow.connect(inputnode, 'template_prefix', dartel_template, 'inputspec.template_prefix') workflow.connect(inputnode, 'structural_files', dartel_template, 'inputspec.structural_files') norm2mni = pe.Node(spm.DARTELNorm2MNI(modulate=True), name='norm2mni') workflow.connect(dartel_template, 'outputspec.template_file', norm2mni, 'template_file') workflow.connect(dartel_template, 'outputspec.flow_fields', norm2mni, 'flowfield_files') def getclass1images(class_images): class1images = [] for session in class_images: class1images.extend(session[0]) return class1images workflow.connect(dartel_template, ('segment.native_class_images', getclass1images), norm2mni, 'apply_to_files') workflow.connect(inputnode, 'fwhm', norm2mni, 'fwhm') def compute_icv(class_images): from nibabel import load from numpy import prod icv = [] for session in class_images: voxel_volume = prod(load(session[0][0]).get_header().get_zooms()) img = load(session[0][0]).get_data() + \ load(session[1][0]).get_data() + \ load(session[2][0]).get_data() img_icv = (img>0.5).astype(int).sum()*voxel_volume*1e-3 icv.append(img_icv) return icv calc_icv = pe.Node(niu.Function(function=compute_icv, input_names=['class_images'], output_names=['icv']), name='calc_icv') workflow.connect(dartel_template, 'segment.native_class_images', calc_icv, 'class_images') """ Define the outputs of the workflow and connect the nodes to the outputnode """ outputnode = pe.Node(niu.IdentityInterface(fields=["normalized_files", "template_file", "icv" ]), name="outputspec") workflow.connect([(dartel_template, outputnode, [('outputspec.template_file','template_file')]), (norm2mni, outputnode, [("normalized_files", "normalized_files")]), (calc_icv, outputnode, [("icv", "icv")]), ]) return workflow def create_DARTEL_template(name='dartel_template'): """Create a vbm workflow that generates DARTEL-based template Example ------- >>> preproc = create_DARTEL_template() >>> preproc.inputs.inputspec.structural_files = [os.path.abspath('s1.nii'), os.path.abspath('s3.nii')] >>> preproc.inputs.inputspec.template_prefix = 'Template' >>> preproc.run() # doctest: +SKIP Inputs:: inputspec.structural_files : structural data to be used to create templates inputspec.template_prefix : prefix for dartel template Outputs:: outputspec.template_file : DARTEL template outputspec.flow_fields : warps from input struct files to the template """ workflow = pe.Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface(fields=['structural_files', 'template_prefix']), name='inputspec') segment = 
pe.MapNode(spm.NewSegment(), iterfield=['channel_files'], name='segment') workflow.connect(inputnode, 'structural_files', segment, 'channel_files') if not no_matlab: version = spm.Info.version() if version and version['name'] == 'SPM8': spm_path = version['path'] tissue1 = ((os.path.join(spm_path,'toolbox/Seg/TPM.nii'), 1), 2, (True,True), (False, False)) tissue2 = ((os.path.join(spm_path,'toolbox/Seg/TPM.nii'), 2), 2, (True,True), (False, False)) tissue3 = ((os.path.join(spm_path,'toolbox/Seg/TPM.nii'), 3), 2, (True,False), (False, False)) tissue4 = ((os.path.join(spm_path,'toolbox/Seg/TPM.nii'), 4), 3, (False,False), (False, False)) tissue5 = ((os.path.join(spm_path,'toolbox/Seg/TPM.nii'), 5), 4, (False,False), (False, False)) tissue6 = ((os.path.join(spm_path,'toolbox/Seg/TPM.nii'), 6), 2, (False,False), (False, False)) segment.inputs.tissues = [tissue1, tissue2, tissue3, tissue4, tissue5, tissue6] else: logger.critical('SPM8 not found: DARTEL not available') else: logger.critical('MATLAB not found: DARTEL not setting tissue templates') dartel = pe.Node(spm.DARTEL(), name='dartel') """Get the gray and white segmentation classes generated by NewSegment """ def get2classes(dartel_files): class1images = [] class2images = [] for session in dartel_files: class1images.extend(session[0]) class2images.extend(session[1]) return [class1images, class2images] workflow.connect(segment, ('dartel_input_images', get2classes), dartel, 'image_files') workflow.connect(inputnode, 'template_prefix', dartel, 'template_prefix') outputnode = pe.Node(niu.IdentityInterface(fields=["template_file", "flow_fields" ]), name="outputspec") workflow.connect([ (dartel, outputnode, [('final_template_file','template_file'), ('dartel_flow_fields', 'flow_fields')]), ]) return workflow nipype-0.9.2/nipype/workflows/fmri/spm/setup.py000066400000000000000000000007101227300005300216230ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('spm', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/fmri/spm/tests/000077500000000000000000000000001227300005300212555ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/fmri/spm/tests/__init__.py000066400000000000000000000000251227300005300233630ustar00rootroot00000000000000__author__ = 'satra' nipype-0.9.2/nipype/workflows/graph/000077500000000000000000000000001227300005300174605ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/graph/__init__.py000066400000000000000000000000011227300005300215600ustar00rootroot00000000000000 nipype-0.9.2/nipype/workflows/graph/setup.py000066400000000000000000000006501227300005300211730ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('graph', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) 
nipype-0.9.2/nipype/workflows/misc/000077500000000000000000000000001227300005300173125ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/misc/__init__.py000066400000000000000000000000011227300005300214120ustar00rootroot00000000000000 nipype-0.9.2/nipype/workflows/misc/setup.py000066400000000000000000000006471227300005300210330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('misc', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/misc/utils.py000066400000000000000000000043141227300005300210260ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def get_vox_dims(volume): import nibabel as nb if isinstance(volume, list): volume = volume[0] nii = nb.load(volume) hdr = nii.get_header() voxdims = hdr.get_zooms() return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] def get_data_dims(volume): import nibabel as nb if isinstance(volume, list): volume = volume[0] nii = nb.load(volume) hdr = nii.get_header() datadims = hdr.get_data_shape() return [int(datadims[0]), int(datadims[1]), int(datadims[2])] def get_affine(volume): import nibabel as nb nii = nb.load(volume) return nii.get_affine() def select_aparc(list_of_files): for in_file in list_of_files: if 'aparc+aseg.mgz' in in_file: idx = list_of_files.index(in_file) return list_of_files[idx] def select_aparc_annot(list_of_files): for in_file in list_of_files: if '.aparc.annot' in in_file: idx = list_of_files.index(in_file) return list_of_files[idx] def region_list_from_volume(in_file): import nibabel as nb import numpy as np segmentation = nb.load(in_file) segmentationdata = segmentation.get_data() rois = np.unique(segmentationdata) region_list = list(rois) region_list.sort() region_list.remove(0) region_list = map(int, region_list) return region_list def id_list_from_lookup_table(lookup_file, region_list): import numpy as np LUTlabelsRGBA = np.loadtxt(lookup_file, skiprows=4, usecols=[0,1,2,3,4,5], comments='#', dtype={'names': ('index', 'label', 'R', 'G', 'B', 'A'),'formats': ('int', '|S30', 'int', 'int', 'int', 'int')}) numLUTLabels = np.size(LUTlabelsRGBA) LUTlabelDict = {} for labels in range(0,numLUTLabels): LUTlabelDict[LUTlabelsRGBA[labels][0]] = [LUTlabelsRGBA[labels][1], LUTlabelsRGBA[labels][2], LUTlabelsRGBA[labels][3], LUTlabelsRGBA[labels][4], LUTlabelsRGBA[labels][5]] id_list = [] for region in region_list: label = LUTlabelDict[region][0] id_list.append(label) id_list = map(str, id_list) return id_list nipype-0.9.2/nipype/workflows/rsfmri/000077500000000000000000000000001227300005300176615ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/rsfmri/__init__.py000066400000000000000000000000221227300005300217640ustar00rootroot00000000000000from . 
import fsl nipype-0.9.2/nipype/workflows/rsfmri/fsl/000077500000000000000000000000001227300005300204455ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/rsfmri/fsl/__init__.py000066400000000000000000000000541227300005300225550ustar00rootroot00000000000000from .resting import create_resting_preproc nipype-0.9.2/nipype/workflows/rsfmri/fsl/resting.py000066400000000000000000000166311227300005300225010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import nipype.interfaces.fsl as fsl # fsl from nipype.algorithms.misc import TSNR import nipype.interfaces.utility as util # utility import nipype.pipeline.engine as pe # pypeline engine def extract_noise_components(realigned_file, noise_mask_file, num_components): """Derive components most reflective of physiological noise """ import os from nibabel import load import numpy as np import scipy as sp from scipy.signal import detrend imgseries = load(realigned_file) noise_mask = load(noise_mask_file) voxel_timecourses = imgseries.get_data()[np.nonzero(noise_mask.get_data())] for timecourse in voxel_timecourses: timecourse[:] = detrend(timecourse, type='constant') u,s,v = sp.linalg.svd(voxel_timecourses, full_matrices=False) components_file = os.path.join(os.getcwd(), 'noise_components.txt') np.savetxt(components_file, v[:num_components, :].T) return components_file def select_volume(filename, which): """Return the middle index of a file """ from nibabel import load import numpy as np if which.lower() == 'first': idx = 0 elif which.lower() == 'middle': idx = int(np.ceil(load(filename).get_shape()[3]/2)) else: raise Exception('unknown value for volume selection : %s'%which) return idx def create_realign_flow(name='realign'): """Realign a time series to the middle volume using spline interpolation Uses MCFLIRT to realign the time series and ApplyWarp to apply the rigid body transformations using spline interpolation (unknown order). Example ------- >>> wf = create_realign_flow() >>> wf.inputs.inputspec.func = 'f3.nii' >>> wf.run() # doctest: +SKIP """ realignflow = pe.Workflow(name=name) inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', ]), name='inputspec') outputnode = pe.Node(interface=util.IdentityInterface(fields=[ 'realigned_file', ]), name='outputspec') realigner = pe.Node(fsl.MCFLIRT(save_mats=True, stats_imgs=True), name='realigner') splitter = pe.Node(fsl.Split(dimension='t'), name='splitter') warper = pe.MapNode(fsl.ApplyWarp(interp='spline'), iterfield=['in_file', 'premat'], name='warper') joiner = pe.Node(fsl.Merge(dimension='t'), name='joiner') realignflow.connect(inputnode, 'func', realigner, 'in_file') realignflow.connect(inputnode, ('func', select_volume, 'middle'), realigner, 'ref_vol') realignflow.connect(realigner, 'out_file', splitter, 'in_file') realignflow.connect(realigner, 'mat_file', warper, 'premat') realignflow.connect(realigner, 'variance_img', warper, 'ref_file') realignflow.connect(splitter, 'out_files', warper, 'in_file') realignflow.connect(warper, 'out_file', joiner, 'in_files') realignflow.connect(joiner, 'merged_file', outputnode, 'realigned_file') return realignflow def create_resting_preproc(name='restpreproc'): """Create a "resting" time series preprocessing workflow The noise removal is based on Behzadi et al. 
(2007) Parameters ---------- name : name of workflow (default: restpreproc) Inputs:: inputspec.func : functional run (filename or list of filenames) Outputs:: outputspec.noise_mask_file : voxels used for PCA to derive noise components outputspec.filtered_file : bandpass filtered and noise-reduced time series Example ------- >>> TR = 3.0 >>> wf = create_resting_preproc() >>> wf.inputs.inputspec.func = 'f3.nii' >>> wf.inputs.inputspec.num_noise_components = 6 >>> wf.inputs.inputspec.highpass_sigma = 100/(2*TR) >>> wf.inputs.inputspec.lowpass_sigma = 12.5/(2*TR) >>> wf.run() # doctest: +SKIP """ restpreproc = pe.Workflow(name=name) # Define nodes inputnode = pe.Node(interface=util.IdentityInterface(fields=['func', 'num_noise_components', 'highpass_sigma', 'lowpass_sigma' ]), name='inputspec') outputnode = pe.Node(interface=util.IdentityInterface(fields=[ 'noise_mask_file', 'filtered_file', ]), name='outputspec') slicetimer = pe.Node(fsl.SliceTimer(), name='slicetimer') realigner = create_realign_flow() tsnr = pe.Node(TSNR(regress_poly=2), name='tsnr') getthresh = pe.Node(interface=fsl.ImageStats(op_string='-p 98'), name='getthreshold') threshold_stddev = pe.Node(fsl.Threshold(), name='threshold') compcor = pe.Node(util.Function(input_names=['realigned_file', 'noise_mask_file', 'num_components'], output_names=['noise_components'], function=extract_noise_components), name='compcorr') remove_noise = pe.Node(fsl.FilterRegressor(filter_all=True), name='remove_noise') bandpass_filter = pe.Node(fsl.TemporalFilter(), name='bandpass_filter') # Define connections restpreproc.connect(inputnode, 'func', slicetimer, 'in_file') restpreproc.connect(slicetimer, 'slice_time_corrected_file', realigner, 'inputspec.func') restpreproc.connect(realigner, 'outputspec.realigned_file', tsnr, 'in_file') restpreproc.connect(tsnr, 'stddev_file', threshold_stddev, 'in_file') restpreproc.connect(tsnr, 'stddev_file', getthresh, 'in_file') restpreproc.connect(getthresh, 'out_stat', threshold_stddev, 'thresh') restpreproc.connect(realigner, 'outputspec.realigned_file', compcor, 'realigned_file') restpreproc.connect(threshold_stddev, 'out_file', compcor, 'noise_mask_file') restpreproc.connect(inputnode, 'num_noise_components', compcor, 'num_components') restpreproc.connect(tsnr, 'detrended_file', remove_noise, 'in_file') restpreproc.connect(compcor, 'noise_components', remove_noise, 'design_file') restpreproc.connect(inputnode, 'highpass_sigma', bandpass_filter, 'highpass_sigma') restpreproc.connect(inputnode, 'lowpass_sigma', bandpass_filter, 'lowpass_sigma') restpreproc.connect(remove_noise, 'out_file', bandpass_filter, 'in_file') restpreproc.connect(threshold_stddev, 'out_file', outputnode, 'noise_mask_file') restpreproc.connect(bandpass_filter, 'out_file', outputnode, 'filtered_file') return restpreprocnipype-0.9.2/nipype/workflows/rsfmri/fsl/setup.py000066400000000000000000000006461227300005300221650ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fsl', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/rsfmri/setup.py000066400000000000000000000007121227300005300213730ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: 
set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('rsfmri', parent_package, top_path) config.add_subpackage('fsl') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/setup.py000066400000000000000000000012361227300005300200730ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('workflows', parent_package, top_path) config.add_subpackage('dmri') config.add_subpackage('fmri') config.add_subpackage('graph') config.add_subpackage('misc') config.add_subpackage('rsfmri') config.add_subpackage('smri') config.add_subpackage('warp') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/smri/000077500000000000000000000000001227300005300173315ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/smri/__init__.py000066400000000000000000000000531227300005300214400ustar00rootroot00000000000000from . import freesurfer from . import antsnipype-0.9.2/nipype/workflows/smri/ants/000077500000000000000000000000001227300005300202765ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/smri/ants/ANTSBuildTemplate.py000066400000000000000000000402231227300005300240720ustar00rootroot00000000000000################################################################################# ## Program: Build Template Parallel ## Language: Python ## ## Authors: Jessica Forbes, Grace Murray, and Hans Johnson, University of Iowa ## ## This software is distributed WITHOUT ANY WARRANTY; without even ## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ## PURPOSE. ## ################################################################################# import nipype.pipeline.engine as pe import nipype.interfaces.utility as util from nipype.interfaces.utility import Function from nipype.interfaces.ants import ( ANTS, WarpImageMultiTransform, AverageImages, MultiplyImages, AverageAffineTransform) def GetFirstListElement(this_list): return this_list[0] def MakeTransformListWithGradientWarps(averageAffineTranform, gradientStepWarp): return [averageAffineTranform, gradientStepWarp, gradientStepWarp, gradientStepWarp, gradientStepWarp] def RenestDeformedPassiveImages(deformedPassiveImages,flattened_image_nametypes): import os """ Now make a list of lists of images where the outter list is per image type, and the inner list is the same size as the number of subjects to be averaged. In this case, the first element will be a list of all the deformed T2's, and the second element will be a list of all deformed POSTERIOR_AIR, etc.. 
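Illustrative example (hypothetical file names, included only to clarify the expected shapes): if flattened_image_nametypes were ['T2', 'T2', 'PD', 'PD'] and deformedPassiveImages were ['s1_T2.nii', 's2_T2.nii', 's1_PD.nii', 's2_PD.nii'], the function should return nested_imagetype_list = [['s1_T2.nii', 's2_T2.nii'], ['s1_PD.nii', 's2_PD.nii']], outputAverageImageName_list = ['AVG_T2.nii.gz', 'AVG_PD.nii.gz'] and image_type_list = ['WARP_AVG_T2', 'WARP_AVG_PD'], with the outer ordering determined by dictionary iteration order.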
""" all_images_size=len(deformedPassiveImages) image_dictionary_of_lists=dict() nested_imagetype_list=list() outputAverageImageName_list=list() image_type_list=list() ## make empty_list, this is not efficient, but it works for name in flattened_image_nametypes: image_dictionary_of_lists[name]=list() for index in range(0,all_images_size): curr_name=flattened_image_nametypes[index] curr_file=deformedPassiveImages[index] image_dictionary_of_lists[curr_name].append(curr_file) for image_type,image_list in image_dictionary_of_lists.items(): nested_imagetype_list.append(image_list) outputAverageImageName_list.append('AVG_'+image_type+'.nii.gz') image_type_list.append('WARP_AVG_'+image_type) print "\n"*10 print "HACK: ", nested_imagetype_list print "HACK: ", outputAverageImageName_list print "HACK: ", image_type_list return nested_imagetype_list,outputAverageImageName_list,image_type_list ## Utility Function ## This will make a list of list pairs for defining the concatenation of transforms ## wp=['wp1.nii','wp2.nii','wp3.nii'] ## af=['af1.mat','af2.mat','af3.mat'] ## ll=map(list,zip(af,wp)) ## ll ##[['af1.mat', 'wp1.nii'], ['af2.mat', 'wp2.nii'], ['af3.mat', 'wp3.nii']] def MakeListsOfTransformLists(warpTransformList, AffineTransformList): return map(list, zip(warpTransformList,AffineTransformList)) ## Flatten and return equal length transform and images lists. def FlattenTransformAndImagesList(ListOfPassiveImagesDictionaries,transformation_series): import sys print("HACK: DEBUG: ListOfPassiveImagesDictionaries\n{lpi}\n".format(lpi=ListOfPassiveImagesDictionaries)) subjCount=len(ListOfPassiveImagesDictionaries) tranCount=len(transformation_series) if subjCount != tranCount: print "ERROR: subjCount must equal tranCount {0} != {1}".format(subjCount,tranCount) sys.exit(-1) flattened_images=list() flattened_image_nametypes=list() flattened_transforms=list() passiveImagesCount = len(ListOfPassiveImagesDictionaries[0]) for subjIndex in range(0,subjCount): #if passiveImagesCount != len(ListOfPassiveImagesDictionaries[subjIndex]): # print "ERROR: all image lengths must be equal {0} != {1}".format(passiveImagesCount,len(ListOfPassiveImagesDictionaries[subjIndex])) # sys.exit(-1) subjImgDictionary=ListOfPassiveImagesDictionaries[subjIndex] subjToAtlasTransform=transformation_series[subjIndex] for imgname,img in subjImgDictionary.items(): flattened_images.append(img) flattened_image_nametypes.append(imgname) flattened_transforms.append(subjToAtlasTransform) print("HACK: flattened images {0}\n".format(flattened_images)) print("HACK: flattened nametypes {0}\n".format(flattened_image_nametypes)) print("HACK: flattened txfms {0}\n".format(flattened_transforms)) return flattened_images,flattened_transforms,flattened_image_nametypes def ANTSTemplateBuildSingleIterationWF(iterationPhasePrefix=''): """ Inputs:: inputspec.images : inputspec.fixed_image : inputspec.ListOfPassiveImagesDictionaries : Outputs:: outputspec.template : outputspec.transforms_list : outputspec.passive_deformed_templates : """ TemplateBuildSingleIterationWF = pe.Workflow(name = 'ANTSTemplateBuildSingleIterationWF_'+str(str(iterationPhasePrefix)) ) inputSpec = pe.Node(interface=util.IdentityInterface(fields=['images', 'fixed_image', 'ListOfPassiveImagesDictionaries']), run_without_submitting=True, name='inputspec') ## HACK: TODO: Need to move all local functions to a common untility file, or at the top of the file so that ## they do not change due to re-indenting. Otherwise re-indenting for flow control will trigger ## their hash to change. 
## HACK: TODO: REMOVE 'transforms_list' it is not used. That will change all the hashes ## HACK: TODO: Need to run all python files through the code beutifiers. It has gotten pretty ugly. outputSpec = pe.Node(interface=util.IdentityInterface(fields=['template','transforms_list', 'passive_deformed_templates']), run_without_submitting=True, name='outputspec') ### NOTE MAP NODE! warp each of the original images to the provided fixed_image as the template BeginANTS=pe.MapNode(interface=ANTS(), name = 'BeginANTS', iterfield=['moving_image']) BeginANTS.inputs.dimension = 3 BeginANTS.inputs.output_transform_prefix = str(iterationPhasePrefix)+'_tfm' BeginANTS.inputs.metric = ['CC'] BeginANTS.inputs.metric_weight = [1.0] BeginANTS.inputs.radius = [5] BeginANTS.inputs.transformation_model = 'SyN' BeginANTS.inputs.gradient_step_length = 0.25 BeginANTS.inputs.number_of_iterations = [50, 35, 15] BeginANTS.inputs.number_of_affine_iterations = [10000,10000,10000,10000,10000] BeginANTS.inputs.use_histogram_matching = True BeginANTS.inputs.mi_option = [32, 16000] BeginANTS.inputs.regularization = 'Gauss' BeginANTS.inputs.regularization_gradient_field_sigma = 3 BeginANTS.inputs.regularization_deformation_field_sigma = 0 TemplateBuildSingleIterationWF.connect(inputSpec, 'images', BeginANTS, 'moving_image') TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS, 'fixed_image') MakeTransformsLists = pe.Node(interface=util.Function(function=MakeListsOfTransformLists, input_names=['warpTransformList', 'AffineTransformList'], output_names=['out']), run_without_submitting=True, name='MakeTransformsLists') MakeTransformsLists.inputs.ignore_exception = True TemplateBuildSingleIterationWF.connect(BeginANTS, 'warp_transform', MakeTransformsLists, 'warpTransformList') TemplateBuildSingleIterationWF.connect(BeginANTS, 'affine_transform', MakeTransformsLists, 'AffineTransformList') ## Now warp all the input_images images wimtdeformed = pe.MapNode(interface = WarpImageMultiTransform(), iterfield=['transformation_series', 'input_image'], name ='wimtdeformed') TemplateBuildSingleIterationWF.connect(inputSpec, 'images', wimtdeformed, 'input_image') TemplateBuildSingleIterationWF.connect(MakeTransformsLists, 'out', wimtdeformed, 'transformation_series') ## Shape Update Next ===== ## Now Average All input_images deformed images together to create an updated template average AvgDeformedImages=pe.Node(interface=AverageImages(), name='AvgDeformedImages') AvgDeformedImages.inputs.dimension = 3 AvgDeformedImages.inputs.output_average_image = str(iterationPhasePrefix)+'.nii.gz' AvgDeformedImages.inputs.normalize = True TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image", AvgDeformedImages, 'images') ## Now average all affine transforms together AvgAffineTransform = pe.Node(interface=AverageAffineTransform(), name = 'AvgAffineTransform') AvgAffineTransform.inputs.dimension = 3 AvgAffineTransform.inputs.output_affine_transform = 'Avererage_'+str(iterationPhasePrefix)+'_Affine.mat' TemplateBuildSingleIterationWF.connect(BeginANTS, 'affine_transform', AvgAffineTransform, 'transforms') ## Now average the warp fields togther AvgWarpImages=pe.Node(interface=AverageImages(), name='AvgWarpImages') AvgWarpImages.inputs.dimension = 3 AvgWarpImages.inputs.output_average_image = str(iterationPhasePrefix)+'warp.nii.gz' AvgWarpImages.inputs.normalize = True TemplateBuildSingleIterationWF.connect(BeginANTS, 'warp_transform', AvgWarpImages, 'images') ## Now average the images together ## TODO: For now GradientStep 
is set to 0.25 as a hard coded default value. GradientStep = 0.25 GradientStepWarpImage=pe.Node(interface=MultiplyImages(), name='GradientStepWarpImage') GradientStepWarpImage.inputs.dimension = 3 GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_'+str(iterationPhasePrefix)+'_warp.nii.gz' TemplateBuildSingleIterationWF.connect(AvgWarpImages, 'output_average_image', GradientStepWarpImage, 'first_input') ## Now create the new template shape based on the average of all deformed images UpdateTemplateShape = pe.Node(interface = WarpImageMultiTransform(), name = 'UpdateTemplateShape') UpdateTemplateShape.inputs.invert_affine = [1] TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image', UpdateTemplateShape, 'reference_image') TemplateBuildSingleIterationWF.connect(AvgAffineTransform, 'affine_transform', UpdateTemplateShape, 'transformation_series') TemplateBuildSingleIterationWF.connect(GradientStepWarpImage, 'output_product_image', UpdateTemplateShape, 'input_image') ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node(interface=util.Function(function=MakeTransformListWithGradientWarps, input_names=['averageAffineTranform', 'gradientStepWarp'], output_names=['TransformListWithGradientWarps']), run_without_submitting=True, name='MakeTransformListWithGradientWarps') ApplyInvAverageAndFourTimesGradientStepWarpImage.inputs.ignore_exception = True TemplateBuildSingleIterationWF.connect(AvgAffineTransform, 'affine_transform', ApplyInvAverageAndFourTimesGradientStepWarpImage, 'averageAffineTranform') TemplateBuildSingleIterationWF.connect(UpdateTemplateShape, 'output_image', ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp') ReshapeAverageImageWithShapeUpdate = pe.Node(interface = WarpImageMultiTransform(), name = 'ReshapeAverageImageWithShapeUpdate') ReshapeAverageImageWithShapeUpdate.inputs.invert_affine = [1] ReshapeAverageImageWithShapeUpdate.inputs.out_postfix = '_Reshaped' TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image', ReshapeAverageImageWithShapeUpdate, 'input_image') TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image', ReshapeAverageImageWithShapeUpdate, 'reference_image') TemplateBuildSingleIterationWF.connect(ApplyInvAverageAndFourTimesGradientStepWarpImage, 'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate, 'transformation_series') TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate, 'output_image', outputSpec, 'template') ###### ###### ###### Process all the passive deformed images in a way similar to the main image used for registration ###### ###### ###### ############################################## ## Now warp all the ListOfPassiveImagesDictionaries images FlattenTransformAndImagesListNode = pe.Node( Function(function=FlattenTransformAndImagesList, input_names = ['ListOfPassiveImagesDictionaries','transformation_series'], output_names = ['flattened_images','flattened_transforms','flattened_image_nametypes']), run_without_submitting=True, name="99_FlattenTransformAndImagesList") TemplateBuildSingleIterationWF.connect( inputSpec,'ListOfPassiveImagesDictionaries', FlattenTransformAndImagesListNode, 'ListOfPassiveImagesDictionaries' ) TemplateBuildSingleIterationWF.connect( MakeTransformsLists ,'out', FlattenTransformAndImagesListNode, 'transformation_series' ) wimtPassivedeformed = pe.MapNode(interface = WarpImageMultiTransform(), 
iterfield=['transformation_series', 'input_image'], name ='wimtPassivedeformed') TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image',wimtPassivedeformed,'reference_image') TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_images', wimtPassivedeformed, 'input_image') TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_transforms', wimtPassivedeformed, 'transformation_series') RenestDeformedPassiveImagesNode = pe.Node( Function(function=RenestDeformedPassiveImages, input_names = ['deformedPassiveImages','flattened_image_nametypes'], output_names = ['nested_imagetype_list','outputAverageImageName_list','image_type_list']), run_without_submitting=True, name="99_RenestDeformedPassiveImages") TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image', RenestDeformedPassiveImagesNode, 'deformedPassiveImages') TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_image_nametypes', RenestDeformedPassiveImagesNode, 'flattened_image_nametypes') ## Now Average All passive input_images deformed images together to create an updated template average AvgDeformedPassiveImages=pe.MapNode(interface=AverageImages(), iterfield=['images','output_average_image'], name='AvgDeformedPassiveImages') AvgDeformedPassiveImages.inputs.dimension = 3 AvgDeformedPassiveImages.inputs.normalize = False TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, "nested_imagetype_list", AvgDeformedPassiveImages, 'images') TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, "outputAverageImageName_list", AvgDeformedPassiveImages, 'output_average_image') ## -- TODO: Now neeed to reshape all the passive images as well ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode(interface = WarpImageMultiTransform(), iterfield=['input_image','reference_image','out_postfix'], name = 'ReshapeAveragePassiveImageWithShapeUpdate') ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_affine = [1] TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, "image_type_list", ReshapeAveragePassiveImageWithShapeUpdate, 'out_postfix') TemplateBuildSingleIterationWF.connect(AvgDeformedPassiveImages, 'output_average_image', ReshapeAveragePassiveImageWithShapeUpdate, 'input_image') TemplateBuildSingleIterationWF.connect(AvgDeformedPassiveImages, 'output_average_image', ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image') TemplateBuildSingleIterationWF.connect(ApplyInvAverageAndFourTimesGradientStepWarpImage, 'TransformListWithGradientWarps', ReshapeAveragePassiveImageWithShapeUpdate, 'transformation_series') TemplateBuildSingleIterationWF.connect(ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec, 'passive_deformed_templates') return TemplateBuildSingleIterationWF nipype-0.9.2/nipype/workflows/smri/ants/__init__.py000066400000000000000000000002331227300005300224050ustar00rootroot00000000000000from .ANTSBuildTemplate import ANTSTemplateBuildSingleIterationWF from .antsRegistrationBuildTemplate import antsRegistrationTemplateBuildSingleIterationWFnipype-0.9.2/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py000066400000000000000000000556231227300005300266770ustar00rootroot00000000000000################################################################################# ## Program: Build Template Parallel ## Language: Python ## ## Authors: Jessica Forbes, Grace Murray, and Hans Johnson, University of Iowa ## ## This software is 
distributed WITHOUT ANY WARRANTY; without even ## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ## PURPOSE. ## ################################################################################# import nipype.pipeline.engine as pe import nipype.interfaces.utility as util from nipype.interfaces.utility import Function from nipype.interfaces.ants import ( Registration, ApplyTransforms, AverageImages, MultiplyImages, AverageAffineTransform) def makeListOfOneElement(inputFile): outputList=[inputFile] return outputList def GetFirstListElement(this_list): return this_list[0] def MakeTransformListWithGradientWarps(averageAffineTranform, gradientStepWarp): return [averageAffineTranform, gradientStepWarp, gradientStepWarp, gradientStepWarp, gradientStepWarp] def RenestDeformedPassiveImages(deformedPassiveImages,flattened_image_nametypes,interpolationMapping): import os """ Now make a list of lists of images where the outter list is per image type, and the inner list is the same size as the number of subjects to be averaged. In this case, the first element will be a list of all the deformed T2's, and the second element will be a list of all deformed POSTERIOR_AIR, etc.. """ all_images_size=len(deformedPassiveImages) image_dictionary_of_lists=dict() nested_imagetype_list=list() outputAverageImageName_list=list() image_type_list=list() nested_interpolation_type=list() ## make empty_list, this is not efficient, but it works for name in flattened_image_nametypes: image_dictionary_of_lists[name]=list() for index in range(0,all_images_size): curr_name=flattened_image_nametypes[index] curr_file=deformedPassiveImages[index] image_dictionary_of_lists[curr_name].append(curr_file) for image_type,image_list in image_dictionary_of_lists.items(): nested_imagetype_list.append(image_list) outputAverageImageName_list.append('AVG_'+image_type+'.nii.gz') image_type_list.append('WARP_AVG_'+image_type) if interpolationMapping.has_key(image_type): nested_interpolation_type.append(interpolationMapping[image_type]) else: nested_interpolation_type.append('Linear') #Linear is the default. print "\n"*10 print "HACK: ", nested_imagetype_list print "HACK: ", outputAverageImageName_list print "HACK: ", image_type_list print "HACK: ", nested_interpolation_type return nested_imagetype_list,outputAverageImageName_list,image_type_list,nested_interpolation_type def SplitAffineAndWarpComponents(list_of_transforms_lists): ### Nota bene: The outputs will include the initial_moving_transform from Registration (which depends on what ### the invert_initial_moving_transform is set to) affine_component_list = [] warp_component_list = [] for transform in list_of_transforms_lists: affine_component_list.append(transform[0]) warp_component_list.append(transform[1]) print "HACK ", affine_component_list, " ", warp_component_list return affine_component_list, warp_component_list ## Flatten and return equal length transform and images lists. 
def FlattenTransformAndImagesList(ListOfPassiveImagesDictionaries,transforms,invert_transform_flags,interpolationMapping): import sys print("HACK: DEBUG: ListOfPassiveImagesDictionaries\n{lpi}\n".format(lpi=ListOfPassiveImagesDictionaries)) subjCount=len(ListOfPassiveImagesDictionaries) tranCount=len(transforms) if subjCount != tranCount: print "ERROR: subjCount must equal tranCount {0} != {1}".format(subjCount,tranCount) sys.exit(-1) invertTfmsFlagsCount=len(invert_transform_flags) if subjCount != invertTfmsFlagsCount: print "ERROR: subjCount must equal invertTfmsFlags {0} != {1}".format(subjCount,invertTfmsFlagsCount) sys.exit(-1) flattened_images=list() flattened_image_nametypes=list() flattened_transforms=list() flattened_invert_transform_flags=list() flattened_interpolation_type=list() passiveImagesCount = len(ListOfPassiveImagesDictionaries[0]) for subjIndex in range(0,subjCount): #if passiveImagesCount != len(ListOfPassiveImagesDictionaries[subjIndex]): # print "ERROR: all image lengths must be equal {0} != {1}".format(passiveImagesCount,len(ListOfPassiveImagesDictionaries[subjIndex])) # sys.exit(-1) subjImgDictionary=ListOfPassiveImagesDictionaries[subjIndex] subjToAtlasTransform=transforms[subjIndex] subjToAtlasInvertFlags=invert_transform_flags[subjIndex] for imgname,img in subjImgDictionary.items(): flattened_images.append(img) flattened_image_nametypes.append(imgname) flattened_transforms.append(subjToAtlasTransform) flattened_invert_transform_flags.append(subjToAtlasInvertFlags) if interpolationMapping.has_key(imgname): flattened_interpolation_type.append(interpolationMapping[imgname]) else: flattened_interpolation_type.append('Linear') #Linear is the default. print("HACK: flattened images {0}\n".format(flattened_images)) print("HACK: flattened nametypes {0}\n".format(flattened_image_nametypes)) print("HACK: flattened txfms {0}\n".format(flattened_transforms)) print("HACK: flattened txfmsFlags{0}\n".format(flattened_invert_transform_flags)) return flattened_images,flattened_transforms,flattened_invert_transform_flags,flattened_image_nametypes,flattened_interpolation_type def GetMovingImages(ListOfImagesDictionaries,registrationImageTypes,interpolationMapping): """ This currently ONLY works when registrationImageTypes has length of exactly 1. When the new multi-variate registration is introduced, it will be expanded. 
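Illustrative example (hypothetical file names): with ListOfImagesDictionaries = [{'T1': 's1_T1.nii', 'T2': 's1_T2.nii'}, {'T1': 's2_T1.nii', 'T2': 's2_T2.nii'}], registrationImageTypes = ['T1'] and interpolationMapping = {'T1': 'Linear'}, the expected return value is (['s1_T1.nii', 's2_T1.nii'], 'Linear').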
""" if len(registrationImageTypes) !=1: print("ERROR: Multivariate imageing not supported yet!") return [] moving_images=[ mdict[ registrationImageTypes[0] ] for mdict in ListOfImagesDictionaries ] moving_interpolation_type=interpolationMapping[ registrationImageTypes[0] ] return moving_images,moving_interpolation_type def GetPassiveImages(ListOfImagesDictionaries,registrationImageTypes): if len(registrationImageTypes) !=1: print("ERROR: Multivariate imageing not supported yet!") return [dict()] passive_images=list() for mdict in ListOfImagesDictionaries: ThisSubjectPassiveImages=dict() for key,value in mdict.items(): if key not in registrationImageTypes: ThisSubjectPassiveImages[key]=value passive_images.append(ThisSubjectPassiveImages) return passive_images ## ## NOTE: The modes can be either 'SINGLE_IMAGE' or 'MULTI' ## 'SINGLE_IMAGE' is quick shorthand when you are building an atlas with a single subject, then registration can ## be short-circuted ## any other string indicates the normal mode that you would expect and replicates the shell script build_template_parallel.sh def antsRegistrationTemplateBuildSingleIterationWF(iterationPhasePrefix=''): """ Inputs:: inputspec.images : inputspec.fixed_image : inputspec.ListOfPassiveImagesDictionaries : inputspec.interpolationMapping : Outputs:: outputspec.template : outputspec.transforms_list : outputspec.passive_deformed_templates : """ TemplateBuildSingleIterationWF = pe.Workflow(name = 'antsRegistrationTemplateBuildSingleIterationWF_'+str(iterationPhasePrefix) ) inputSpec = pe.Node(interface=util.IdentityInterface(fields=[ 'ListOfImagesDictionaries', 'registrationImageTypes', 'interpolationMapping','fixed_image']), run_without_submitting=True, name='inputspec') ## HACK: TODO: Need to move all local functions to a common untility file, or at the top of the file so that ## they do not change due to re-indenting. Otherwise re-indenting for flow control will trigger ## their hash to change. ## HACK: TODO: REMOVE 'transforms_list' it is not used. That will change all the hashes ## HACK: TODO: Need to run all python files through the code beutifiers. It has gotten pretty ugly. outputSpec = pe.Node(interface=util.IdentityInterface(fields=['template','transforms_list', 'passive_deformed_templates']), run_without_submitting=True, name='outputspec') ### NOTE MAP NODE! 
warp each of the original images to the provided fixed_image as the template BeginANTS=pe.MapNode(interface=Registration(), name = 'BeginANTS', iterfield=['moving_image']) BeginANTS.inputs.dimension = 3 BeginANTS.inputs.output_transform_prefix = str(iterationPhasePrefix)+'_tfm' BeginANTS.inputs.transforms = ["Affine", "SyN"] BeginANTS.inputs.transform_parameters = [[0.9], [0.25,3.0,0.0]] BeginANTS.inputs.metric = ['Mattes', 'CC'] BeginANTS.inputs.metric_weight = [1.0, 1.0] BeginANTS.inputs.radius_or_number_of_bins = [32, 5] BeginANTS.inputs.number_of_iterations = [[1000, 1000, 1000], [50, 35, 15]] BeginANTS.inputs.use_histogram_matching = [True, True] BeginANTS.inputs.use_estimate_learning_rate_once = [False, False] BeginANTS.inputs.shrink_factors = [[3,2,1], [3,2,1]] BeginANTS.inputs.smoothing_sigmas = [[3,2,0], [3,2,0]] BeginANTS.inputs.sigma_units = ["vox"]*2 GetMovingImagesNode = pe.Node(interface=util.Function(function=GetMovingImages, input_names=['ListOfImagesDictionaries','registrationImageTypes','interpolationMapping'], output_names=['moving_images','moving_interpolation_type']), run_without_submitting=True, name='99_GetMovingImagesNode') TemplateBuildSingleIterationWF.connect(inputSpec, 'ListOfImagesDictionaries', GetMovingImagesNode, 'ListOfImagesDictionaries') TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes', GetMovingImagesNode, 'registrationImageTypes') TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',GetMovingImagesNode,'interpolationMapping') TemplateBuildSingleIterationWF.connect(GetMovingImagesNode, 'moving_images', BeginANTS, 'moving_image') TemplateBuildSingleIterationWF.connect(GetMovingImagesNode, 'moving_interpolation_type', BeginANTS, 'interpolation') TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS, 'fixed_image') ## Now warp all the input_images images wimtdeformed = pe.MapNode(interface = ApplyTransforms(), iterfield=['transforms','invert_transform_flags','input_image'], name ='wimtdeformed') wimtdeformed.inputs.interpolation = 'Linear' wimtdeformed.default_value = 0 TemplateBuildSingleIterationWF.connect(BeginANTS,'forward_transforms',wimtdeformed,'transforms') TemplateBuildSingleIterationWF.connect(BeginANTS,'forward_invert_flags',wimtdeformed,'invert_transform_flags') TemplateBuildSingleIterationWF.connect(GetMovingImagesNode, 'moving_images', wimtdeformed, 'input_image') TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', wimtdeformed, 'reference_image') ## Shape Update Next ===== ## Now Average All input_images deformed images together to create an updated template average AvgDeformedImages=pe.Node(interface=AverageImages(), name='AvgDeformedImages') AvgDeformedImages.inputs.dimension = 3 AvgDeformedImages.inputs.output_average_image = str(iterationPhasePrefix)+'.nii.gz' AvgDeformedImages.inputs.normalize = True TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image", AvgDeformedImages, 'images') ## Now average all affine transforms together AvgAffineTransform = pe.Node(interface=AverageAffineTransform(), name = 'AvgAffineTransform') AvgAffineTransform.inputs.dimension = 3 AvgAffineTransform.inputs.output_affine_transform = 'Avererage_'+str(iterationPhasePrefix)+'_Affine.mat' SplitAffineAndWarpsNode = pe.Node(interface=util.Function(function=SplitAffineAndWarpComponents, input_names=['list_of_transforms_lists'], output_names=['affine_component_list', 'warp_component_list']), run_without_submitting=True, name='99_SplitAffineAndWarpsNode') 
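## Illustrative note (added for clarity; the file names below are hypothetical): given the BeginANTS settings above with transforms=['Affine', 'SyN'], each entry of 'forward_transforms' is expected to be a two-element list per subject, e.g. [['iter_tfm0Affine.mat', 'iter_tfm1Warp.nii.gz'], ...], and SplitAffineAndWarpComponents assumes this layout, taking element [0] of each pair as the affine component and element [1] as the warp component; as the nota bene in SplitAffineAndWarpComponents points out, the exact contents also depend on how initial_moving_transform is configured.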
TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms',SplitAffineAndWarpsNode,'list_of_transforms_lists') TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode, 'affine_component_list', AvgAffineTransform, 'transforms') ## Now average the warp fields togther AvgWarpImages=pe.Node(interface=AverageImages(), name='AvgWarpImages') AvgWarpImages.inputs.dimension = 3 AvgWarpImages.inputs.output_average_image = str(iterationPhasePrefix)+'warp.nii.gz' AvgWarpImages.inputs.normalize = True TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode, 'warp_component_list', AvgWarpImages, 'images') ## Now average the images together ## TODO: For now GradientStep is set to 0.25 as a hard coded default value. GradientStep = 0.25 GradientStepWarpImage=pe.Node(interface=MultiplyImages(), name='GradientStepWarpImage') GradientStepWarpImage.inputs.dimension = 3 GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_'+str(iterationPhasePrefix)+'_warp.nii.gz' TemplateBuildSingleIterationWF.connect(AvgWarpImages, 'output_average_image', GradientStepWarpImage, 'first_input') ## Now create the new template shape based on the average of all deformed images UpdateTemplateShape = pe.Node(interface = ApplyTransforms(), name = 'UpdateTemplateShape') UpdateTemplateShape.inputs.invert_transform_flags = [True] UpdateTemplateShape.inputs.interpolation = 'Linear' UpdateTemplateShape.default_value = 0 TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image', UpdateTemplateShape, 'reference_image') TemplateBuildSingleIterationWF.connect( [ (AvgAffineTransform, UpdateTemplateShape, [(('affine_transform', makeListOfOneElement ), 'transforms')] ), ]) TemplateBuildSingleIterationWF.connect(GradientStepWarpImage, 'output_product_image', UpdateTemplateShape, 'input_image') ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node(interface=util.Function(function=MakeTransformListWithGradientWarps, input_names=['averageAffineTranform', 'gradientStepWarp'], output_names=['TransformListWithGradientWarps']), run_without_submitting=True, name='99_MakeTransformListWithGradientWarps') ApplyInvAverageAndFourTimesGradientStepWarpImage.inputs.ignore_exception = True TemplateBuildSingleIterationWF.connect(AvgAffineTransform, 'affine_transform', ApplyInvAverageAndFourTimesGradientStepWarpImage, 'averageAffineTranform') TemplateBuildSingleIterationWF.connect(UpdateTemplateShape, 'output_image', ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp') ReshapeAverageImageWithShapeUpdate = pe.Node(interface = ApplyTransforms(), name = 'ReshapeAverageImageWithShapeUpdate') ReshapeAverageImageWithShapeUpdate.inputs.invert_transform_flags = [ True, False, False, False, False ] ReshapeAverageImageWithShapeUpdate.inputs.interpolation = 'Linear' ReshapeAverageImageWithShapeUpdate.default_value = 0 ReshapeAverageImageWithShapeUpdate.inputs.output_image = 'ReshapeAverageImageWithShapeUpdate.nii.gz' TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image', ReshapeAverageImageWithShapeUpdate, 'input_image') TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image', ReshapeAverageImageWithShapeUpdate, 'reference_image') TemplateBuildSingleIterationWF.connect(ApplyInvAverageAndFourTimesGradientStepWarpImage, 'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate, 'transforms') TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate, 
'output_image', outputSpec, 'template') ###### ###### ###### Process all the passive deformed images in a way similar to the main image used for registration ###### ###### ###### ############################################## ## Now warp all the ListOfPassiveImagesDictionaries images FlattenTransformAndImagesListNode = pe.Node( Function(function=FlattenTransformAndImagesList, input_names = ['ListOfPassiveImagesDictionaries','transforms', 'invert_transform_flags','interpolationMapping'], output_names = ['flattened_images','flattened_transforms','flattened_invert_transform_flags', 'flattened_image_nametypes','flattened_interpolation_type']), run_without_submitting=True, name="99_FlattenTransformAndImagesList") GetPassiveImagesNode = pe.Node(interface=util.Function(function=GetPassiveImages, input_names=['ListOfImagesDictionaries','registrationImageTypes'], output_names=['ListOfPassiveImagesDictionaries']), run_without_submitting=True, name='99_GetPassiveImagesNode') TemplateBuildSingleIterationWF.connect(inputSpec, 'ListOfImagesDictionaries', GetPassiveImagesNode, 'ListOfImagesDictionaries') TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes', GetPassiveImagesNode, 'registrationImageTypes') TemplateBuildSingleIterationWF.connect( GetPassiveImagesNode,'ListOfPassiveImagesDictionaries', FlattenTransformAndImagesListNode, 'ListOfPassiveImagesDictionaries' ) TemplateBuildSingleIterationWF.connect( inputSpec,'interpolationMapping', FlattenTransformAndImagesListNode, 'interpolationMapping' ) TemplateBuildSingleIterationWF.connect( BeginANTS,'forward_transforms', FlattenTransformAndImagesListNode, 'transforms' ) TemplateBuildSingleIterationWF.connect( BeginANTS,'forward_invert_flags', FlattenTransformAndImagesListNode, 'invert_transform_flags' ) wimtPassivedeformed = pe.MapNode(interface = ApplyTransforms(), iterfield=['transforms','invert_transform_flags', 'input_image','interpolation'], name ='wimtPassivedeformed') wimtPassivedeformed.default_value = 0 TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image',wimtPassivedeformed,'reference_image') TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_interpolation_type', wimtPassivedeformed, 'interpolation') TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_images', wimtPassivedeformed, 'input_image') TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_transforms', wimtPassivedeformed, 'transforms') TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_invert_transform_flags', wimtPassivedeformed, 'invert_transform_flags') RenestDeformedPassiveImagesNode = pe.Node( Function(function=RenestDeformedPassiveImages, input_names = ['deformedPassiveImages','flattened_image_nametypes','interpolationMapping'], output_names = ['nested_imagetype_list','outputAverageImageName_list', 'image_type_list','nested_interpolation_type']), run_without_submitting=True, name="99_RenestDeformedPassiveImages") TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping', RenestDeformedPassiveImagesNode, 'interpolationMapping') TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image', RenestDeformedPassiveImagesNode, 'deformedPassiveImages') TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_image_nametypes', RenestDeformedPassiveImagesNode, 'flattened_image_nametypes') ## Now Average All passive input_images deformed images together to 
create an updated template average AvgDeformedPassiveImages=pe.MapNode(interface=AverageImages(), iterfield=['images','output_average_image'], name='AvgDeformedPassiveImages') AvgDeformedPassiveImages.inputs.dimension = 3 AvgDeformedPassiveImages.inputs.normalize = False TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, "nested_imagetype_list", AvgDeformedPassiveImages, 'images') TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, "outputAverageImageName_list", AvgDeformedPassiveImages, 'output_average_image') ## -- TODO: Now neeed to reshape all the passive images as well ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode(interface = ApplyTransforms(), iterfield=['input_image','reference_image','output_image','interpolation'], name = 'ReshapeAveragePassiveImageWithShapeUpdate') ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_transform_flags = [ True, False, False, False, False ] ReshapeAveragePassiveImageWithShapeUpdate.default_value = 0 TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, 'nested_interpolation_type', ReshapeAveragePassiveImageWithShapeUpdate, 'interpolation') TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, 'outputAverageImageName_list', ReshapeAveragePassiveImageWithShapeUpdate, 'output_image') TemplateBuildSingleIterationWF.connect(AvgDeformedPassiveImages, 'output_average_image', ReshapeAveragePassiveImageWithShapeUpdate, 'input_image') TemplateBuildSingleIterationWF.connect(AvgDeformedPassiveImages, 'output_average_image', ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image') TemplateBuildSingleIterationWF.connect(ApplyInvAverageAndFourTimesGradientStepWarpImage, 'TransformListWithGradientWarps', ReshapeAveragePassiveImageWithShapeUpdate, 'transforms') TemplateBuildSingleIterationWF.connect(ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec, 'passive_deformed_templates') return TemplateBuildSingleIterationWF nipype-0.9.2/nipype/workflows/smri/freesurfer/000077500000000000000000000000001227300005300215015ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/smri/freesurfer/__init__.py000066400000000000000000000001731227300005300236130ustar00rootroot00000000000000from .utils import (create_getmask_flow, create_get_stats_flow, create_tessellation_flow) from .bem import create_bem_flow nipype-0.9.2/nipype/workflows/smri/freesurfer/bem.py000066400000000000000000000047641227300005300226310ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import nipype.pipeline.engine as pe import nipype.interfaces.mne as mne import nipype.interfaces.freesurfer as fs import nipype.interfaces.utility as niu def create_bem_flow(name='bem', out_format='stl'): """Uses MNE's Watershed algorithm to create Boundary Element Meshes (BEM) for a subject's brain, inner/outer skull, and skin. The surfaces are returned in the desired (by default, stereolithic .stl) format. Example ------- >>> from nipype.workflows.smri.freesurfer import create_bem_flow >>> bemflow = create_bem_flow() >>> bemflow.inputs.inputspec.subject_id = 'subj1' >>> bemflow.inputs.inputspec.subjects_dir = '.' 
>>> bemflow.run() # doctest: +SKIP Inputs:: inputspec.subject_id : freesurfer subject id inputspec.subjects_dir : freesurfer subjects directory Outputs:: outputspec.meshes : output boundary element meshes in (by default) stereolithographic (.stl) format """ """ Initialize the workflow """ bemflow = pe.Workflow(name=name) """ Define the inputs to the workflow. """ inputnode = pe.Node(niu.IdentityInterface(fields=['subject_id', 'subjects_dir']), name='inputspec') """ Define all the nodes of the workflow: fssource: used to retrieve aseg.mgz mri_convert : converts aseg.mgz to aseg.nii tessellate : tessellates regions in aseg.mgz surfconvert : converts regions to stereolithographic (.stl) format """ watershed_bem = pe.Node(interface=mne.WatershedBEM(), name='WatershedBEM') surfconvert = pe.MapNode(fs.MRIsConvert(out_datatype=out_format), iterfield=['in_file'], name='surfconvert') """ Connect the nodes """ bemflow.connect([ (inputnode, watershed_bem, [('subject_id', 'subject_id'), ('subjects_dir', 'subjects_dir')]), (watershed_bem, surfconvert, [('mesh_files', 'in_file')]), ]) """ Setup an outputnode that defines relevant inputs of the workflow. """ outputnode = pe.Node(niu.IdentityInterface(fields=["meshes"]), name="outputspec") bemflow.connect([ (surfconvert, outputnode, [("converted", "meshes")]), ]) return bemflow nipype-0.9.2/nipype/workflows/smri/freesurfer/setup.py000066400000000000000000000006601227300005300232150ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('freesurfer', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/smri/freesurfer/utils.py000066400000000000000000000322551227300005300232220ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import nipype.pipeline.engine as pe import nipype.interfaces.fsl as fsl import nipype.interfaces.freesurfer as fs import nipype.interfaces.meshfix as mf import nipype.interfaces.io as nio import nipype.interfaces.utility as niu import nipype.algorithms.misc as misc from nipype.interfaces.utility import Function from nipype.workflows.misc.utils import region_list_from_volume, id_list_from_lookup_table import os, os.path as op def get_aparc_aseg(files): """Return the aparc+aseg.mgz file""" for name in files: if 'aparc+aseg' in name: return name raise ValueError('aparc+aseg.mgz not found') def create_getmask_flow(name='getmask', dilate_mask=True): """Registers a source file to freesurfer space and create a brain mask in source space Requires fsl tools for initializing registration Parameters ---------- name : string name of workflow dilate_mask : boolean indicates whether to dilate mask or not Example ------- >>> getmask = create_getmask_flow() >>> getmask.inputs.inputspec.source_file = 'mean.nii' >>> getmask.inputs.inputspec.subject_id = 's1' >>> getmask.inputs.inputspec.subjects_dir = '.' 
>>> getmask.inputs.inputspec.contrast_type = 't2' Inputs:: inputspec.source_file : reference image for mask generation inputspec.subject_id : freesurfer subject id inputspec.subjects_dir : freesurfer subjects directory inputspec.contrast_type : MR contrast of reference image Outputs:: outputspec.mask_file : binary mask file in reference image space outputspec.reg_file : registration file that maps reference image to freesurfer space outputspec.reg_cost : cost of registration (useful for detecting misalignment) """ """ Initialize the workflow """ getmask = pe.Workflow(name=name) """ Define the inputs to the workflow. """ inputnode = pe.Node(niu.IdentityInterface(fields=['source_file', 'subject_id', 'subjects_dir', 'contrast_type']), name='inputspec') """ Define all the nodes of the workflow: fssource: used to retrieve aseg.mgz threshold : binarize aseg register : coregister source file to freesurfer space voltransform: convert binarized aseg to source file space """ fssource = pe.Node(nio.FreeSurferSource(), name = 'fssource') threshold = pe.Node(fs.Binarize(min=0.5, out_type='nii'), name='threshold') register = pe.MapNode(fs.BBRegister(init='fsl'), iterfield=['source_file'], name='register') voltransform = pe.MapNode(fs.ApplyVolTransform(inverse=True), iterfield=['source_file', 'reg_file'], name='transform') """ Connect the nodes """ getmask.connect([ (inputnode, fssource, [('subject_id','subject_id'), ('subjects_dir','subjects_dir')]), (inputnode, register, [('source_file', 'source_file'), ('subject_id', 'subject_id'), ('subjects_dir', 'subjects_dir'), ('contrast_type', 'contrast_type')]), (inputnode, voltransform, [('subjects_dir', 'subjects_dir'), ('source_file', 'source_file')]), (fssource, threshold, [(('aparc_aseg', get_aparc_aseg), 'in_file')]), (register, voltransform, [('out_reg_file','reg_file')]), (threshold, voltransform, [('binary_file','target_file')]) ]) """ Add remaining nodes and connections dilate : dilate the transformed file in source space threshold2 : binarize transformed file """ threshold2 = pe.MapNode(fs.Binarize(min=0.5, out_type='nii'), iterfield=['in_file'], name='threshold2') if dilate_mask: threshold2.inputs.dilate = 1 getmask.connect([ (voltransform, threshold2, [('transformed_file', 'in_file')]) ]) """ Setup an outputnode that defines relevant inputs of the workflow. """ outputnode = pe.Node(niu.IdentityInterface(fields=["mask_file", "reg_file", "reg_cost" ]), name="outputspec") getmask.connect([ (register, outputnode, [("out_reg_file", "reg_file")]), (register, outputnode, [("min_cost_file", "reg_cost")]), (threshold2, outputnode, [("binary_file", "mask_file")]), ]) return getmask def create_get_stats_flow(name='getstats', withreg=False): """Retrieves stats from labels Parameters ---------- name : string name of workflow withreg : boolean indicates whether to register source to label Example ------- Inputs:: inputspec.source_file : reference image for mask generation inputspec.label_file : label file from which to get ROIs (optionally with registration) inputspec.reg_file : bbreg file (assumes reg from source to label inputspec.inverse : boolean whether to invert the registration inputspec.subjects_dir : freesurfer subjects directory Outputs:: outputspec.stats_file : stats file """ """ Initialize the workflow """ getstats = pe.Workflow(name=name) """ Define the inputs to the workflow. 
""" if withreg: inputnode = pe.Node(niu.IdentityInterface(fields=['source_file', 'label_file', 'reg_file', 'subjects_dir']), name='inputspec') else: inputnode = pe.Node(niu.IdentityInterface(fields=['source_file', 'label_file']), name='inputspec') statnode = pe.MapNode(fs.SegStats(), iterfield=['segmentation_file','in_file'], name='segstats') """ Convert between source and label spaces if registration info is provided """ if withreg: voltransform = pe.MapNode(fs.ApplyVolTransform(inverse=True), iterfield=['source_file', 'reg_file'], name='transform') getstats.connect(inputnode, 'reg_file', voltransform, 'reg_file') getstats.connect(inputnode, 'source_file', voltransform, 'source_file') getstats.connect(inputnode, 'label_file', voltransform, 'target_file') getstats.connect(inputnode, 'subjects_dir', voltransform, 'subjects_dir') def switch_labels(inverse, transform_output, source_file, label_file): if inverse: return transform_output, source_file else: return label_file, transform_output chooser = pe.MapNode(niu.Function(input_names = ['inverse', 'transform_output', 'source_file', 'label_file'], output_names = ['label_file', 'source_file'], function=switch_labels), iterfield=['transform_output','source_file'], name='chooser') getstats.connect(inputnode,'source_file', chooser, 'source_file') getstats.connect(inputnode,'label_file', chooser, 'label_file') getstats.connect(inputnode,'inverse', chooser, 'inverse') getstats.connect(voltransform, 'transformed_file', chooser, 'transform_output') getstats.connect(chooser, 'label_file', statnode, 'segmentation_file') getstats.connect(chooser, 'source_file', statnode, 'in_file') else: getstats.connect(inputnode, 'label_file', statnode, 'segmentation_file') getstats.connect(inputnode, 'source_file', statnode, 'in_file') """ Setup an outputnode that defines relevant inputs of the workflow. """ outputnode = pe.Node(niu.IdentityInterface(fields=["stats_file" ]), name="outputspec") getstats.connect([ (statnode, outputnode, [("summary_file", "stats_file")]), ]) return getstats def create_tessellation_flow(name='tessellate', out_format='stl'): """Tessellates the input subject's aseg.mgz volume and returns the surfaces for each region in stereolithic (.stl) format Example ------- >>> from nipype.workflows.smri.freesurfer import create_tessellation_flow >>> tessflow = create_tessellation_flow() >>> tessflow.inputs.inputspec.subject_id = 'subj1' >>> tessflow.inputs.inputspec.subjects_dir = '.' >>> tessflow.inputs.inputspec.lookup_file = 'FreeSurferColorLUT.txt' # doctest: +SKIP >>> tessflow.run() # doctest: +SKIP Inputs:: inputspec.subject_id : freesurfer subject id inputspec.subjects_dir : freesurfer subjects directory inputspec.lookup_file : lookup file from freesurfer directory Outputs:: outputspec.meshes : output region meshes in (by default) stereolithographic (.stl) format """ """ Initialize the workflow """ tessflow = pe.Workflow(name=name) """ Define the inputs to the workflow. 
""" inputnode = pe.Node(niu.IdentityInterface(fields=['subject_id', 'subjects_dir', 'lookup_file']), name='inputspec') """ Define all the nodes of the workflow: fssource: used to retrieve aseg.mgz mri_convert : converts aseg.mgz to aseg.nii tessellate : tessellates regions in aseg.mgz surfconvert : converts regions to stereolithographic (.stl) format smoother: smooths the tessellated regions """ fssource = pe.Node(nio.FreeSurferSource(), name = 'fssource') volconvert = pe.Node(fs.MRIConvert(out_type='nii'), name = 'volconvert') tessellate = pe.MapNode(fs.MRIMarchingCubes(), iterfield=['label_value','out_file'], name='tessellate') surfconvert = pe.MapNode(fs.MRIsConvert(out_datatype='stl'), iterfield=['in_file'], name='surfconvert') smoother = pe.MapNode(mf.MeshFix(), iterfield=['in_file1'], name='smoother') if out_format == 'gii': stl_to_gifti = pe.MapNode(fs.MRIsConvert(out_datatype=out_format), iterfield=['in_file'], name='stl_to_gifti') smoother.inputs.save_as_stl = True smoother.inputs.laplacian_smoothing_steps = 1 region_list_from_volume_interface = Function(input_names=["in_file"], output_names=["region_list"], function=region_list_from_volume) id_list_from_lookup_table_interface = Function(input_names=["lookup_file", "region_list"], output_names=["id_list"], function=id_list_from_lookup_table) region_list_from_volume_node = pe.Node(interface=region_list_from_volume_interface, name='region_list_from_volume_node') id_list_from_lookup_table_node = pe.Node(interface=id_list_from_lookup_table_interface, name='id_list_from_lookup_table_node') """ Connect the nodes """ tessflow.connect([ (inputnode, fssource, [('subject_id','subject_id'), ('subjects_dir','subjects_dir')]), (fssource, volconvert, [('aseg', 'in_file')]), (volconvert, region_list_from_volume_node, [('out_file', 'in_file')]), (region_list_from_volume_node, tessellate, [('region_list', 'label_value')]), (region_list_from_volume_node, id_list_from_lookup_table_node, [('region_list', 'region_list')]), (inputnode, id_list_from_lookup_table_node, [('lookup_file', 'lookup_file')]), (id_list_from_lookup_table_node, tessellate, [('id_list', 'out_file')]), (fssource, tessellate, [('aseg', 'in_file')]), (tessellate, surfconvert, [('surface','in_file')]), (surfconvert, smoother, [('converted','in_file1')]), ]) """ Setup an outputnode that defines relevant inputs of the workflow. 
""" outputnode = pe.Node(niu.IdentityInterface(fields=["meshes"]), name="outputspec") if out_format == 'gii': tessflow.connect([ (smoother, stl_to_gifti, [("mesh_file", "in_file")]), ]) tessflow.connect([ (stl_to_gifti, outputnode, [("converted", "meshes")]), ]) else: tessflow.connect([ (smoother, outputnode, [("mesh_file", "meshes")]), ]) return tessflow nipype-0.9.2/nipype/workflows/smri/setup.py000066400000000000000000000007621227300005300210500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('smri', parent_package, top_path) config.add_subpackage('freesurfer') config.add_subpackage('ants') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/nipype/workflows/warp/000077500000000000000000000000001227300005300173305ustar00rootroot00000000000000nipype-0.9.2/nipype/workflows/warp/__init__.py000066400000000000000000000000011227300005300214300ustar00rootroot00000000000000 nipype-0.9.2/nipype/workflows/warp/setup.py000066400000000000000000000006471227300005300210510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('warp', parent_package, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipype-0.9.2/requirements.txt000066400000000000000000000001251227300005300163000ustar00rootroot00000000000000numpy>=1.3 scipy>=0.7 networkx>=1.0 traits>=4.0 dateutil>=1.5 nibabel>=1.0 nose>=1.0 nipype-0.9.2/setup.py000077500000000000000000000073351227300005300145430ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Nipype : Neuroimaging in Python pipelines and interfaces package. Nipype intends to create python interfaces to other neuroimaging packages and create an API for specifying a full analysis pipeline in python. """ import sys from glob import glob # Import build helpers try: from nisext.sexts import package_check, get_comrec_build except ImportError: raise RuntimeError('Need nisext package from nibabel installation' ' - please install nibabel first') from build_docs import cmdclass, INFO_VARS # Add custom commit-recording build command cmdclass['build_py'] = get_comrec_build('nipype') def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) # The quiet=True option will silence all of the name setting warnings: # Ignoring attempt to set 'name' (from 'nipy.core' to # 'nipy.core.image') # Robert Kern recommends setting quiet=True on the numpy list, stating # these messages are probably only used in debugging numpy distutils. 
config.get_version('nipype/__init__.py') # sets config.version config.add_subpackage('nipype', 'nipype') return config ################################################################################ # For some commands, use setuptools if len(set(('develop', 'bdist_egg', 'bdist_rpm', 'bdist', 'bdist_dumb', 'bdist_wininst', 'install_egg_info', 'egg_info', 'easy_install', )).intersection(sys.argv)) > 0: from setup_egg import extra_setuptools_args # extra_setuptools_args can be defined from the line above, but it can # also be defined here because setup.py has been exec'ed from # setup_egg.py. if not 'extra_setuptools_args' in globals(): extra_setuptools_args = dict() # Hard and soft dependency checking package_check('networkx', INFO_VARS['NETWORKX_MIN_VERSION']) package_check('nibabel', INFO_VARS['NIBABEL_MIN_VERSION']) package_check('numpy', INFO_VARS['NUMPY_MIN_VERSION']) package_check('scipy', INFO_VARS['SCIPY_MIN_VERSION']) package_check('traits', INFO_VARS['TRAITS_MIN_VERSION']) ################################################################################ # Import the documentation building classes. try: from build_docs import cmdclass except ImportError: """ Pass by the doc build gracefully if sphinx is not installed """ print "Sphinx is not installed, docs cannot be built" cmdclass = {} ################################################################################ def main(**extra_args): from numpy.distutils.core import setup setup(name=INFO_VARS['NAME'], maintainer=INFO_VARS['MAINTAINER'], maintainer_email=INFO_VARS['MAINTAINER_EMAIL'], description=INFO_VARS['DESCRIPTION'], long_description=INFO_VARS['LONG_DESCRIPTION'], url=INFO_VARS['URL'], download_url=INFO_VARS['DOWNLOAD_URL'], license=INFO_VARS['LICENSE'], classifiers=INFO_VARS['CLASSIFIERS'], author=INFO_VARS['AUTHOR'], author_email=INFO_VARS['AUTHOR_EMAIL'], platforms=INFO_VARS['PLATFORMS'], version=INFO_VARS['VERSION'], requires=INFO_VARS['REQUIRES'], configuration = configuration, cmdclass = cmdclass, scripts = glob('bin/*'), **extra_args) if __name__ == "__main__": main(**extra_setuptools_args) nipype-0.9.2/setup_egg.py000077500000000000000000000013421227300005300153550ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Wrapper to run setup.py using setuptools.""" from setuptools import setup ################################################################################ # Call the setup.py script, injecting the setuptools-specific arguments. extra_setuptools_args = dict( tests_require=['nose'], test_suite='nose.collector', zip_safe=False, ) if __name__ == '__main__': execfile('setup.py', dict(__name__='__main__', extra_setuptools_args=extra_setuptools_args)) nipype-0.9.2/tools/000077500000000000000000000000001227300005300141565ustar00rootroot00000000000000nipype-0.9.2/tools/README000066400000000000000000000006601227300005300150400ustar00rootroot00000000000000============== Nipype Tools ============== This directory contains various tools used by the nipype developers. Only install tools here that are unique to the nipype project. Any tools shared with our parent project, nipy, should go in the nipy/tools directory. Exceptions ---------- * apigen.py: This is not importable from nipy, so I copied it. * build_modref_templates.py: This was copied and modified to work with nipype. 
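Example invocation
------------------
The documentation helpers ``build_modref_templates.py`` and
``build_interface_docs.py`` (which build on ``apigen.py`` and
``interfacedocgen.py``) take no arguments: they insert the parent directory
on ``sys.path`` and write their output relative to the current working
directory (``api/generated`` and ``interfaces/generated``).  An illustrative
invocation -- assuming you run them from the doc directory so that those
relative output paths resolve there -- would be::

    python ../tools/build_modref_templates.py
    python ../tools/build_interface_docs.py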
nipype-0.9.2/tools/apigen.py000066400000000000000000000370501227300005300160000ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Attempt to generate templates for module reference with Sphinx XXX - we exclude extension modules To include extension modules, first identify them as valid in the ``_uri2path`` method, then handle them in the ``_parse_module`` script. We get functions and classes by parsing the text of .py files. Alternatively we could import the modules for discovery, and we'd have to do that for extension modules. This would involve changing the ``_parse_module`` method to work via import and introspection, and might involve changing ``discover_modules`` (which determines which files are modules, and therefore which module URIs will be passed to ``_parse_module``). NOTE: this is a modified version of a script originally shipped with the PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed project.""" # Stdlib imports from __future__ import print_function import os import re # Functions and classes class ApiDocWriter(object): ''' Class for automatic detection and parsing of API docs to Sphinx-parsable reST format''' # only separating first two levels rst_section_levels = ['*', '=', '-', '~', '^'] def __init__(self, package_name, rst_extension='.rst', package_skip_patterns=None, module_skip_patterns=None, ): ''' Initialize package for parsing Parameters ---------- package_name : string Name of the top-level package. *package_name* must be the name of an importable package rst_extension : string, optional Extension for reST files, default '.rst' package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for earching by these regexps. If is None, gives default. Default is: ['\.tests$'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If is None, gives default. 
Default is: ['\.setup$', '\._'] ''' if package_skip_patterns is None: package_skip_patterns = ['\\.tests$'] if module_skip_patterns is None: module_skip_patterns = ['\\.setup$', '\\._'] self.package_name = package_name self.rst_extension = rst_extension self.package_skip_patterns = package_skip_patterns self.module_skip_patterns = module_skip_patterns def get_package_name(self): return self._package_name def set_package_name(self, package_name): ''' Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = 'docutils' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True ''' # It's also possible to imagine caching the module parsing here self._package_name = package_name self.root_module = __import__(package_name) self.root_path = self.root_module.__path__[0] self.written_modules = None package_name = property(get_package_name, set_package_name, None, 'get/set package_name') def _get_object_name(self, line): ''' Get second token in line >>> docwriter = ApiDocWriter('sphinx') >>> docwriter._get_object_name(" def func(): ") 'func' >>> docwriter._get_object_name(" class Klass(object): ") 'Klass' >>> docwriter._get_object_name(" class Klass: ") 'Klass' ''' name = line.split()[1].split('(')[0].strip() # in case we have classes which are not derived from object # ie. old style classes return name.rstrip(':') def _uri2path(self, uri): ''' Convert uri to absolute filepath Parameters ---------- uri : string URI of python module to return path for Returns ------- path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples -------- >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> modpath = sphinx.__path__[0] >>> res = docwriter._uri2path('sphinx.builder') >>> res == os.path.join(modpath, 'builder.py') True >>> res = docwriter._uri2path('sphinx') >>> res == os.path.join(modpath, '__init__.py') True >>> docwriter._uri2path('sphinx.does_not_exist') ''' if uri == self.package_name: return os.path.join(self.root_path, '__init__.py') path = uri.replace('.', os.path.sep) path = path.replace(self.package_name + os.path.sep, '') path = os.path.join(self.root_path, path) # XXX maybe check for extensions as well? if os.path.exists(path + '.py'): # file path += '.py' elif os.path.exists(os.path.join(path, '__init__.py')): path = os.path.join(path, '__init__.py') else: return None return path def _path2uri(self, dirpath): ''' Convert directory path to uri ''' relpath = dirpath.replace(self.root_path, self.package_name) if relpath.startswith(os.path.sep): relpath = relpath[1:] return relpath.replace(os.path.sep, '.') def _parse_module(self, uri): ''' Parse module defined in *uri* ''' filename = self._uri2path(uri) if filename is None: # nothing that we could handle here. 
return ([], []) f = open(filename, 'rt') functions, classes = self._parse_lines(f) f.close() return functions, classes def _parse_lines(self, linesource): ''' Parse lines of text for functions and classes ''' functions = [] classes = [] for line in linesource: if line.startswith('def ') and line.count('('): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): functions.append(name) elif line.startswith('class '): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): classes.append(name) else: pass functions.sort() classes.sort() return functions, classes def generate_api_doc(self, uri): '''Make autodoc documentation template string for a module Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- S : string Contents of API doc ''' # get the names of all classes and functions functions, classes = self._parse_module(uri) if not len(functions) and not len(classes): print(('WARNING: Empty -', uri)) # dbg return '' # Make a shorter version of the uri that omits the package name for # titles uri_short = re.sub(r'^%s\.' % self.package_name, '', uri) ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' chap_title = uri_short ad += (chap_title + '\n' + self.rst_section_levels[1] * len(chap_title) + '\n\n') # Set the chapter title to read 'module' for all modules except for the # main packages if '.' in uri: title = 'Module: :mod:`' + uri_short + '`' else: title = ':mod:`' + uri_short + '`' ad += title + '\n' + self.rst_section_levels[2] * len(title) if len(classes): ad += '\nInheritance diagram for ``%s``:\n\n' % uri ad += '.. inheritance-diagram:: %s \n' % uri ad += ' :parts: 2\n' ad += '\n.. automodule:: ' + uri + '\n' ad += '\n.. currentmodule:: ' + uri + '\n' multi_class = len(classes) > 1 multi_fx = len(functions) > 1 if multi_class: ad += '\n' + 'Classes' + '\n' + \ self.rst_section_levels[2] * 7 + '\n' elif len(classes) and multi_fx: ad += '\n' + 'Class' + '\n' + \ self.rst_section_levels[2] * 5 + '\n' for c in classes: ad += '\n:class:`' + c + '`\n' \ + self.rst_section_levels[multi_class + 2] * \ (len(c) + 9) + '\n\n' ad += '\n.. autoclass:: ' + c + '\n' # must NOT exclude from index to keep cross-refs working ad += ' :members:\n' \ ' :undoc-members:\n' \ ' :show-inheritance:\n' \ ' :inherited-members:\n' \ '\n' \ ' .. automethod:: __init__\n' if multi_fx: ad += '\n' + 'Functions' + '\n' + \ self.rst_section_levels[2] * 9 + '\n\n' elif len(functions) and multi_class: ad += '\n' + 'Function' + '\n' + \ self.rst_section_levels[2] * 8 + '\n\n' for f in functions: # must NOT exclude from index to keep cross-refs working ad += '\n.. autofunction:: ' + uri + '.' 
+ f + '\n\n' return ad def _survives_exclude(self, matchstr, match_type): ''' Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples -------- >>> dw = ApiDocWriter('sphinx') >>> dw._survives_exclude('sphinx.okpkg', 'package') True >>> dw.package_skip_patterns.append('^\\.badpkg$') >>> dw._survives_exclude('sphinx.badpkg', 'package') False >>> dw._survives_exclude('sphinx.badpkg', 'module') True >>> dw._survives_exclude('sphinx.badmod', 'module') True >>> dw.module_skip_patterns.append('^\\.badmod$') >>> dw._survives_exclude('sphinx.badmod', 'module') False ''' if match_type == 'module': patterns = self.module_skip_patterns elif match_type == 'package': patterns = self.package_skip_patterns else: raise ValueError('Cannot interpret match type "%s"' % match_type) # Match to URI without package name L = len(self.package_name) if matchstr[:L] == self.package_name: matchstr = matchstr[L:] for pat in patterns: #print (pat, matchstr, match_type) #dbg try: pat.search except AttributeError: pat = re.compile(pat) #print (pat.search(matchstr)) #dbg if pat.search(matchstr): return False return True def discover_modules(self): ''' Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- >>> dw = ApiDocWriter('sphinx') >>> mods = dw.discover_modules() >>> 'sphinx.util' in mods True >>> dw.package_skip_patterns.append('\.util$') >>> 'sphinx.util' in dw.discover_modules() False >>> ''' modules = [] # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): # Check directory names for packages root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) for dirname in dirnames[:]: # copy list - we modify inplace package_uri = '.'.join((root_uri, dirname)) if (self._uri2path(package_uri) and self._survives_exclude(package_uri, 'package')): modules.append(package_uri) else: dirnames.remove(dirname) # Check filenames for modules for filename in filenames: module_name = filename[:-3] module_uri = '.'.join((root_uri, module_name)) if (self._uri2path(module_uri) and self._survives_exclude(module_uri, 'module')): modules.append(module_uri) #print sorted(modules) #dbg return sorted(modules) def write_modules_api(self, modules, outdir): # write the list written_modules = [] for m in modules: api_str = self.generate_api_doc(m) if not api_str: continue # write out to file outfile = os.path.join(outdir, m + self.rst_extension) fileobj = open(outfile, 'wt') fileobj.write(api_str) fileobj.close() written_modules.append(m) self.written_modules = written_modules def write_api_docs(self, outdir): """Generate API reST files. Parameters ---------- outdir : string Directory name in which to store files We create automatic filenames for each module Returns ------- None Notes ----- Sets self.written_modules to list of written modules """ if not os.path.exists(outdir): os.mkdir(outdir) # compose list of modules modules = self.discover_modules() self.write_modules_api(modules, outdir) def write_index(self, outdir, froot='gen', relative_to=None): """Make a reST API index file from written files Parameters ---------- path : string Filename to write index to outdir : string Directory to which to write generated index file froot : string, optional root (filename without extension) of filename to write to Defaults to 'gen'. We add ``self.rst_extension``. 
relative_to : string path to which written filenames are relative. This component of the written file path will be removed from outdir, in the generated index. Default is None, meaning, leave path as it is. """ if self.written_modules is None: raise ValueError('No modules written') # Get full filename path path = os.path.join(outdir, froot + self.rst_extension) # Path written into index is relative to rootpath if relative_to is not None: relpath = outdir.replace(relative_to + os.path.sep, '') else: relpath = outdir idx = open(path, 'wt') w = idx.write w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n') w('.. toctree::\n\n') for f in self.written_modules: w(' %s\n' % os.path.join(relpath, f)) idx.close() nipype-0.9.2/tools/build_interface_docs.py000077500000000000000000000052301227300005300206620ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Script to auto-generate interface docs. """ # stdlib imports import os import sys #***************************************************************************** if __name__ == '__main__': nipypepath = os.path.abspath('..') sys.path.insert(1,nipypepath) # local imports from interfacedocgen import InterfaceHelpWriter package = 'nipype' outdir = os.path.join('interfaces','generated') docwriter = InterfaceHelpWriter(package) # Packages that should not be included in generated API docs. docwriter.package_skip_patterns += ['\.external$', '\.fixes$', '\.utils$', '\.pipeline', '\.testing', '\.caching', ] # Modules that should not be included in generated API docs. docwriter.module_skip_patterns += ['\.version$', '\.interfaces\.base$', '\.interfaces\.matlab$', '\.interfaces\.rest$', '\.interfaces\.pymvpa$', '\.interfaces\.slicer\.generate_classes$', '\.interfaces\.spm\.base$', '\.interfaces\.traits', '\.pipeline\.alloy$', '\.pipeline\.s3_node_wrapper$', '.\testing', ] docwriter.class_skip_patterns += ['AFNI', 'ANTS', 'FSL', 'FS', 'Info', '^SPM', 'Tester', 'Spec$', 'Numpy' # NipypeTester raises an # exception when instantiated in # InterfaceHelpWriter.generate_api_doc 'NipypeTester', ] docwriter.write_api_docs(outdir) docwriter.write_index(outdir, 'gen', relative_to='interfaces') print '%d files written' % len(docwriter.written_modules) nipype-0.9.2/tools/build_modref_templates.py000077500000000000000000000033451227300005300212510ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Script to auto-generate our API docs. """ # stdlib imports import os import sys #***************************************************************************** if __name__ == '__main__': nipypepath = os.path.abspath('..') sys.path.insert(1, nipypepath) package = 'nipype' # local imports from apigen import ApiDocWriter outdir = os.path.join('api', 'generated') docwriter = ApiDocWriter(package) # Packages that should not be included in generated API docs. docwriter.package_skip_patterns += ['\.external$', '\.utils$', '\.interfaces\.', '\.workflows$', '\.pipeline\.plugins$', '\.testing$', '\.fixes$', '\.algorithms$', ] # Modules that should not be included in generated API docs. 
docwriter.module_skip_patterns += ['\.version$', 'info', '\.interfaces\.(?!(base|matlab))', '\.pipeline\.utils$', '\.interfaces\.slicer\.generate_classes$', '\.interfaces\.pymvpa$', ] docwriter.write_api_docs(outdir) docwriter.write_index(outdir, 'gen', relative_to='api') print '%d files written' % len(docwriter.written_modules) nipype-0.9.2/tools/checkspecs.py000066400000000000000000000415451227300005300166540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Attempt to check each interface in nipype """ # Stdlib imports import inspect import os import re import sys import tempfile import warnings from nipype.interfaces.base import BaseInterface # Functions and classes class InterfaceChecker(object): """Class for checking all interface specifications """ def __init__(self, package_name, package_skip_patterns=None, module_skip_patterns=None, class_skip_patterns=None ): ''' Initialize package for parsing Parameters ---------- package_name : string Name of the top-level package. *package_name* must be the name of an importable package package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for earching by these regexps. If is None, gives default. Default is: ['\.tests$'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If is None, gives default. Default is: ['\.setup$', '\._'] class_skip_patterns : None or sequence Sequence of strings giving classes to be excluded Default is: None ''' if package_skip_patterns is None: package_skip_patterns = ['\\.tests$'] if module_skip_patterns is None: module_skip_patterns = ['\\.setup$', '\\._'] if class_skip_patterns: self.class_skip_patterns = class_skip_patterns else: self.class_skip_patterns = [] self.package_name = package_name self.package_skip_patterns = package_skip_patterns self.module_skip_patterns = module_skip_patterns def get_package_name(self): return self._package_name def set_package_name(self, package_name): """Set package_name""" # It's also possible to imagine caching the module parsing here self._package_name = package_name self.root_module = __import__(package_name) self.root_path = self.root_module.__path__[0] package_name = property(get_package_name, set_package_name, None, 'get/set package_name') def _get_object_name(self, line): name = line.split()[1].split('(')[0].strip() # in case we have classes which are not derived from object # ie. old style classes return name.rstrip(':') def _uri2path(self, uri): """Convert uri to absolute filepath Parameters ---------- uri : string URI of python module to return path for Returns ------- path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI """ if uri == self.package_name: return os.path.join(self.root_path, '__init__.py') path = uri.replace('.', os.path.sep) path = path.replace(self.package_name + os.path.sep, '') path = os.path.join(self.root_path, path) # XXX maybe check for extensions as well? 
if os.path.exists(path + '.py'): # file path += '.py' elif os.path.exists(os.path.join(path, '__init__.py')): path = os.path.join(path, '__init__.py') else: return None return path def _path2uri(self, dirpath): ''' Convert directory path to uri ''' relpath = dirpath.replace(self.root_path, self.package_name) if relpath.startswith(os.path.sep): relpath = relpath[1:] return relpath.replace(os.path.sep, '.') def _parse_module(self, uri): ''' Parse module defined in *uri* ''' filename = self._uri2path(uri) if filename is None: # nothing that we could handle here. return ([],[]) f = open(filename, 'rt') functions, classes = self._parse_lines(f, uri) f.close() return functions, classes def _parse_lines(self, linesource, module): ''' Parse lines of text for functions and classes ''' functions = [] classes = [] for line in linesource: if line.startswith('def ') and line.count('('): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): functions.append(name) elif line.startswith('class '): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_') and \ self._survives_exclude('.'.join((module, name)), 'class'): classes.append(name) else: pass functions.sort() classes.sort() return functions, classes def test_specs(self, uri): """Check input and output specs in an uri Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- """ # get the names of all classes and functions _, classes = self._parse_module(uri) if not classes: #print 'WARNING: Empty -',uri # dbg return None # Make a shorter version of the uri that omits the package name for # titles uri_short = re.sub(r'^%s\.' % self.package_name, '', uri) allowed_keys = ['desc', 'genfile', 'xor', 'requires', 'desc', 'nohash', 'argstr', 'position', 'mandatory', 'copyfile', 'usedefault', 'sep', 'hash_files', 'deprecated', 'new_name', 'min_ver', 'max_ver', 'name_source', 'name_template', 'keep_extension', 'units', 'output_name'] in_built = ['type', 'copy', 'parent', 'instance_handler', 'comparison_mode', 'array', 'default', 'editor'] bad_specs = [] for c in classes: __import__(uri) try: with warnings.catch_warnings(): warnings.simplefilter("ignore") classinst = sys.modules[uri].__dict__[c] except Exception as inst: continue if not issubclass(classinst, BaseInterface): continue testdir = os.path.join(*(uri.split('.')[:-1] + ['tests'])) if not os.path.exists(testdir): os.makedirs(testdir) nonautotest = os.path.join(testdir, 'test_%s.py' % c) testfile = os.path.join(testdir, 'test_auto_%s.py' % c) if os.path.exists(testfile): os.unlink(testfile) if not os.path.exists(nonautotest): with open(testfile, 'wt') as fp: cmd = ['# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT', 'from nipype.testing import assert_equal', 'from %s import %s' % (uri, c), '', 'def test_%s_inputs():' % c] input_fields = '' for traitname, trait in sorted(classinst.input_spec().traits(transient=None).items()): input_fields += '%s=dict(' % traitname for key, value in sorted(trait.__dict__.items()): if key in in_built or key == 'desc': continue if isinstance(value, basestring): quote = "'" if "'" in value: quote = '"' input_fields += "%s=%s%s%s,\n " % (key, quote, value, quote) else: input_fields += "%s=%s,\n " % (key, value) input_fields += '),\n ' cmd += [' input_map = dict(%s)' % input_fields] cmd += [' inputs = %s.input_spec()' % c] cmd += [""" for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], 
metakey), value"""] fp.writelines('\n'.join(cmd) + '\n\n') for traitname, trait in sorted(classinst.input_spec().traits(transient=None).items()): for key in sorted(trait.__dict__): if key in in_built: continue parent_metadata = [] if 'parent' in trait.__dict__: parent_metadata = getattr(trait, 'parent').__dict__.keys() if key not in allowed_keys + classinst._additional_metadata\ + parent_metadata: bad_specs.append([uri, c, 'Inputs', traitname, key]) if key == 'mandatory' and trait.mandatory is not None and not trait.mandatory: bad_specs.append([uri, c, 'Inputs', traitname, 'mandatory=False']) if not classinst.output_spec: continue if not os.path.exists(nonautotest): with open(testfile, 'at') as fp: cmd = ['def test_%s_outputs():' % c] input_fields = '' for traitname, trait in sorted(classinst.output_spec().traits(transient=None).items()): input_fields += '%s=dict(' % traitname for key, value in sorted(trait.__dict__.items()): if key in in_built or key == 'desc': continue if isinstance(value, basestring): quote = "'" if "'" in value: quote = '"' input_fields += "%s=%s%s%s,\n " % (key, quote, value, quote) else: input_fields += "%s=%s,\n " % (key, value) input_fields += '),\n ' cmd += [' output_map = dict(%s)' % input_fields] cmd += [' outputs = %s.output_spec()' % c] cmd += [""" for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value"""] fp.writelines('\n'.join(cmd) + '\n\n') for traitname, trait in sorted(classinst.output_spec().traits(transient=None).items()): for key in sorted(trait.__dict__): if key in in_built: continue parent_metadata = [] if 'parent' in trait.__dict__: parent_metadata = getattr(trait, 'parent').__dict__.keys() if key not in allowed_keys + classinst._additional_metadata\ + parent_metadata: bad_specs.append([uri, c, 'Outputs', traitname, key]) return bad_specs def _survives_exclude(self, matchstr, match_type): ''' Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples -------- >>> dw = ApiDocWriter('sphinx') >>> dw._survives_exclude('sphinx.okpkg', 'package') True >>> dw.package_skip_patterns.append('^\\.badpkg$') >>> dw._survives_exclude('sphinx.badpkg', 'package') False >>> dw._survives_exclude('sphinx.badpkg', 'module') True >>> dw._survives_exclude('sphinx.badmod', 'module') True >>> dw.module_skip_patterns.append('^\\.badmod$') >>> dw._survives_exclude('sphinx.badmod', 'module') False ''' if match_type == 'module': patterns = self.module_skip_patterns elif match_type == 'package': patterns = self.package_skip_patterns elif match_type == 'class': patterns = self.class_skip_patterns else: raise ValueError('Cannot interpret match type "%s"' % match_type) # Match to URI without package name L = len(self.package_name) if matchstr[:L] == self.package_name: matchstr = matchstr[L:] for pat in patterns: try: pat.search except AttributeError: pat = re.compile(pat) if pat.search(matchstr): return False return True def discover_modules(self): ''' Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- ''' modules = [self.package_name] # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): # Check directory names for packages root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) for dirname in dirnames[:]: # copy list - we modify inplace 
package_uri = '.'.join((root_uri, dirname)) if (self._uri2path(package_uri) and self._survives_exclude(package_uri, 'package')): modules.append(package_uri) else: dirnames.remove(dirname) # Check filenames for modules for filename in filenames: module_name = filename[:-3] module_uri = '.'.join((root_uri, module_name)) if (self._uri2path(module_uri) and self._survives_exclude(module_uri, 'module')): modules.append(module_uri) return sorted(modules) def check_modules(self): # write the list modules = self.discover_modules() checked_modules = [] for m in modules: bad_specs = self.test_specs(m) if bad_specs: checked_modules.extend(bad_specs) for bad_spec in checked_modules: print ':'.join(bad_spec) if __name__ == "__main__": package = 'nipype' ic = InterfaceChecker(package) # Packages that should not be included in generated API docs. ic.package_skip_patterns += ['\.external$', '\.fixes$', '\.utils$', '\.pipeline', '\.testing', '\.caching', '\.workflows', ] """ # Modules that should not be included in generated API docs. ic.module_skip_patterns += ['\.version$', '\.interfaces\.base$', '\.interfaces\.matlab$', '\.interfaces\.rest$', '\.interfaces\.pymvpa$', '\.interfaces\.slicer\.generate_classes$', '\.interfaces\.spm\.base$', '\.interfaces\.traits', '\.pipeline\.alloy$', '\.pipeline\.s3_node_wrapper$', '.\testing', ] ic.class_skip_patterns += ['AFNI', 'ANTS', 'FSL', 'FS', 'Info', '^SPM', 'Tester', 'Spec$', 'Numpy', 'NipypeTester', ] """ ic.check_modules() nipype-0.9.2/tools/ex2rst000077500000000000000000000222501227300005300153340ustar00rootroot00000000000000#!/usr/bin/env python # # Note: this file is copied (possibly with minor modifications) from the # sources of the PyMVPA project - http://pymvpa.org. It remains licensed as # the rest of PyMVPA (MIT license as of October 2010). # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the PyMVPA package for the # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Helper to automagically generate ReST versions of examples""" __docformat__ = 'restructuredtext' import os import sys import re import glob from optparse import OptionParser def auto_image(line): """Automatically replace generic image markers with ones that have full size (width/height) info, plus a :target: link to the original png, to be used in the html docs. """ img_re = re.compile(r'(\s*)\.\. image::\s*(.*)$') m = img_re.match(line) if m is None: # Not an image declaration, leave the line alone and return unmodified return line # Match means it's an image spec, we rewrite it with extra tags ini_space = m.group(1) lines = [line, ini_space + ' :width: 500\n', #ini_space + ' :height: 350\n' ] fspec = m.group(2) if fspec.endswith('.*'): fspec = fspec.replace('.*', '.png') fspec = fspec.replace('fig/', '../_images/') lines.append(ini_space + (' :target: %s\n' % fspec) ) lines.append('\n') return ''.join(lines) def exfile2rst(filename): """Open a Python script and convert it into an ReST string. 
""" # output string s = '' # open source file xfile = open(filename) # parser status vars inheader = True indocs = False doc2code = False code2doc = False # an empty line found in the example enables the check for a potentially # indented docstring starting on the next line (as an attempt to exclude # function or class docstrings) last_line_empty = False # indentation of indented docstring, which is removed from the RsT output # since we typically do not want an indentation there. indent_level = 0 for line in xfile: # skip header if inheader and \ not (line.startswith('"""') or line.startswith("'''")): continue # determine end of header if inheader and (line.startswith('"""') or line.startswith("'''")): inheader = False # strip comments and remove trailing whitespace if not indocs and last_line_empty: # first remove leading whitespace and store indent level cleanline = line[:line.find('#')].lstrip() indent_level = len(line) - len(cleanline) - 1 cleanline = cleanline.rstrip() else: cleanline = line[:line.find('#')].rstrip() if not indocs and line == '\n': last_line_empty = True else: last_line_empty = False # if we have something that should go into the text if indocs \ or (cleanline.startswith('"""') or cleanline.startswith("'''")): proc_line = None # handle doc start if not indocs: # guarenteed to start with """ if len(cleanline) > 3 \ and (cleanline.endswith('"""') \ or cleanline.endswith("'''")): # single line doc code2doc = True doc2code = True proc_line = cleanline[3:-3] else: # must be start of multiline block indocs = True code2doc = True # rescue what is left on the line proc_line = cleanline[3:] # strip """ else: # we are already in the docs # handle doc end if cleanline.endswith('"""') or cleanline.endswith("'''"): indocs = False doc2code = True # rescue what is left on the line proc_line = cleanline[:-3] # reset the indentation indent_level = 0 else: # has to be documentation # if the indentation is whitespace remove it, other wise # keep it (accounts for some variation in docstring # styles real_indent = \ indent_level - len(line[:indent_level].lstrip()) proc_line = line[real_indent:] if code2doc: code2doc = False s += '\n' proc_line = auto_image(proc_line) if proc_line: s += proc_line.rstrip() + '\n' else: if doc2code: doc2code = False s += '\n::\n' # has to be code s += ' %s' % line xfile.close() return s def exfile2rstfile(filename, opts): """ """ # doc filename dfilename = os.path.basename(filename[:-3]) + '.rst' # open dest file dfile = open(os.path.join(opts.outdir, os.path.basename(dfilename)), 'w') # place header dfile.write('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n') # place cross-ref target dfile.write('.. _example_' + dfilename[:-4] + ':\n\n') # write converted ReST dfile.write(exfile2rst(filename)) links = """ .. include:: ../../links_names.txt """ dfile.write(links) if opts.sourceref: # write post example see also box msg = """ .. admonition:: Example source code You can download :download:`the full source code of this example <%s>`. This same script is also included in the %s source distribution under the :file:`examples` directory. """ % (filename, opts.project) dfile.write(msg) dfile.close() def main(): parser = OptionParser( \ usage="%prog [options] [...]", \ version="%prog 0.1", description="""\ %prog converts Python scripts into restructered text (ReST) format suitable for integration into the Sphinx documentation framework. 
Its key feature is that it extracts stand-alone (unassigned) single, or multiline triple-quote docstrings and moves them out of the code listing so that they are rendered as regular ReST, while at the same time maintaining their position relative to the listing. The detection of such docstrings is exclusively done by parsing the raw code so it is never actually imported into a running Python session. Docstrings have to be written using triple quotes (both forms " and ' are possible). It is recommend that such docstrings are preceded and followed by an empty line. Intended docstring can make use of the full linewidth from the second docstring line on. If the indentation of multiline docstring is maintained for all lines, the respective indentation is removed in the ReST output. The parser algorithm automatically excludes file headers and starts with the first (module-level) docstring instead. """ ) #' # define options parser.add_option('--verbose', action='store_true', dest='verbose', default=False, help='print status messages') parser.add_option('-x', '--exclude', action='append', dest='excluded', help="""\ Use this option to exclude single files from the to be parsed files. This is especially useful to exclude files when parsing complete directories. This option can be specified multiple times. """) parser.add_option('-o', '--outdir', action='store', dest='outdir', type='string', default=None, help="""\ Target directory to write the ReST output to. This is a required option. """) parser.add_option('--no-sourceref', action='store_false', default=True, dest='sourceref', help="""\ If specified, the source reference section will be suppressed. """) parser.add_option('--project', type='string', action='store', default='', dest='project', help="""\ Name of the project that contains the examples. This name is used in the 'seealso' source references. 
Default: ''
""")

    # parse options
    (opts, args) = parser.parse_args()  # read sys.argv[1:] by default

    # check for required options
    if opts.outdir is None:
        print('Required option -o, --outdir not specified.')
        sys.exit(1)

    # build up list of things to parse
    toparse = []
    for t in args:
        # expand dirs
        if os.path.isdir(t):
            # add all python files in that dir
            toparse += glob.glob(os.path.join(t, '*.py'))
        else:
            toparse.append(t)

    # filter parse list
    if not opts.excluded is None:
        toparse = [t for t in toparse if not t in opts.excluded]

    toparse_list = toparse
    toparse = set(toparse)
    if len(toparse) != len(toparse_list):
        print('Ignoring duplicate parse targets.')

    if not os.path.exists(opts.outdir):
        os.mkdir(opts.outdir)

    # finally process all examples
    for t in toparse:
        exfile2rstfile(t, opts)


if __name__ == '__main__':
    main()
nipype-0.9.2/tools/github.py000066400000000000000000000054141227300005300160160ustar00rootroot00000000000000
import httplib
import inspect
import json
import os
from subprocess import Popen, PIPE

import nipype


def is_git_repo():
    """Does the current nipype module have a git folder
    """
    sourcepath = os.path.realpath(os.path.join(os.path.dirname(nipype.__file__),
                                               os.path.pardir))
    gitpathgit = os.path.join(sourcepath, '.git')
    if os.path.exists(gitpathgit):
        return True
    else:
        return False


def get_local_branch():
    """Determine current branch
    """
    if is_git_repo():
        o, _ = Popen('git branch | grep "\* "', shell=True, stdout=PIPE,
                     cwd=os.path.dirname(nipype.__file__)).communicate()
        return o.strip()[2:]
    else:
        return None


def get_remote_branch():
    """Get remote branch for current branch
    """
    pass


def create_hash_map():
    """Create a hash map for all objects
    """
    hashmap = {}
    from base64 import encodestring as base64
    import pwd
    login_name = pwd.getpwuid(os.geteuid())[0]
    conn = httplib.HTTPSConnection("api.github.com")
    conn.request("GET", "/repos/nipy/nipype",
                 headers={'Authorization': 'Basic %s' % base64(login_name)})
    try:
        conn.request("GET", "/repos/nipy/nipype/git/trees/master?recursive=1")
    except:
        pass
    else:
        r1 = conn.getresponse()
        if r1.reason != 'OK':
            raise Exception('HTTP Response %s:%s' % (r1.status, r1.reason))
        payload = json.loads(r1.read())
        for infodict in payload['tree']:
            if infodict['type'] == "blob":
                hashmap[infodict['sha']] = infodict['path']
    return hashmap


def get_repo_url(force_github=False):
    """Returns github url or local url

    Returns
    -------
    URI: str
        filesystem path or github repo url
    """
    sourcepath = os.path.realpath(os.path.join(os.path.dirname(nipype.__file__),
                                               os.path.pardir))
    gitpathgit = os.path.join(sourcepath, '.git')
    if not os.path.exists(gitpathgit) and not force_github:
        uri = 'file://%s' % sourcepath
    else:
        uri = 'http://github.com/nipy/nipype/blob/master'
    return uri


def get_file_url(object):
    """Returns local or remote url for an object
    """
    filename = inspect.getsourcefile(object)
    lines = inspect.getsourcelines(object)
    uri = 'file://%s#L%d' % (filename, lines[1])
    if is_git_repo():
        info = nipype.get_info()
        shortfile = os.path.join('nipype', filename.split('nipype/')[-1])
        uri = 'http://github.com/nipy/nipype/tree/%s/%s#L%d' % \
              (info['commit_hash'], shortfile, lines[1])
    return uri
nipype-0.9.2/tools/gitwash_dumper.py000077500000000000000000000172501227300005300175600ustar00rootroot00000000000000
#!/usr/bin/env python
''' Checkout gitwash repo into directory and do search replace on name '''
import os
from os.path import join as pjoin
import shutil
import sys
import re
import glob
import fnmatch
import tempfile
from subprocess import call
from optparse import OptionParser

verbose = False
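# --- Illustrative usage sketch (added for documentation; hypothetical values,
# not part of the original script) ---
# The helpers defined below are normally driven by main(); the core flow is
# roughly:
#
#     repo_path = clone_repo(GITWASH_CENTRAL, GITWASH_BRANCH)
#     try:
#         copy_replace((('PROJECTNAME', 'nipype'),
#                       ('REPONAME', 'nipype'),
#                       ('MAIN_GH_USER', 'nipy')),
#                      repo_path, 'doc/devel',
#                      cp_globs=(pjoin('gitwash', '*'),),
#                      rep_globs=('*.rst',))
#     finally:
#         shutil.rmtree(repo_path)
#
# The project name, github user and output directory above are examples only;
# the real values come from the command-line options parsed in main().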
def clone_repo(url, branch): cwd = os.getcwd() tmpdir = tempfile.mkdtemp() try: cmd = 'git clone %s %s' % (url, tmpdir) call(cmd, shell=True) os.chdir(tmpdir) cmd = 'git checkout %s' % branch call(cmd, shell=True) except: shutil.rmtree(tmpdir) raise finally: os.chdir(cwd) return tmpdir def cp_files(in_path, globs, out_path): try: os.makedirs(out_path) except OSError: pass out_fnames = [] for in_glob in globs: in_glob_path = pjoin(in_path, in_glob) for in_fname in glob.glob(in_glob_path): out_fname = in_fname.replace(in_path, out_path) pth, _ = os.path.split(out_fname) if not os.path.isdir(pth): os.makedirs(pth) shutil.copyfile(in_fname, out_fname) out_fnames.append(out_fname) return out_fnames def filename_search_replace(sr_pairs, filename, backup=False): ''' Search and replace for expressions in files ''' in_txt = open(filename, 'rt').read(-1) out_txt = in_txt[:] for in_exp, out_exp in sr_pairs: in_exp = re.compile(in_exp) out_txt = in_exp.sub(out_exp, out_txt) if in_txt == out_txt: return False open(filename, 'wt').write(out_txt) if backup: open(filename + '.bak', 'wt').write(in_txt) return True def copy_replace(replace_pairs, repo_path, out_path, cp_globs=('*',), rep_globs=('*',), renames = ()): out_fnames = cp_files(repo_path, cp_globs, out_path) renames = [(re.compile(in_exp), out_exp) for in_exp, out_exp in renames] fnames = [] for rep_glob in rep_globs: fnames += fnmatch.filter(out_fnames, rep_glob) if verbose: print '\n'.join(fnames) for fname in fnames: filename_search_replace(replace_pairs, fname, False) for in_exp, out_exp in renames: new_fname, n = in_exp.subn(out_exp, fname) if n: os.rename(fname, new_fname) break def make_link_targets(proj_name, user_name, repo_name, known_link_fname, out_link_fname, url=None, ml_url=None): """ Check and make link targets If url is None or ml_url is None, check if there are links present for these in `known_link_fname`. If not, raise error. The check is: Look for a target `proj_name`. Look for a target `proj_name` + ' mailing list' Also, look for a target `proj_name` + 'github'. If this exists, don't write this target into the new file below. If we are writing any of the url, ml_url, or github address, then write new file with these links, of form: .. _`proj_name` .. _`proj_name`: url .. _`proj_name` mailing list: url """ link_contents = open(known_link_fname, 'rt').readlines() have_url = not url is None have_ml_url = not ml_url is None have_gh_url = None for line in link_contents: if not have_url: match = re.match(r'..\s+_%s:\s+' % proj_name, line) if match: have_url = True if not have_ml_url: match = re.match(r'..\s+_`%s mailing list`:\s+' % proj_name, line) if match: have_ml_url = True if not have_gh_url: match = re.match(r'..\s+_`%s github`:\s+' % proj_name, line) if match: have_gh_url = True if not have_url or not have_ml_url: raise RuntimeError('Need command line or known project ' 'and / or mailing list URLs') lines = [] if not url is None: lines.append('.. _%s: %s\n' % (proj_name, url)) if not have_gh_url: gh_url = 'http://github.com/%s/%s\n' % (user_name, repo_name) lines.append('.. _`%s github`: %s\n' % (proj_name, gh_url)) if not ml_url is None: lines.append('.. _`%s mailing list`: %s\n' % (proj_name, ml_url)) if len(lines) == 0: # Nothing to do return # A neat little header line lines = ['.. 
%s\n' % proj_name] + lines out_links = open(out_link_fname, 'wt') out_links.writelines(lines) out_links.close() USAGE = ''' If not set with options, the repository name is the same as the If not set with options, the main github user is the same as the repository name.''' GITWASH_CENTRAL = 'git://github.com/matthew-brett/gitwash.git' GITWASH_BRANCH = 'master' def main(): parser = OptionParser() parser.set_usage(parser.get_usage().strip() + USAGE) parser.add_option("--repo-name", dest="repo_name", help="repository name - e.g. nitime", metavar="REPO_NAME") parser.add_option("--github-user", dest="main_gh_user", help="github username for main repo - e.g fperez", metavar="MAIN_GH_USER") parser.add_option("--gitwash-url", dest="gitwash_url", help="URL to gitwash repository - default %s" % GITWASH_CENTRAL, default=GITWASH_CENTRAL, metavar="GITWASH_URL") parser.add_option("--gitwash-branch", dest="gitwash_branch", help="branch in gitwash repository - default %s" % GITWASH_BRANCH, default=GITWASH_BRANCH, metavar="GITWASH_BRANCH") parser.add_option("--source-suffix", dest="source_suffix", help="suffix of ReST source files - default '.rst'", default='.rst', metavar="SOURCE_SUFFIX") parser.add_option("--project-url", dest="project_url", help="URL for project web pages", default=None, metavar="PROJECT_URL") parser.add_option("--project-ml-url", dest="project_ml_url", help="URL for project mailing list", default=None, metavar="PROJECT_ML_URL") (options, args) = parser.parse_args() if len(args) < 2: parser.print_help() sys.exit() out_path, project_name = args if options.repo_name is None: options.repo_name = project_name if options.main_gh_user is None: options.main_gh_user = options.repo_name repo_path = clone_repo(options.gitwash_url, options.gitwash_branch) try: copy_replace((('PROJECTNAME', project_name), ('REPONAME', options.repo_name), ('MAIN_GH_USER', options.main_gh_user)), repo_path, out_path, cp_globs=(pjoin('gitwash', '*'),), rep_globs=('*.rst',), renames=(('\.rst$', options.source_suffix),)) make_link_targets(project_name, options.main_gh_user, options.repo_name, pjoin(out_path, 'gitwash', 'known_projects.inc'), pjoin(out_path, 'gitwash', 'this_project.inc'), options.project_url, options.project_ml_url) finally: shutil.rmtree(repo_path) if __name__ == '__main__': main() nipype-0.9.2/tools/interface_inputspec_testwriter.py000066400000000000000000000053411227300005300230610ustar00rootroot00000000000000spm_metadata = ['field', 'mandatory', 'copyfile', 'xor', 'requires', 'usedefault'] cmd_metadata = ['argstr', 'mandatory', 'copyfile', 'xor', 'requires', 'usedefault', 'sep', 'genfile', 'hash_files'] py_metadata = ['mandatory', 'copyfile', 'xor', 'requires', 'usedefault'] def create_spmtest_func(pkg, interface, object): cmd = ['def test_%s():'%interface.lower()] cmd += ["yield assert_equal, %s.%s._jobtype, '%s'"%(pkg, interface, object._jobtype)] cmd += ["yield assert_equal, %s.%s._jobname, '%s'"%(pkg, interface, object._jobname)] input_fields = '' for field, spec in object.inputs.items(): input_fields += '%s = dict('%field for key, value in spec.__dict__.items(): if key in spm_metadata: if key == 'field': value = "'%s'"%value input_fields += "%s=%s,"%(key,str(value)) input_fields += '),\n' cmd += ['input_map = dict(%s)'%input_fields] cmd += ['instance = %s.%s()'%(pkg, interface)] cmd += [""" for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value"""] return cmd def create_cmdtest_func(pkg, 
interface, object): cmd = ['def test_%s():'%interface.lower()] input_fields = '' for field, spec in object.inputs.items(): input_fields += '%s = dict('%field for key, value in spec.__dict__.items(): if key in cmd_metadata: if key == 'argstr': value = "'%s'"%value input_fields += "%s=%s,"%(key,str(value)) input_fields += '),\n' cmd += ['input_map = dict(%s)'%input_fields] cmd += ['\tinstance = %s.%s()'%(pkg, interface)] cmd += ["""\tfor key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value"""] print '\n'.join(cmd)+'\n' def create_pytest_func(pkg, interface, object): cmd = ['def test_%s():'%interface.lower()] input_fields = '' for field, spec in object.inputs.items(): input_fields += '%s = dict('%field for key, value in spec.__dict__.items(): if key in cmd_metadata: if key == 'argstr': value = "'%s'"%value input_fields += "%s=%s,"%(key,str(value)) input_fields += '),\n' cmd += ['input_map = dict(%s)'%input_fields] cmd += ['\tinstance = %s.%s()'%(pkg, interface)] cmd += ["""\tfor key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value"""] print '\n'.join(cmd)+'\n' nipype-0.9.2/tools/interfacedocgen.py000066400000000000000000000432711227300005300176570ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Attempt to generate templates for module reference with Sphinx XXX - we exclude extension modules To include extension modules, first identify them as valid in the ``_uri2path`` method, then handle them in the ``_parse_module`` script. We get functions and classes by parsing the text of .py files. Alternatively we could import the modules for discovery, and we'd have to do that for extension modules. This would involve changing the ``_parse_module`` method to work via import and introspection, and might involve changing ``discover_modules`` (which determines which files are modules, and therefore which module URIs will be passed to ``_parse_module``). NOTE: this is a modified version of a script originally shipped with the PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed project.""" # Stdlib imports import inspect import os import re import sys import tempfile import warnings from nipype.interfaces.base import BaseInterface from nipype.pipeline.engine import Workflow from nipype.utils.misc import trim from github import get_file_url # Functions and classes class InterfaceHelpWriter(object): ''' Class for automatic detection and parsing of API docs to Sphinx-parsable reST format''' # only separating first two levels rst_section_levels = ['*', '=', '-', '~', '^'] def __init__(self, package_name, rst_extension='.rst', package_skip_patterns=None, module_skip_patterns=None, class_skip_patterns=None ): ''' Initialize package for parsing Parameters ---------- package_name : string Name of the top-level package. *package_name* must be the name of an importable package rst_extension : string, optional Extension for reST files, default '.rst' package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for earching by these regexps. 
If is None, gives default. Default is: ['\.tests$'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If is None, gives default. Default is: ['\.setup$', '\._'] class_skip_patterns : None or sequence Sequence of strings giving classes to be excluded Default is: None ''' if package_skip_patterns is None: package_skip_patterns = ['\\.tests$'] if module_skip_patterns is None: module_skip_patterns = ['\\.setup$', '\\._'] if class_skip_patterns: self.class_skip_patterns = class_skip_patterns else: self.class_skip_patterns = [] self.package_name = package_name self.rst_extension = rst_extension self.package_skip_patterns = package_skip_patterns self.module_skip_patterns = module_skip_patterns def get_package_name(self): return self._package_name def set_package_name(self, package_name): ''' Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = 'docutils' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True ''' # It's also possible to imagine caching the module parsing here self._package_name = package_name self.root_module = __import__(package_name) self.root_path = self.root_module.__path__[0] self.written_modules = None package_name = property(get_package_name, set_package_name, None, 'get/set package_name') def _get_object_name(self, line): ''' Get second token in line >>> docwriter = ApiDocWriter('sphinx') >>> docwriter._get_object_name(" def func(): ") 'func' >>> docwriter._get_object_name(" class Klass(object): ") 'Klass' >>> docwriter._get_object_name(" class Klass: ") 'Klass' ''' name = line.split()[1].split('(')[0].strip() # in case we have classes which are not derived from object # ie. old style classes return name.rstrip(':') def _uri2path(self, uri): ''' Convert uri to absolute filepath Parameters ---------- uri : string URI of python module to return path for Returns ------- path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples -------- >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> modpath = sphinx.__path__[0] >>> res = docwriter._uri2path('sphinx.builder') >>> res == os.path.join(modpath, 'builder.py') True >>> res = docwriter._uri2path('sphinx') >>> res == os.path.join(modpath, '__init__.py') True >>> docwriter._uri2path('sphinx.does_not_exist') ''' if uri == self.package_name: return os.path.join(self.root_path, '__init__.py') path = uri.replace('.', os.path.sep) path = path.replace(self.package_name + os.path.sep, '') path = os.path.join(self.root_path, path) # XXX maybe check for extensions as well? if os.path.exists(path + '.py'): # file path += '.py' elif os.path.exists(os.path.join(path, '__init__.py')): path = os.path.join(path, '__init__.py') else: return None return path def _path2uri(self, dirpath): ''' Convert directory path to uri ''' relpath = dirpath.replace(self.root_path, self.package_name) if relpath.startswith(os.path.sep): relpath = relpath[1:] return relpath.replace(os.path.sep, '.') def _parse_module(self, uri): ''' Parse module defined in *uri* ''' filename = self._uri2path(uri) if filename is None: # nothing that we could handle here. 
return ([],[]) f = open(filename, 'rt') functions, classes = self._parse_lines(f, uri) f.close() return functions, classes def _parse_lines(self, linesource, module): ''' Parse lines of text for functions and classes ''' functions = [] classes = [] for line in linesource: if line.startswith('def ') and line.count('('): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): functions.append(name) elif line.startswith('class '): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_') and \ self._survives_exclude('.'.join((module, name)), 'class'): classes.append(name) else: pass functions.sort() classes.sort() return functions, classes def _write_graph_section(self, fname, title): ad = '\n%s\n%s\n\n' % (title, self.rst_section_levels[3] * len(title)) ad += '.. graphviz::\n\n' fhandle = open(fname) for line in fhandle: ad += '\t' + line + '\n' fhandle.close() os.remove(fname) os.remove(fname + ".png") return ad def generate_api_doc(self, uri): '''Make autodoc documentation template string for a module Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- S : string Contents of API doc ''' # get the names of all classes and functions functions, classes = self._parse_module(uri) workflows = [] helper_functions = [] for function in functions: try: __import__(uri) finst = sys.modules[uri].__dict__[function] except TypeError: continue try: workflow = finst() except Exception: helper_functions.append((function, finst)) continue if isinstance(workflow, Workflow): workflows.append((workflow,function, finst)) if not classes and not workflows and not helper_functions: print 'WARNING: Empty -',uri # dbg return '' # Make a shorter version of the uri that omits the package name for # titles uri_short = re.sub(r'^%s\.' % self.package_name, '', uri) #uri_short = uri ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' chap_title = uri_short ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title) + '\n\n') # Set the chapter title to read 'module' for all modules except for the # main packages #if '.' in uri: # title = 'Module: :mod:`' + uri_short + '`' #else: # title = ':mod:`' + uri_short + '`' #ad += title + '\n' + self.rst_section_levels[2] * len(title) #ad += '\n' + 'Classes' + '\n' + \ # self.rst_section_levels[2] * 7 + '\n' for c in classes: __import__(uri) print c try: with warnings.catch_warnings(): warnings.simplefilter("ignore") classinst = sys.modules[uri].__dict__[c] except Exception as inst: print inst continue if not issubclass(classinst, BaseInterface): continue label = uri + '.' + c + ':' ad += '\n.. _%s\n\n' % label ad += '\n.. index:: %s\n\n' % c ad += c + '\n' + self.rst_section_levels[2] * len(c) + '\n\n' ad += "`Link to code <%s>`__\n\n" % get_file_url(classinst) ad += trim(classinst.help(returnhelp=True), self.rst_section_levels[3]) + '\n' if workflows or helper_functions: ad += '\n.. module:: %s\n\n' % uri for workflow, name, finst in workflows: label = ':func:`' + name + '`' ad += '\n.. _%s:\n\n' % (uri + '.' + name) ad += '\n'.join((label, self.rst_section_levels[2] * len(label))) ad += "\n\n`Link to code <%s>`__\n\n" % get_file_url(finst) helpstr = trim(finst.__doc__, self.rst_section_levels[3]) ad += '\n\n' + helpstr + '\n\n' """ # use sphinx autodoc for function signature ad += '\n.. _%s:\n\n' % (uri + '.' + name) ad += '.. 
autofunction:: %s\n\n' % name """ (_,fname) = tempfile.mkstemp(suffix=".dot") workflow.write_graph(dotfilename=fname, graph2use='hierarchical') ad += self._write_graph_section(fname, 'Graph') + '\n' for name, finst in helper_functions: label = ':func:`' + name + '`' ad += '\n.. _%s:\n\n' % (uri + '.' + name) ad += '\n'.join((label, self.rst_section_levels[2] * len(label))) ad += "\n\n`Link to code <%s>`__\n\n" % get_file_url(finst) helpstr = trim(finst.__doc__, self.rst_section_levels[3]) ad += '\n\n' + helpstr + '\n\n' return ad def _survives_exclude(self, matchstr, match_type): ''' Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples -------- >>> dw = ApiDocWriter('sphinx') >>> dw._survives_exclude('sphinx.okpkg', 'package') True >>> dw.package_skip_patterns.append('^\\.badpkg$') >>> dw._survives_exclude('sphinx.badpkg', 'package') False >>> dw._survives_exclude('sphinx.badpkg', 'module') True >>> dw._survives_exclude('sphinx.badmod', 'module') True >>> dw.module_skip_patterns.append('^\\.badmod$') >>> dw._survives_exclude('sphinx.badmod', 'module') False ''' if match_type == 'module': patterns = self.module_skip_patterns elif match_type == 'package': patterns = self.package_skip_patterns elif match_type == 'class': patterns = self.class_skip_patterns else: raise ValueError('Cannot interpret match type "%s"' % match_type) # Match to URI without package name L = len(self.package_name) if matchstr[:L] == self.package_name: matchstr = matchstr[L:] for pat in patterns: try: pat.search except AttributeError: pat = re.compile(pat) if pat.search(matchstr): return False return True def discover_modules(self): ''' Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- >>> dw = ApiDocWriter('sphinx') >>> mods = dw.discover_modules() >>> 'sphinx.util' in mods True >>> dw.package_skip_patterns.append('\.util$') >>> 'sphinx.util' in dw.discover_modules() False >>> ''' modules = [self.package_name] # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): # Check directory names for packages root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) for dirname in dirnames[:]: # copy list - we modify inplace package_uri = '.'.join((root_uri, dirname)) if (self._uri2path(package_uri) and self._survives_exclude(package_uri, 'package')): modules.append(package_uri) else: dirnames.remove(dirname) # Check filenames for modules for filename in filenames: module_name = filename[:-3] module_uri = '.'.join((root_uri, module_name)) if (self._uri2path(module_uri) and self._survives_exclude(module_uri, 'module')): modules.append(module_uri) return sorted(modules) def write_modules_api(self, modules,outdir): # write the list written_modules = [] for m in modules: api_str = self.generate_api_doc(m) if not api_str: continue # write out to file outfile = os.path.join(outdir, m + self.rst_extension) fileobj = open(outfile, 'wt') fileobj.write(api_str) fileobj.close() written_modules.append(m) self.written_modules = written_modules def write_api_docs(self, outdir): """Generate API reST files. 
Parameters ---------- outdir : string Directory name in which to store files We create automatic filenames for each module Returns ------- None Notes ----- Sets self.written_modules to list of written modules """ if not os.path.exists(outdir): os.mkdir(outdir) # compose list of modules modules = self.discover_modules() self.write_modules_api(modules,outdir) def write_index(self, outdir, froot='gen', relative_to=None): """Make a reST API index file from written files Parameters ---------- path : string Filename to write index to outdir : string Directory to which to write generated index file froot : string, optional root (filename without extension) of filename to write to Defaults to 'gen'. We add ``self.rst_extension``. relative_to : string path to which written filenames are relative. This component of the written file path will be removed from outdir, in the generated index. Default is None, meaning, leave path as it is. """ if self.written_modules is None: raise ValueError('No modules written') # Get full filename path path = os.path.join(outdir, froot+self.rst_extension) # Path written into index is relative to rootpath if relative_to is not None: relpath = outdir.replace(relative_to + os.path.sep, '') else: relpath = outdir idx = open(path,'wt') w = idx.write w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n') w('.. toctree::\n') w(' :maxdepth: 2\n\n') for f in self.written_modules: w(' %s\n' % os.path.join(relpath,f)) idx.close() nipype-0.9.2/tools/make_examples.py000077500000000000000000000054531227300005300173550ustar00rootroot00000000000000#!/usr/bin/env python """Run the py->rst conversion and run all examples. This also creates the index.rst file appropriately, makes figures, etc. """ #----------------------------------------------------------------------------- # Library imports #----------------------------------------------------------------------------- # Stdlib imports import os import sys from glob import glob # Third-party imports # We must configure the mpl backend before making any further mpl imports import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from matplotlib._pylab_helpers import Gcf # Local tools from toollib import * #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- examples_header = """ .. _examples: Examples ======== .. note_about_examples """ #----------------------------------------------------------------------------- # Function defintions #----------------------------------------------------------------------------- # These global variables let show() be called by the scripts in the usual # manner, but when generating examples, we override it to write the figures to # files with a known name (derived from the script name) plus a counter figure_basename = None # We must change the show command to save instead def show(): allfm = Gcf.get_all_fig_managers() for fcount, fm in enumerate(allfm): fm.canvas.figure.savefig('%s_%02i.png' % (figure_basename, fcount+1)) _mpl_show = plt.show plt.show = show #----------------------------------------------------------------------------- # Main script #----------------------------------------------------------------------------- # Work in examples directory cd('users/examples') if not os.getcwd().endswith('users/examples'): raise OSError('This must be run from doc/examples directory') # Run the conversion from .py to rst file sh('../../../tools/ex2rst --project Nipype --outdir . 
../../../examples') sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples/frontiers_paper') # Make the index.rst file """ index = open('index.rst', 'w') index.write(examples_header) for name in [os.path.splitext(f)[0] for f in glob('*.rst')]: #Don't add the index in there to avoid sphinx errors and don't add the #note_about examples again (because it was added at the top): if name not in(['index','note_about_examples']): index.write(' %s\n' % name) index.close() """ # Execute each python script in the directory. if '--no-exec' in sys.argv: pass else: if not os.path.isdir('fig'): os.mkdir('fig') for script in glob('*.py'): figure_basename = pjoin('fig', os.path.splitext(script)[0]) execfile(script) plt.close('all') nipype-0.9.2/tools/nipype_nightly.py000066400000000000000000000050701227300005300175740ustar00rootroot00000000000000#!/usr/bin/env python """Simple script to update the trunk nightly, build the docs and push to sourceforge. """ import os import sys import subprocess dirname = '/home/cburns/src/nipy-sf/nipype/trunk/' def run_cmd(cmd): print cmd proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ, shell=True) output, error = proc.communicate() returncode = proc.returncode if returncode: msg = 'Running cmd: %s\n Error: %s' % (cmd, error) raise StandardError(msg) print output def update_repos(): """Update svn repository.""" os.chdir(dirname) cmd = 'svn update' run_cmd(cmd) def build_docs(): """Build the sphinx documentation.""" os.chdir(os.path.join(dirname, 'doc')) cmd = 'make html' run_cmd(cmd) def push_to_sf(): """Push documentation to sourceforge.""" os.chdir(dirname + 'doc') cmd = 'make sf_cburns' run_cmd(cmd) def setup_paths(): # Cron has no PYTHONPATH defined, so we need to add the paths to # all libraries we need. pkg_path = '/home/cburns/local/lib/python2.6/site-packages/' pkg_path_64 = '/home/cburns/local/lib64/python2.6/site-packages/' # Add the current directory to path sys.path.insert(0, os.curdir) # Add our local path, where we install nipype, to sys.path sys.path.insert(0, pkg_path) # Needed to add this to my path at one point otherwise import of # apigen failed. #sys.path.insert(2, '/home/cburns/src/nipy-sf/nipype/trunk/tools') # Add networkx, twisted, zope.interface and foolscap. # Basically we need to add all the packages we need that are # installed via setyptools, since it's uses the .pth files for # this. 
    nx_path = os.path.join(pkg_path, 'networkx-0.99-py2.6.egg')
    sys.path.insert(2, nx_path)
    twisted_path = os.path.join(pkg_path_64,
                                'Twisted-8.2.0-py2.6-linux-x86_64.egg')
    sys.path.insert(2, twisted_path)
    zope_path = os.path.join(pkg_path_64,
                             'zope.interface-3.5.2-py2.6-linux-x86_64.egg')
    sys.path.insert(2, zope_path)
    foolscap_path = os.path.join(pkg_path, 'foolscap-0.2.9-py2.6.egg')
    sys.path.insert(2, foolscap_path)

    # Define our PYTHONPATH variable
    os.environ['PYTHONPATH'] = ':'.join(sys.path)


if __name__ == '__main__':
    setup_paths()
    prev_dir = os.path.abspath(os.curdir)
    update_repos()
    build_docs()
    #push_to_sf()
    os.chdir(prev_dir)
nipype-0.9.2/tools/report_coverage.py000066400000000000000000000026721227300005300177250ustar00rootroot00000000000000
#!/usr/bin/env python

import subprocess


def run_tests(cmd):
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, shell=True)
    stdout, stderr = proc.communicate()
    if proc.returncode:
        msg = 'Running cmd: %s\n Error: %s' % (cmd, stderr)
        raise StandardError(msg)
    # Nose returns the output in stderr
    return stderr


def grab_coverage(output):
    """Grab coverage lines from nose output."""
    output = output.split('\n')
    covout = []
    header = None
    tcount = None
    for line in output:
        if line.startswith('nipype.interfaces.') or \
                line.startswith('nipype.pipeline.') or \
                line.startswith('nipype.utils.'):
            # Remove the Missing lines, too noisy
            percent_index = line.find('%')
            percent_index += 1
            covout.append(line[:percent_index])
        if line.startswith('Name '):
            header = line
        if line.startswith('Ran '):
            tcount = line
    covout.insert(0, header)
    covout.insert(1, '-'*70)
    covout.append('-'*70)
    covout.append(tcount)
    return '\n'.join(covout)


def main():
    cmd = 'nosetests --with-coverage --cover-package=nipype'
    print 'From current directory, running cmd:'
    print cmd, '\n'
    output = run_tests(cmd)
    report = grab_coverage(output)
    print report


main()
nipype-0.9.2/tools/run_examples.py000066400000000000000000000024361227300005300172370ustar00rootroot00000000000000
import os
import sys
from shutil import rmtree


def run_examples(example, pipelines, plugin):
    print 'running example: %s with plugin: %s' % (example, plugin)
    from nipype.utils.config import NipypeConfig
    config = NipypeConfig()
    config.enable_debug_mode()

    __import__(example)
    for pipeline in pipelines:
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
        if os.path.exists(wf.base_dir):
            rmtree(wf.base_dir)
        wf.config = {'execution': {'hash_method': 'timestamp',
                                   'stop_on_first_rerun': 'true'}}
        wf.run(plugin=plugin, plugin_args={'n_procs': 4})
        # run twice to check if nothing is rerunning
        wf.run(plugin=plugin)

if __name__ == '__main__':
    path, file = os.path.split(__file__)
    sys.path.insert(0, os.path.realpath(os.path.join(path, '..', 'examples')))
    examples = {'fmri_fsl_reuse': ['level1_workflow'],
                'fmri_spm_nested': ['level1', 'l2pipeline'],
                'fmri_spm_dartel': ['level1', 'l2pipeline'],
                'fmri_fsl_feeds': ['l1pipeline']}
    plugins = ['Linear', 'MultiProc', 'IPython']
    for plugin in plugins:
        for example, pipelines in examples.items():
            run_examples(example, pipelines, plugin)
nipype-0.9.2/tools/run_interface.py000066400000000000000000000052671227300005300173660ustar00rootroot00000000000000
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Script to auto-generate our API docs.
""" # stdlib imports import os from optparse import OptionParser import sys def listClasses(module=None): if module: __import__(module) pkg = sys.modules[module] print "Available functions:" for k,v in pkg.__dict__.items(): if 'class' in str(v) and k != '__builtins__': print "\t%s"%k def add_options(parser=None, module=None, function=None): interface = None if parser and module and function: __import__(module) interface = getattr(sys.modules[module],function)() for k,v in interface.inputs.iteritems(): parser.add_option("-%s"%k[0], "--%s"%k, dest="IXI%s"%k, metavar=k, action='store',type='string', help="you need help?",default='') return parser, interface def run_instance(interface, options): if interface: print "setting function inputs" for k,v in interface.inputs.iteritems(): optionskey = ''.join(('IXI',k)) if hasattr(options, optionskey): setattr(interface.inputs, k, getattr(options, optionskey)) print interface.inputs print "not really running anything" def get_modfunc(args): module = None function = None posargs = [] skip = False for a in args: if skip: skip = False continue if a.startswith('--'): pass elif a.startswith('-'): skip = True else: posargs.append(a) if posargs: module = posargs[0] if len(posargs)==2: function = posargs[1] return module, function def parse_args(): usage = "usage: %prog [options] module function" parser = OptionParser(usage=usage,version="%prog 1.0", conflict_handler="resolve") parser.add_option("--run", dest="run", action='store_true',help="Execute", default=False) module, function = get_modfunc(sys.argv[1:]) parser, interface = add_options(parser, module, function) (options, args) = parser.parse_args() if options.run and interface: #assign inputs run_instance(interface, options) else: parser.print_help() if module and not function: listClasses(module) parser.exit() #***************************************************************************** if __name__ == '__main__': parse_args() nipype-0.9.2/tools/setup.py000066400000000000000000000005541227300005300156740ustar00rootroot00000000000000#!/usr/bin/env python from distutils.core import setup setup(name='Nipype Tools', version='0.1', description='Utilities used in nipype development', author='Nipype Developers', author_email='nipy-devel@neuroimaging.scipy.org', url='http://nipy.sourceforge.net', scripts=['./nipype_nightly.py', './report_coverage.py'] ) nipype-0.9.2/tools/toollib.py000066400000000000000000000017711227300005300162020ustar00rootroot00000000000000"""Various utilities common to IPython release and maintenance tools. """ # Library imports import os import sys from subprocess import Popen, PIPE, CalledProcessError, check_call from distutils.dir_util import remove_tree # Useful shorthands pjoin = os.path.join cd = os.chdir # Utility functions #----------------------------------------------------------------------------- # Functions #----------------------------------------------------------------------------- def sh(cmd): """Execute command in a subshell, return status code.""" return check_call(cmd, shell=True) def compile_tree(): """Compile all Python files below current directory.""" vstr = '.'.join(map(str,sys.version_info[:2])) stat = os.system('python %s/lib/python%s/compileall.py .' % (sys.prefix,vstr)) if stat: msg = '*** ERROR: Some Python files in tree do NOT compile! ***\n' msg += 'See messages above for the actual file that produced it.\n' raise SystemExit(msg)