pax_global_header00006660000000000000000000000064142536255200014516gustar00rootroot0000000000000052 comment=81a138c86d4582ec8eead867f499fcb4891f88d4 uncertainties-3.1.7/000077500000000000000000000000001425362552000144035ustar00rootroot00000000000000uncertainties-3.1.7/.github/000077500000000000000000000000001425362552000157435ustar00rootroot00000000000000uncertainties-3.1.7/.github/FUNDING.yml000066400000000000000000000001661425362552000175630ustar00rootroot00000000000000custom: "https://www.paypal.com/donate/?token=sZjF_A0EanEbLxn5d8_srmHt9Y9fLObq5TMZGRB56dRz1CdU8N9qEdnZcFFP8PcC7oAzvG" uncertainties-3.1.7/.gitignore000066400000000000000000000003671425362552000164010ustar00rootroot00000000000000uncertainties-py23/ *.pyc build doc/_build dist MANIFEST # Created by setuptools: uncertainties.egg-info/ # For PyCharm (contains project files): .idea/ # py.test cache files (normally we are using nose though) .cache # vim temporary files .*.swp uncertainties-3.1.7/.travis.yml000066400000000000000000000017661425362552000165260ustar00rootroot00000000000000# Config file for automatic testing at travis-ci.org language: python # Workaround for being able to use Python 3.7 (2019-03) sudo: required os: linux dist: xenial jobs: include: - python: "2.7" env: DEPS="numpy nose" # As of 2018-10, Travis has installation problems with the DEPS below, so Python 3.3 is not tested anymore: #- python: "3.3" # env: DEPS="numpy nose" - python: "3.4" env: DEPS="numpy nose" - python: "3.5" env: DEPS="numpy nose" - python: "3.6" env: DEPS="numpy nose" - python: "3.7" env: DEPS="numpy nose" - python: "3.8" env: DEPS="numpy nose" - python: "3.9" env: DEPS="numpy nose" before_install: - pip install setuptools --upgrade - pip install pip --upgrade - pip install $DEPS - pip install codecov script: - python setup.py egg_info - python setup.py nosetests -sv --with-coverage # Generate documentation #- cd doc #- make html after_success: - codecov 
uncertainties-3.1.7/00_prepare_for_PyPI.sh000077500000000000000000000025541425362552000204540ustar00rootroot00000000000000#!/bin/sh # This script prepares the package for PyPI. It must be run # before uploading it on PyPI. # This script must be run from its directory. # Fail the script at the first failed command (HOWEVER, maybe when there are # no commits to be done during the merges, the commands fail?): #set -e echo "****************************************************************" echo "WARNING: if any commit fails, RESOLVE IT before running this" echo "script again. Otherwise conflict marks will be committed by the" echo "second run!" echo "****************************************************************" ## Only committed versions are packaged, to help with debugging published code: git commit -a # We make sure that the release and master branches are merged (changes # may have been made on both sides): git checkout master git merge release git checkout release git merge master # Default branch for working on the code: git checkout release # Packaging. We include wheels because it makes it easier to install, # in some cases (https://github.com/lebigot/uncertainties/pull/108, # https://discourse.slicer.org/t/problems-installing-lmfit-python-package/9210/6): python setup.py sdist bdist_wheel echo "Package created. The package can be uploaded with twine upload dist/...*" echo "where ...* is the new versions." 
echo "WARNING: current git branch is:" git branch | grep '^\*' uncertainties-3.1.7/INSTALL.txt000066400000000000000000000011611425362552000162510ustar00rootroot00000000000000* Some installation methods: python setup.py install or, for an installation in the user Python library (no additional access rights needed): python setup.py install --user or, for an installation in a custom directory my_directory: python setup.py install --install-lib my_directory or, if additional access rights are needed (Unix): sudo python setup.py install * The tests programs (test_*.py) are meant to be run through the Nose testing framework. This can be achieved for instance with a command like nosetests -sv uncertainties/ or simply nosetests uncertainties/ (for a less verbose output). uncertainties-3.1.7/LICENSE.txt000066400000000000000000000026701425362552000162330ustar00rootroot00000000000000Copyright (c) 2010-2020, Eric O. LEBIGOT (EOL). All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of its contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. uncertainties-3.1.7/MANIFEST.in000066400000000000000000000004561425362552000161460ustar00rootroot00000000000000include INSTALL.txt # Since 2019-02, for setuptools, the following can be replaced by the licence_file argument of setup(): include LICENSE.txt # Docs: recursive-include doc Makefile conf.py *.rst make.bat graft doc/_templates graft doc/_static prune doc/_build # No backup files: global-exclude *~ uncertainties-3.1.7/README.rst000066400000000000000000000040511425362552000160720ustar00rootroot00000000000000uncertainties ============= .. image:: https://readthedocs.org/projects/uncertainties/badge/?version=latest :target: https://uncertainties.readthedocs.io/en/latest/?badge=latest :alt: Documentation Status .. image:: https://img.shields.io/pypi/v/uncertainties.svg :target: https://pypi.org/project/uncertainties/ .. image:: https://pepy.tech/badge/uncertainties/week :target: https://pepy.tech/project/uncertainties .. image:: https://codecov.io/gh/lebigot/uncertainties/branch/master/graph/badge.svg :target: https://codecov.io/gh/lebigot/uncertainties/ .. image:: https://travis-ci.com/lebigot/uncertainties.svg?branch=master :target: https://travis-ci.com/lebigot/uncertainties .. 
image:: https://ci.appveyor.com/api/projects/status/j5238244myqx0a0r?svg=true :target: https://ci.appveyor.com/project/lebigot/uncertainties This is the ``uncertainties`` Python package, which performs **transparent calculations with uncertainties** (aka "error propagation"): >>> from uncertainties import ufloat >>> from uncertainties.umath import * # sin(), etc. >>> x = ufloat(1, 0.1) # x = 1+/-0.1 >>> print 2*x 2.00+/-0.20 >>> sin(2*x) # In a Python shell, "print" is optional 0.9092974268256817+/-0.08322936730942848 This package also **automatically calculates derivatives of arbitrary functions**: >>> (2*x+1000).derivatives[x] 2.0 The main documentation is available at https://uncertainties.readthedocs.io/. Git branches ------------ The ``release`` branch is the latest stable release. It should pass the tests. ``master*`` branches in the Github repository are bleeding-edge, and do not necessarily pass the tests. The ``master`` branch is the latest, relatively stable versions (while other ``master*`` branches are more experimental). Other branches might be present in the GitHub repository, but they are typically temporary and represent work in progress that does not necessarily run properly yet. License ------- This package and its documentation are released under the `Revised BSD License `_. 
uncertainties-3.1.7/appveyor.yml000066400000000000000000000035221425362552000167750ustar00rootroot00000000000000# Adapted from https://github.com/bsmurphy/PyKrige/blob/master/appveyor.yml build: false environment: global: # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the # /E:ON and /V:ON options are not enabled in the batch script intepreter # See: http://stackoverflow.com/a/13751649/163740 WITH_COMPILER: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_compiler.cmd" matrix: - PYTHON_VERSION: 2.7 PYTHON_ARCH: "64" MINICONDA: C:\Miniconda-x64 - PYTHON_VERSION: 3.5 PYTHON_ARCH: "64" MINICONDA: C:\Miniconda3-x64 - PYTHON_VERSION: 3.6 PYTHON_ARCH: "64" MINICONDA: C:\Miniconda3-x64 - PYTHON_VERSION: 3.7 PYTHON_ARCH: "64" MINICONDA: C:\Miniconda3-x64 - PYTHON_VERSION: 3.8 PYTHON_ARCH: "64" MINICONDA: C:\Miniconda3-x64 # - PYTHON_VERSION: 3.9 # PYTHON_ARCH: "64" # MINICONDA: C:\Miniconda3-x64 # Not running the tests on 32 bit Python at the moment # as AppVeyor is just too slow #- PYTHON_VERSION: 2.7 # PYTHON_ARCH: "32" # MINICONDA: C:\Miniconda #- PYTHON_VERSION: 3.5 # PYTHON_ARCH: "32" # MINICONDA: C:\Miniconda3 init: - "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH% %MINICONDA%" install: - "set PATH=%MINICONDA%;%MINICONDA%\\Scripts;%PATH%" - conda config --set always_yes yes --set changeps1 no - conda update -q conda # Set paths correctly (https://github.com/conda/conda/issues/8865#issuecomment-508865446): - "call %MINICONDA%\\Scripts\\activate" - conda init cmd.exe - conda info -a # Create a conda virtual environement - "conda create -n uncty-env numpy nose python=%PYTHON_VERSION%" - activate uncty-env test_script: - "cd C:\\projects\\uncertainties" - activate uncty-env # Activate the virtual environment - python setup.py egg_info - python setup.py nosetests -sv 
uncertainties-3.1.7/doc/000077500000000000000000000000001425362552000151505ustar00rootroot00000000000000uncertainties-3.1.7/doc/Makefile000066400000000000000000000063521425362552000166160ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf _build/* pdf: latex (cd _build/latex; $(MAKE) all-pdf) # The HTML needs pdf because it contains a link to the PDF version: html: pdf $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html @echo @echo "Build finished. The HTML pages are in _build/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml @echo @echo "Build finished. The HTML pages are in _build/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json @echo @echo "Build finished; now you can process the JSON files." 
htmlhelp: pdf $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in _build/htmlhelp." epub: pdf $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) _build/epub @echo @echo "Build finished; the EPUB documentation is in" \ "_build/epub." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) _build/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in _build/qthelp, like this:" @echo "# qcollectiongenerator _build/qthelp/uncertaintiesPythonpackage.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile _build/qthelp/uncertaintiesPythonpackage.qhc" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex @echo @echo "Build finished; the LaTeX files are in _build/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes @echo @echo "The overview file is in _build/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in _build/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in _build/doctest/output.txt." uncertainties-3.1.7/doc/README.rst000066400000000000000000000002561425362552000166420ustar00rootroot00000000000000The documentation can be created by running ``make`` (``make html``, ``make pdf``, etc.). 
Creating the documentation requires `Sphinx `_.uncertainties-3.1.7/doc/_static/000077500000000000000000000000001425362552000165765ustar00rootroot00000000000000uncertainties-3.1.7/doc/_static/default.css000066400000000000000000000200021425362552000207260ustar00rootroot00000000000000/** * Alternate Sphinx design * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl. */ body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; font-size: 14px; letter-spacing: -0.01em; line-height: 150%; text-align: center; /*background-color: #AFC1C4; */ background-color: #BFD1D4; color: black; padding: 0; border: 1px solid #aaa; margin: 0px 80px 0px 80px; min-width: 740px; } a { color: #CA7900; text-decoration: none; } a:hover { color: #2491CF; } pre { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.015em; padding: 0.5em; border: 1px solid #ccc; background-color: #f8f8f8; } td.linenos pre { padding: 0.5em 0; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { margin-left: 0.5em; } table.highlighttable td { padding: 0 0.5em 0 0.5em; } cite, code, tt { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.01em; } hr { border: 1px solid #abc; margin: 2em; } tt { background-color: #f2f2f2; border-bottom: 1px solid #ddd; color: #333; } tt.descname { background-color: transparent; font-weight: bold; font-size: 1.2em; border: 0; } tt.descclassname { background-color: transparent; border: 0; } tt.xref { background-color: transparent; font-weight: bold; border: 0; } a tt { background-color: transparent; font-weight: bold; border: 0; color: #CA7900; } a tt:hover { color: #2491CF; } dl { margin-bottom: 15px; } dd p { margin-top: 0px; } dd ul, dd table { margin-bottom: 10px; } dd { margin-top: 3px; margin-bottom: 10px; margin-left: 30px; } .refcount { color: #060; } 
dt:target, .highlight { background-color: #fbe54e; } dl.class, dl.function { border-top: 2px solid #888; } dl.method, dl.attribute { border-top: 1px solid #aaa; } dl.glossary dt { font-weight: bold; font-size: 1.1em; } pre { line-height: 120%; } pre a { color: inherit; text-decoration: underline; } .first { margin-top: 0 !important; } div.document { background-color: white; text-align: left; background-image: url(contents.png); background-repeat: repeat-x; } /* div.documentwrapper { width: 100%; } */ div.clearer { clear: both; } div.related h3 { display: none; } div.related ul { background-image: url(navigation.png); height: 2em; list-style: none; border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 0; padding-left: 10px; } div.related ul li { margin: 0; padding: 0; height: 2em; float: left; } div.related ul li.right { float: right; margin-right: 5px; } div.related ul li a { margin: 0; padding: 0 5px 0 5px; line-height: 1.75em; color: #EE9816; } div.related ul li a:hover { color: #3CA8E7; } div.body { margin: 0; padding: 0.5em 20px 20px 20px; } div.bodywrapper { margin: 0 240px 0 0; border-right: 1px solid #ccc; } div.body a { text-decoration: underline; } div.sphinxsidebar { margin: 0; padding: 0.5em 15px 15px 0; width: 210px; float: right; text-align: left; /* margin-left: -100%; */ } div.sphinxsidebar h4, div.sphinxsidebar h3 { margin: 1em 0 0.5em 0; font-size: 0.9em; padding: 0.1em 0 0.1em 0.5em; color: white; border: 1px solid #86989B; background-color: #AFC1C4; } div.sphinxsidebar ul { padding-left: 1.5em; margin-top: 7px; margin-bottom: 7px; list-style: none; padding: 0; line-height: 130%; } div.sphinxsidebar ul ul { list-style: square; margin-left: 20px; } p { margin: 0.8em 0 0.5em 0; } p.rubric { font-weight: bold; } h1 { margin: 0; padding: 0.7em 0 0.3em 0; font-size: 1.5em; color: #11557C; } h2 { margin: 1.3em 0 0.2em 0; font-size: 1.35em; padding: 0; } h3 { margin: 1em 0 -0.3em 0; font-size: 1.2em; } h1 a, h2 a, h3 a, h4 a, h5 a, h6 a { 
color: black!important; } h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { display: none; margin: 0 0 0 0.3em; padding: 0 0.2em 0 0.2em; color: #aaa!important; } h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { display: inline; } h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, h5 a.anchor:hover, h6 a.anchor:hover { color: #777; background-color: #eee; } table { border-collapse: collapse; margin: 0 -0.5em 0 -0.5em; } table td, table th { padding: 0.2em 0.5em 0.2em 0.5em; } div.footer { background-color: #E3EFF1; color: #86989B; padding: 3px 8px 3px 0; clear: both; font-size: 0.8em; text-align: right; } div.footer a { color: #86989B; text-decoration: underline; } div.pagination { margin-top: 2em; padding-top: 0.5em; border-top: 1px solid black; text-align: center; } div.sphinxsidebar ul.toc { margin: 1em 0 1em 0; padding: 0 0 0 0.5em; list-style: none; } div.sphinxsidebar ul.toc li { margin: 0.5em 0 0.5em 0; font-size: 0.9em; line-height: 130%; } div.sphinxsidebar ul.toc li p { margin: 0; padding: 0; } div.sphinxsidebar ul.toc ul { margin: 0.2em 0 0.2em 0; padding: 0 0 0 1.8em; } div.sphinxsidebar ul.toc ul li { padding: 0; } div.admonition, div.warning { font-size: 0.9em; margin: 1em 0 0 0; border: 1px solid #86989B; background-color: #f7f7f7; } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; border-bottom: 1px solid #86989B; font-weight: bold; background-color: #AFC1C4; } div.warning { border: 1px solid #940000; } div.warning p.admonition-title { background-color: #CF0000; border-bottom-color: #940000; } div.admonition ul, div.admonition ol, div.warning ul, div.warning ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } 
div.versioninfo { margin: 1em 0 0 0; border: 1px solid #ccc; background-color: #DDEAF0; padding: 8px; line-height: 1.3em; font-size: 0.9em; } a.headerlink { color: #c60f0f!important; font-size: 1em; margin-left: 6px; padding: 0 4px 0 4px; text-decoration: none!important; visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } a.headerlink:hover { background-color: #ccc; color: white!important; } table.indextable td { text-align: left; vertical-align: top; } table.indextable dl, table.indextable dd { margin-top: 0; margin-bottom: 0; } table.indextable tr.pcap { height: 10px; } table.indextable tr.cap { margin-top: 10px; background-color: #f2f2f2; } img.toggler { margin-right: 3px; margin-top: 3px; cursor: pointer; } img.inheritance { border: 0px } form.pfform { margin: 10px 0 20px 0; } table.contentstable { width: 90%; } table.contentstable p.biglink { line-height: 150%; } a.biglink { font-size: 1.3em; } span.linkdescr { font-style: italic; padding-top: 5px; font-size: 90%; } ul.search { margin: 10px 0 0 20px; padding: 0; } ul.search li { padding: 5px 0 5px 20px; background-image: url(file.png); background-repeat: no-repeat; background-position: 0 7px; } ul.search li a { font-weight: bold; } ul.search li div.context { color: #888; margin: 2px 0 0 30px; text-align: left; } ul.keywordmatches li.goodmatch a { font-weight: bold; } uncertainties-3.1.7/doc/_static/eol.jpg000066400000000000000000000104571425362552000200660ustar00rootroot00000000000000JFIFHHExifMM*Created with GIMP XICC_PROFILE HLinomntrRGB XYZ  1acspMSFTIEC sRGB-HP cprtP3desclwtptbkptrXYZgXYZ,bXYZ@dmndTpdmddvuedLview$lumimeas $tech0 rTRC< gTRC< bTRC< textCopyright (c) 1998 Hewlett-Packard CompanydescsRGB IEC61966-2.1sRGB IEC61966-2.1XYZ QXYZ XYZ o8XYZ bXYZ $descIEC http://www.iec.chIEC http://www.iec.chdesc.IEC 61966-2.1 Default RGB colour 
space - sRGB.IEC 61966-2.1 Default RGB colour space - sRGBdesc,Reference Viewing Condition in IEC61966-2.1,Reference Viewing Condition in IEC61966-2.1view_. \XYZ L VPWmeassig CRT curv #(-27;@EJOTY^chmrw| %+28>ELRY`gnu| &/8AKT]gqz !-8COZfr~ -;HUcq~ +:IXgw'7HYj{+=Oat 2FZn  % : O d y  ' = T j " 9 Q i  * C \ u & @ Z t .Id %A^z &Ca~1Om&Ed#Cc'Ij4Vx&IlAe@e Ek*Qw;c*R{Gp@j>i  A l !!H!u!!!"'"U"""# #8#f###$$M$|$$% %8%h%%%&'&W&&&''I'z''( (?(q(())8)k))**5*h**++6+i++,,9,n,,- -A-v--..L.../$/Z///050l0011J1112*2c223 3F3334+4e4455M555676r667$7`7788P8899B999:6:t::;-;k;;<' >`>>?!?a??@#@d@@A)AjAAB0BrBBC:C}CDDGDDEEUEEF"FgFFG5G{GHHKHHIIcIIJ7J}JK KSKKL*LrLMMJMMN%NnNOOIOOP'PqPQQPQQR1R|RSS_SSTBTTU(UuUVV\VVWDWWX/X}XYYiYZZVZZ[E[[\5\\]']x]^^l^__a_``W``aOaabIbbcCccd@dde=eef=ffg=ggh?hhiCiijHjjkOkklWlmm`mnnknooxop+ppq:qqrKrss]sttptu(uuv>vvwVwxxnxy*yyzFz{{c{|!||}A}~~b~#G k͂0WGrׇ;iΉ3dʋ0cʍ1fΏ6n֑?zM _ɖ4 uL$h՛BdҞ@iءG&vVǥ8nRĩ7u\ЭD-u`ֲK³8%yhYѹJº;.! zpg_XQKFAǿ=ȼ:ɹ8ʷ6˶5̵5͵6ζ7ϸ9к<Ѿ?DINU\dlvۀ܊ݖޢ)߯6DScs 2F[p(@Xr4Pm8Ww)KmC  !"$"$C@@"/!1QaAq"2BRS !1"2q ?•4D"mˎ,! 
ܨG_W N֡Aj+etV0{v6;raÖ2e-NTsa\?Ut"MP;#YGr)(ǥ v8>qY, Ms:p_`vvtC$+ڸ$}Y(+uE&lO)Xϥg֝Y|#|̸Aԫڊ]ڂx$Y=}3]E#=*eN}k$\*$$)Iw;t*֜š<@ eR(G1W {8u+?,w'-“T2]Im3^!CfkeN8qۧz|] OB1VLj0BbXJVY IRs#izLCe[He q֨ˢ,7.JV( /7QÈ -.$GljZPckCktu$t[DvFBq[$%/p)ZPvgr+b9qJ qԎ%#Niy g"R~qzpp=)G尗#%JyRMf5s;C5{6-s*$Z~j7C 5|Aqvd}Jqө*_Z[_q6ܶW&;R9 *yskW/Vn/U5wyodN},w>y b )iUqC?ث2e͗νui>.=(ZjY FzۗM8jO.3~Nv8jيe(葟8IZ +ò5w>jjsiv"HdޞX&uncertainties-3.1.7/doc/_static/favicon.ico000066400000000000000000000260361425362552000207260ustar00rootroot0000000000000044 ,(4h %;^uY@* 'B_~jM5 *DeyT; *FfX;.Gf~S3HfrIit`YY_kfulfbdhlqwMwymlntGdkodfs^ZikY_ysuxWicSVmn^VSOMHN]k}fXn]N\bRJB:4201;HT[_efMPsdYYc_|f#.=XuZKWmdlz*?nYMeko**LXSy}r#5h}VZ|})NiW`(@UWex$9uCWg3e>Vi0[>Vj$/V=Vj+0T=Vi01R=Ui42S=Ui63S=Ui63S=Ui63S=Ui63T=Ui63S=Ui63S=Ui63S=Ui63S=Ui63S=Ui63S=Ui63S=Ui63S=Ui52S;Qf40Q6I[~2*G~-2wzLz% B 5HSR~ *} 4EEы%RB%Lܹ圳qrdRy>CBfs^eB"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"$" ;'y؇GׄF(y؇b}vg!T P%1@#U6E~iHc# ib I"Dn OHb0bCfJrHY ڊ{uCݍ}f|8_4!) "Q.b(T.7^DzW҆ xd'Of}[ y(H@?ۀUX dKoR:OPkCPڅra'E(tBãst3>ۜ"y؇!F@0 CkK Aʼ{?9&-69b-DR131 CL^v7AP6dmV9cpr?=skEHow:{Cg#:Pڀ#d`hMq7[Q[z/d D9>w,4yRĺ²5h2i#1Df/ЙrRA*tL4H;J)i00u9[/ͷeXyd}N $ܖXwtzX9%{ow_\" Q=woxw}w;\=>4YDi7\QbI{7Vn vל;z )[æ;cݢJ /xed֦q?-ٵ@I)!ZHf|Nk@g+rQ Z4Ih+tu Phʈ#CM#U1ˍ9sH;(LKqml͢u?q/.<9_/MBO``bVA c EK|[}tg逩uK'mWlҰҦ%N&2=a/F;:HrurduK>nѻG|~ۜߤD 6 vٱbMiL@P͚_s]Nֿ0E+E ^tDwyyԷ:ʝJM93p.0ܻx{OotSÃg<#cVG 8u# rt]tbfK3/F qzʰ;{tB:#XJG=_=4/(pƥwr>?*Q"/KD;2mLj eD(MDي+B-dR4FA~R ;X}. d[0yC|J" XNymm=+6dBpf mg}ZaФ!rj[Ϯpq=QI(#!Vg>}Ga_ؕ0d8f\/+ 6@d 9󿦐pau_Xz^O\3nٹ;z1(H#Uߨ6\ D(wBO锬}։H,["VyNZxzUS Z.pYXm]Urt,%S,Bue  }PeT4߷:C) W*X ;9o85F="jt!s13qKXp}Ş{gI!!oZN}h\lS9*ҶDȌ]xt, Jc8{ƞZ3>(h}%`5  ͥ<#JCa[g~?ѯN[:]R*iXNP[v*~0pk M,rŮf|oq<[RO+v}My}qa*m`?y6㪕 @. ZK !d4Ŭ)6[SmێV^fDkM4JLXƐBkɡ*ؔr :5Hu/*R%8q;p=5>X"a~gaG'5NPjv;q$RϿ8ƈ$3ʒC*{W{g}r޹O]4]v;}F;v%qt3O W3kX"-LOl;+1{OeץT/& ݌}.-ܰvaU 2b+ְH&- Zk Ș}%1z$&-↯nhzҫ&f1Lz-ط~7"uَ Ԗ_Gd1؂6Ȉ( C{6k3;&Yzl HfE1R$" l$QF3k1. 
"}ɮ{"E)-v`\|0py !fuPT#+TD4{X]x)X<]9*(`M5){PV,q,ऊu%:6Ԏ"mV4 t,\҄w~+.;}3u{o9`ʣ'>ԧvɞ|CΝ/F$P>}f 'RVULsݤ--ŞQ ٗϼX倊/=PUJ&DѺxd,B D4it$;6Xm<%OSQ>w^c?|"ݍO30()N>'ώKUMeTW1c 469Rd5)1e3$ײ(Ţ,K0$-h|CF@ Rr*@*c)8쑥`YV} qc+41&}#^ڱ_ s\"R3oxCGuȇԷy(GϮnQr #Wi @ u \}dpV g3VWo!u-hc=Hc~?yJ<盒&BZrͮӊhi">ZYv]C#ZL&Ҟ^#7l5ACE}6?γ)K< 6b=Q'gBP,(Q0YSTv* d=|xM_N_?N7in0uyaHm!UCK/$/4S>[6{G ,khʄ^~é= SK!0iMe{k(+e(µ{Waθ܋VԀС㶔<3(*4N|/?װ`8oΚh5f\YY[Cy e,~Cz*Ű%ֶ޻rMAXs+5~غفw)W1WX2YyVvӬ{8R:Fwp̲tD=W9M Q[Z3Er?07-lW~ m4b%+K7" FD1 gqdl W@I1~:v E~ciRej]+taĎALoYV%ՙRUUSn YamZm @dS{w<{QP}MDE!8-(:䄒Ҋ(>(> m,my]sZL`a٨C{[.[7"0bY6Q4cuUYƬxߞlRSt˓(M ں*YYkT6H%K R ڧW]_V30Rض]Q^&/3.w?V) />\5+{NV yAS$g胔o,VNt?KǎqՏ]zW*_R#~ᑉ.{׆N{=_rȢKF5UL䦬-6}gy!'$*G;Gy_^!r/&q~O5޵saJxe.,o}47zw|n# VdrX ecvΊv%r*vA&Xٕoaaǟ?8Oe&bbJ|S>@)W춥crq'8O*ɾQꏾ5وyp$w-G /eJLpP2_̼뾅^\u[Ŕs*/:i#uQ~TC*uKYi^z!V:m([?{CWv"~g1^n+J`ҲJRC f>jꇧ֔sh:l1 Ey4|KO]6Vb열p}byʋqf#%?_j7g_]kې;NSf0>:[ o)x:`pAPb=]>sgU7jߒ|Gӫn<o_|t [vtu9s]5WC X%׍LN9|/dz 8_BvUԥ."3̙;>tv `֓8Ak01+ҰiȆXXt_3e߬=K.GS ?hDt^ d׀Yz@SQ}i} 嘶ޝ|!~ >N"4?v9f>Gm70T>U8z 簑$?xO?o}xS@:=-fCxae^NIl{b)%CTNUVt$FodVd !pͭ a 0Z(oS a<@J Y[Qzҳ;.[2@yB vsFbxM$e*mɏy_~ҷO7c9ȉz3V28=g\2YjJvR̕pG]Ə W}E_|OOiF5 _=3"5#>"? ߜ!0# 9Վ##Ӈ_W~7عn'w+LsSg> suA%2~6qݛF;_3Ya(60`\8*6H:Nmw%t, ܢ_ໍ拢Nwhch~̔g vo&A &zӬO0YHۇu׶+"}>,{blجƾ7y^=v/IAV-\8?>߫|Y`x;ژn FB,+-"< wkAUjQ@ JT2K+s/,Ȋ1]z)p1bbu}P`2(94мV=V!ð^SOa.,9f>n֗)r@w"XU,Lii,/9J'\=gQ~u4)_YJzJxY SQXRlz"~KSc[ν:@WWyz L4-Za0} Hn`$^tSb"^kc#߳b0`i]"M-B%;O@яRDvL?6F6Mu1ZWEo?9{1@?ջo: ʶ#RG_F:+Z8Dz\XY\se1}'a@DEQ80&B$ xq}Nf='[.jf[\] K纍P A0./v.hsJj!ڗ!\.(F=OVr/Nn>§gcmTT}T\}KV#Zf! 31x䋣tP/B~>u Mg^wicF+k*NQb1宀嫽n#@InPpذ͕LS`Jѳ긘'ڿ̺a0z~g@cf|}f H foch׌Oؗ齠-,"ٗK>?m[XGŚ-;>htMOWK>[ܲpۿ^Y/8eEj),U~-&I`V="C;78ƹYmv׊E~ʎa@)`eG ewOc*VG`Tq >d1mP9=${Jq7DY{BWoر/S? 
SD^u7.'<{<}Q۽ptsS8g'D -.2"]Lf8 "2}Jr= D-'>S4"C`R h]CN7 Jٗ3sf&Z;wdwjmϬ^ 3m;$vXza/:8?Mv6U\1?}էn~۴Ic?6g-Pqyṏzβ>q%1Azr2ΜpV4HdU0.5_p[K)b8R<9g(/PXu`FU7#AG.P>x)E[8n61'y`@vEޮ!}o{ (q=i "fybJX o֓ - ^,ʤࠡIޓ3= MNf;5~ΚpqP``jmxO=K\d幇3{kRs)umˮBв_"XDKx;GEζP(FuĶI ' x;v <{X3xWwˣw}ۊsaeñ/Tpr'J"_k*{|ǩ"`MYtN|"J&s.\on:bp`tCuFU"!oZ j/zE*bDn\gcy*w] DWhhS6+7nK-2B*fc4AJ`$' Xf&fUO>t_оN=ζh0<jkP}:fֿG=y%O\3v5 vX;bܤ3 np 3<-|>44|cHyk~>][ǫeEq96Tf^ϤTWch^͖e?24(e7~E/gCG ?_Tk \._< 1 `Ք[uqqX[DKѸDJw^yc;o+7M4[Ɓluf;\1#l`{d`݇^E={G*8tB2(C^.Ϛ3m8V_ےi8l[VW~9pf96&DgkkM`peg/[M|2\xhDOc>uX6Q^Ȃ1оx#xk۱9)K̐Ѫ2kxLϴiL O|~~pw/o7dޠ) f=|/BjF5hFAQfh_[IXU Qz47 0ʲIJlQ$hҝ 0)6>9K4 ۋ)&Mh8&@ >S߅6 pl}PzaDdq#t N9w`:m8스gK2~tT6GuÆ rʆP(Z2n3$--cڄoޜ|A{Z3t)Xm=#?d*Eս`n3E~S7 ;e?){|ٌ_ZUѸV]h|_ Vw2kړ5Ɏt/ hcZK6*"CjSl|!d;Wz7~{2pqLrM=U]oEpcR1Z{njߜ|YCvط,[nsk"Q-UՄ`nVk/v|Ҽ.9w궏 Ip oec[ F.rj'wu'R^{|TlvO8d.rGwbB@E]+5kʛk-E Ba|c)^VrI;>&^'(֧(dyfH@ {sXh.U&uVHLЁ,QLy+ٗW Xe={6kœYDL :GMATHF!5gX%eOsܧsZtRX  B}mܡ m*͗+]]g K@]7]7I4|+ǨOHL8njR[:,*̲FDD–G#"KW%;BM6crӏ z9LM2Zo,fYu m<$k :'9m+0BP7#{EoQӆd2 /t]Q+g<h W.ƺA #=k7\#neg Uƅ^u uߕυc>7䠴^\c.%8h*"W3>^ dE"ѝ(1|ιj1[UsD5ơ-h]71yy 49bs>Y}B/xye/_ޗ^`v)δ?|aGQ-ߣ4}Q%GFIOskeh]8S [?iA;vkK!a<_f rD@Ac_:$ gwq xʏ;e?n f~+Voi{]s9ˎ_ rPSAG} 5;kL(yOTl2+ۭ/W:oyhp7bt^@BkR?Fј990.QF4Z: j["?|  /jV~]fl|blE>Nߴ>Gt^=aƋtMg(^<$ ăXx"-F N|yfR )lK9ׂp=̲]1 s۸j29'Vg楥D k_o 2`NGE=%eUU}<6liLgoOfe]CwՇ$IKV:KZ8h*˧m2^w,%[y^RlM5EW~;r.x}+RDlC$P4kvfiս?w/{3oϾr$.\m*7ۼVL튆4tF(7LLݦ|e]=p `җ]\:d+Qn?K7N:ugA ,ei\X}!!moQފ|Eȼcݏw1]xQa ~ѾKgn7'Q NۈKsa_3 ]xloagMσ&G~R՗B©==U~3V7Y@+U[X`?c[F"\Sڧ5M^z寎>}xaIj|׿G|w)y#@*VS9Tu}7dT:L:Fv=mYwvOtn3nܷ/<K! 
['lg:m:FöyLoM^I/M_i߯,rQ'Z2e s0+盾2~U:gԜau{\Sm۶gfoDZe)ݣ [/hջXdZ4o>?*8 Tu}O>|`<✧2PcP%H: dE ToPJv̿?IЅ+4ZZbˡe!]_?^lE 3O:}kl'f MfX _?im7?F.>1$u{^v kֶ]UWʌ}[s|5ISgI_?%^RVe'?l}]{{XDnlZGɧzf_}uvz_Wc?x{}.~W~((2L13}؄΍橏ƽwwL`|';.-q/ۡ.S^줷5/M{vF@%,R&/3~˧?תC*ZMF%d%x3Nguji}Nvyri(O {Vщ49=b}w6 ^n`滗cK347g[vogڒoiϼ`koQKCW\ _Rs|[sA%DD8H&WuӫA-4&(Fz @CYšzJNچJWM¼S_ZҜXQoأU_UXgk EߐxO n~w ^>~w/yH$}:heռe!m\6j͎m:Ⱙ h>w׼۔*mtUԖRuŴ(%c@z 2+7:?8ÆYKb!'1`]M}mtLo;]/XuKG\_eˣX"Q1j鸄u["WTD а5euH>e EVÛ{؝>=UD]A91y7DRߵ5s+c/]  L6wٴ9;|e`ZrWYAn}`D:飾w`O_tkkud2y6[Gwv$ݓZ>J|Qǿ'ז`XCud۵K7b^T:vtKU_43hN~q~K͂L|]Mg"IXG&Um0*dt WhFXJIY^T2hlށPϳ/3?{سud㾥19h 6ړ`J2)fNh7TPjieeg=_Lwfmu6(uygu|GUsHߚ>qr;/Fܯkuoc9#vSO14!ئ$? *Q"n[|oKI ֑>hk#$JG5@4@ +KNjp=Ǯ~1ϬT>6mPpl%՞w)O\)66gfYg疭O"L\Z5U5]h,IϘIDATz-[O.mƒ`a,YGP__7r)> zd$Aoj=㵙:}-fVќk3 9?,16"E%3GU]`msCV~f#-Bں2C5Osrz`|Z+74y:X,E+ڟ@WX֬^\ٱ4Fc#60P6jxjKvLT{}xw=wՈ^rQ4jX"yk&6yl ڡ5so]i ZJb`)VD"HO-w}d@ K ,Kl`Oi}2e%%)$ +f19y(B}"Q6djuIdOjS<{瑯z}t̒TN~O:GfusnE摘*˛!͕h!?"}fMD|I~WݑR,.޶)Z|m,s@`x&g0> ;Y]OԻ<_BBy<"HX P2bUkJic!EP DYey}}T2/o}y&r^.~7{^i5f8ya!bCqD*S{1 ӽSnO}Nf'g$",刲 y Mk t׼vg)iJ%2& lBT $2!6"1)c*ƯA")e[Hx!X!!+={ow̬6޹;WULo9}z)?8&!`D/^Uw; 4бRyLD+w{wm<|Ztkf _u Bx3UawJn~¿]c{;࢈R]wL0,@ !h bG4Ϣ>qygخܒMoߔ/}Iw3Eށ<q8L'BYl9ȷ:+5K;$?5mpx3bMc@pm\N 6R8qΉF8Ͻ8wA$ P`캆&u:zGn{Lx5\/_x-6>wyOǙ`N%2D.4bDj-M_7]'VSWhh-d`!3,hIRP{HT|!HJcwH4( A0DJOԮO7Οsٿ~w}|_dl/vtػܳ؀GpG )ЀjuttDl-)__ nm-^L&/ۇFstp)cHa@5ҭ%TTJH- F's{hFžy?Ėj:2tfq!r.rqp X[ H@@f~N~vnꗿk,6  c|RMp@x,\ӽ.z &jN۾ӯ5m/wupJ A  +|\18Λ̬4:m /x}Rny75e;ceDn`_=WYX5t\j~ލ;(^lBd] h)@!hup۲?ncm1e[uǂ W_~bpAYQZVUOz?|d3G_уwȿ?gݝ7 ߨhSopŊ8uIbE:6X0bI$OryRgoyͭcRąym^!0wK7}xΆOf;Py>D>³Ҡ%JA2Lk4fg@/pݝ7 [~ch"Yn^r3Ol%@Qge6.'3f7`|1]1#{Cor %H9 pxGi{ _:V[kz{ڗ$[2nqbn|toz{mv6&S z='⣴zK.7 ~;^W"L]WO^,c>yūtәyl4Bp)b~tppwo OO=)J)IWI>g~I '%RvY1Bu:Ŝ]gva|xt[صkǶxy|CqPYeA}zͯџN/mMkMy\=xK?x7O%^pf'Zt=a:n1;62|;qAk#j 8bB@jH FzEbr/}y8m6qIaFlhxNSjM}W>֞gX-8ߕ\ڡbT3 u S̹v!;<2:W{!|Y> }0+mN{?#Grr+x 0d>=?ҦLSݠ2ShrI\2ÀÐpv'EzB GA%lA3C ?8g4_aN2RD2} kA<ߘ!L#BiQ||#Ǒ}r}"M¯={iDV(b$~zK~"d kׯA8LL 5+F%'2:HK#4b .= APu/?C|y-" q(1brx腐z&>dKL3|KQ5>`{eFD 
Q|^Xɟ*V&4p\Z$1G+@«-UK //:xBhCzQ#:!޾yߩKW> y,y[joW+lh,0̐^9g443C%c3|OYZ4}z~a}* JD ܧ=,([X_a( Q\M7򀅟D[ :?rsd3|\!}dUfb71| n^Ȳ1UD>WL#I -s}]hBC0խ3Ek0"Pztϳ& i$]WBҊoխ؂D yχE8 AUdkz q_;hZ+lJ68`>G26ϱ/`58~Ѷ9ƟB%,&LV~p  W} 3dljc 6ΠKXAHXyH^;]/aYXQ}TүFm.qzr@41g;Hge5  <- LzO#s33=jzQ#%0;s #F؎Qwj7 |TjLk]j!W {~y3W/So4ը^{w6QZ+[xc hzF$`iia& Xyn7R/8X|>ic&hиEΧhB oo8(q7f`{c֢gf #vqŝIENDB`uncertainties-3.1.7/doc/_templates/000077500000000000000000000000001425362552000173055ustar00rootroot00000000000000uncertainties-3.1.7/doc/_templates/layout.html000066400000000000000000000070341425362552000215140ustar00rootroot00000000000000{% extends "!layout.html" %} {% block rootrellink %} {{ toctree() }} {% endblock %} {% block document %} {{ super() }} Fork me on GitHub {% endblock %} {% block relbar1 %}
uncertainties
{{ super() }} {% endblock %} {#############################################################################} {# Sidebar customization #) {# put the sidebar before the body #} {% block sidebar1 %} {%- macro sidebar() %} {%- if not embedded %}{% if not theme_nosidebar|tobool %}
{%- block sidebarlogo %} {%- if logo %} {%- endif %} {%- endblock %} {%- block sidebartoc %} {%- block sidebarglobaltoc %}

{{ _('Table of contents') }}

{{ toctree() }} {%- endblock %} {%- endblock %} {%- block sidebarrel %} {%- endblock %} {%- block sidebarsourcelink %} {%- if show_source and has_source and sourcename %}

{{ _('This Page') }}

{%- endif %} {%- endblock %} {%- if customsidebar %} {% include customsidebar %} {%- endif %} {%- if display_toc %}

{{ _('Section contents') }}

{{ toc }} {%- endif %} {%- block sidebarsearch %} {%- if pagename != "search" %} {%- endif %} {%- endblock %} {%- block copyright %}

Documentation license

Creative Commons License {%- endblock %}
{%- endif %}{% endif %} {%- endmacro %} {{ sidebar() }}{% endblock %} {% block sidebar2 %}{% endblock %} uncertainties-3.1.7/doc/conf.py000066400000000000000000000153121425362552000164510ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # uncertainties Python package documentation build configuration file, created by # sphinx-quickstart on Tue Jun 8 18:32:22 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from datetime import date import sys, os sys.path.insert(0, os.path.abspath('..')) import uncertainties # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index_TOC' # General information about the project. project = u'uncertainties Python package' copyright = u'2010–%d, Eric O. LEBIGOT (EOL)' % date.today().year # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1' # The full version, including alpha/beta/rc tags. 
release = uncertainties.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'sphinxdoc' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'uncertaintiesdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). 
#latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index_TOC', 'uncertaintiesPythonPackage.tex', u'uncertainties Python package Documentation', u'Eric O. LEBIGOT (EOL)', 'manual'), ] #latex_engine = "xelatex" # Not recognized by readthedocs.io as of 2018-04-08 # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = r'\DeclareUnicodeCharacter{207B}{$^-$}' latex_elements = { # Superscript -, etc. for pdflatex (unnecessary, with xelatex): 'preamble': r''' \DeclareUnicodeCharacter{207B}{$^-$} \DeclareUnicodeCharacter{22C5}{$\cdot$} ''' } # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True uncertainties-3.1.7/doc/index.rst000066400000000000000000000407531425362552000170220ustar00rootroot00000000000000.. meta:: :description: The uncertainties Python package :keywords: error propagation, uncertainties, error calculations, Python, calculator, library, package ==================================== Welcome to the uncertainties package ==================================== The `uncertainties package`_ is a free, cross-platform program that **transparently** handles calculations with **numbers with uncertainties** (like 3.14±0.01). It can also yield the **derivatives** of any expression. The :mod:`uncertainties` package **takes the pain and complexity out** of uncertainty calculations. Error propagation is not to be feared anymore! 
Calculations of results with uncertainties, or of derivatives, can be performed either in an **interactive session** (as with a calculator), or in **programs** written in the Python_ programming language. Existing calculation code can **run with little or no change**. Whatever the complexity of a calculation, this package returns its result with an uncertainty as predicted by linear `error propagation theory`_. It automatically :ref:`calculates derivatives ` and uses them for calculating uncertainties. Almost all uncertainty calculations are performed **analytically**. **Correlations** between variables are automatically handled, which sets this module apart from many existing error propagation codes. You may want to check the following related uncertainty calculation Python packages to see if they better suit your needs: soerp_ (higher-order approximations) and mcerp_ (Monte-Carlo approach). .. index:: calculator An easy-to-use calculator ========================= Calculations involving **numbers with uncertainties** can be performed even without knowing anything about the Python_ programming language. After `installing this package`_ and `invoking the Python interpreter`_, calculations with **automatic error propagation** can be performed **transparently** (i.e., through the usual syntax for mathematical formulas): >>> from uncertainties import ufloat >>> from uncertainties.umath import * # sin(), etc. >>> x = ufloat(1, 0.1) # x = 1+/-0.1 >>> print 2*x 2.00+/-0.20 >>> sin(2*x) # In a Python shell, "print" is optional 0.9092974268256817+/-0.08322936730942848 Thus, existing calculation code designed for regular numbers can run with numbers with uncertainties with :ref:`no or little modification `. .. index:: correlations; simple example Another strength of this package is its correct handling of **correlations**. 
For instance, the following quantity is exactly zero even though :data:`x` has an uncertainty: >>> x-x 0.0+/-0 Many other error propagation codes return the incorrect value 0±0.1414… because they wrongly assume that the two subtracted quantities are *independent* random variables. **Arrays** of numbers with uncertainties are :ref:`transparently handled ` too. **Derivatives** are similarly very :ref:`easy to obtain `: >>> (2*x+1000).derivatives[x] 2.0 They are calculated with a :ref:`fast method `. Available documentation ======================= The :doc:`user_guide` details many of the features of this package. The part :doc:`numpy_guide` describes how arrays of numbers with uncertainties can be created and used. The :doc:`tech_guide` gives advanced technical details. .. only:: html A :download:`PDF version <_build/latex/uncertaintiesPythonPackage.pdf>` of the documentation is also available. Additional information is available through the pydoc_ command, which gives access to many of the documentation strings included in the code. .. index:: installation .. _installing this package: Installation and download ========================= Important note -------------- The installation commands below should be **run in a DOS or Unix command shell** (*not* in a Python shell). Under Windows (version 7 and earlier), a command shell can be obtained by running ``cmd.exe`` (through the Run… menu item from the Start menu). Under Unix (Linux, Mac OS X,…), a Unix shell is available when opening a terminal (in Mac OS X, the Terminal program is found in the Utilities folder, which can be accessed through the Go menu in the Finder). Automatic install or upgrade ---------------------------- One of the automatic installation or upgrade procedures below might work on your system, if you have a Python package installer or use certain Linux distributions. 
Under Unix, it may be necessary to prefix the commands below with ``sudo``, so that the installation program has **sufficient access rights to the system**. If you use the `Anaconda distribution `_, you can install the latest version with .. code-block:: sh conda install -c conda-forge uncertainties If you have `pip `_, you can try to install the latest version with .. code-block:: sh pip install --upgrade uncertainties If you have setuptools_, you can try to automatically install or upgrade this package with .. code-block:: sh easy_install --upgrade uncertainties The :mod:`uncertainties` package is also available for **Windows** through the `Python(x,y)`_ distribution. It may also be included in Christoph Gohlke's Base distribution of `scientific Python packages`_. **Mac OS X** users who use the `MacPorts package manager `_ can install :mod:`uncertainties` with ``sudo port install py**-uncertainties``, and upgrade it with ``sudo port upgrade py**-uncertainties`` where ``**`` represents the desired Python version (``27``, ``33``, etc.). The :mod:`uncertainties` package is also available through the following **Linux** distributions and software platforms: `Ubuntu `_, `Fedora `_, `openSUSE `_, `Debian `_ and `Maemo `_. Manual download and install --------------------------- Alternatively, you can simply download_ the package archive from the Python Package Index (PyPI) and unpack it. The package can then be installed by **going into the unpacked directory** (:file:`uncertainties-…`), and running the provided :file:`setup.py` program with .. code-block:: sh python setup.py install (where the default ``python`` interpreter must generally be replaced by the version of Python for which the package should be installed: ``python3``, ``python3.3``, etc.). For an installation with Python 2.6+ in the *user* Python library (no additional access rights needed): .. code-block:: sh python setup.py install --user For an installation in a custom directory :file:`my_directory`: .. 
code-block:: sh python setup.py install --install-lib my_directory If additional access rights are needed (Unix): .. code-block:: sh sudo python setup.py install You can also simply **move** the :file:`uncertainties-py*` directory that corresponds best to your version of Python to a location that Python can import from (directory in which scripts using :mod:`uncertainties` are run, etc.); the chosen :file:`uncertainties-py*` directory should then be renamed :file:`uncertainties`. Python 3 users should then run ``2to3 -w .`` from inside this directory so as to automatically adapt the code to Python 3. Source code ----------- The latest, bleeding-edge but working `code `_ and `documentation source `_ are available `on GitHub `_. The :mod:`uncertainties` package is written in pure Python and has no external dependency except for the future_ package (the `NumPy`_ package is optional). It contains about 7000 lines of code. 75 % of these lines are documentation strings and comments. The remaining 25 % are split between unit tests (15 % of the total) and the calculation code proper (10 % of the total). :mod:`uncertainties` is thus a **lightweight, portable package** with abundant documentation and tests. Migration from version 1 to version 2 ===================================== Some **incompatible changes** were introduced in version 2 of :mod:`uncertainties` (see the `version history`_). While the version 2 line will support the version 1 syntax for some time, it is recommended to **update existing programs** as soon as possible. This can be made easier through the provided **automatic updater**. The automatic updater works like Python's `2to3 `_ updater. It can be run (in a Unix or DOS shell) with: .. code-block:: sh python -m uncertainties.1to2 For example, updating a single Python program can be done with .. 
code-block:: sh python -m uncertainties.1to2 -w example.py All the Python programs contained under a directory ``Programs`` (including in nested sub-directories) can be automatically updated with .. code-block:: sh python -m uncertainties.1to2 -w Programs Backups are automatically created, unless the ``-n`` option is given. Some **manual adjustments** might be necessary after running the updater (incorrectly modified lines, untouched obsolete syntax). While the updater creates backup copies by default, it is generally useful to **first create a backup** of the modified directory, or alternatively to use some `version control `_ system. Reviewing the modifications with a `file comparison tool `_ might also be useful. What others say =============== - "*Superb,*" "*wonderful,*" "*It's like magic.*" (`Joaquin Abian `_) - "*pretty amazing*" (`John Kitchin `_) - "*An awesome python package*" (`Jason Moore `_) - "*Utterly brilliant*" (`Jeffrey Simpson `_) - "*An amazing time saver*" (`Paul Nakroshis `_) - "*Seems to be the gold standard for this kind of thing*" (`Peter Williams `_) - "*This package has a great interface and makes error propagation something to stop fearing.*" (`Dr Dawes `_) - "*uncertainties makes error propagation dead simple.*" (`enrico documentation `_) - "*many inspiring ideas*" (`Abraham Lee `_) - "*Those of us working with experimental data or simulation results will appreciate this.*" (`Konrad Hinsen `_) - "*PyPI\'s uncertainties rocks!*" (`Siegfried Gevatter `_) - "*A very cool Python module*" (`Ram Rachum `_) - "*Holy f\*\*\* this would have saved me so much f\*\*\*ing time last semester*." 
(`reddit `_) Future developments =================== Planned future developments include (starting from the most requested ones): - handling of complex numbers with uncertainties; - increased support for `NumPy`_: Fourier Transform with uncertainties, automatic wrapping of functions that accept or produce arrays, standard deviation of arrays, more convenient matrix creation, new linear algebra methods (eigenvalue and QR decompositions, determinant,…), input of arrays with uncertainties as strings (like in NumPy),…; - `JSON `_ support; - addition of :attr:`real` and :attr:`imag` attributes, for increased compatibility with existing code (Python numbers have these attributes); - addition of new functions from the :mod:`math` module; - fitting routines that conveniently handle data with uncertainties; - a re-correlate function that puts correlations back between data that was saved in separate files; - support for multi-precision numbers with uncertainties. **Call for contributions**: I got multiple requests for complex numbers with uncertainties, Fourier Transform support, and the automatic wrapping of functions that accept or produce arrays. Please contact me if you are interested in contributing. Patches are welcome. They must have a high standard of legibility and quality in order to be accepted (otherwise it is always possible to create a new Python package by branching off this one, and I would still be happy to help with the effort). **Please support the continued development of this program** by `donating $10`_ or more through PayPal (no PayPal account necessary). I love modern board games, so this will go towards giving my friends and I some special gaming time! .. index:: support Contact ======= **Feature requests, bug reports, or feedback are much welcome.** They can be sent_ to the creator of :mod:`uncertainties`, `Eric O. LEBIGOT (EOL)`_. .. 
figure:: _static/eol.* :height: 64 :width: 64 :target: http://linkedin.com/pub/eric-lebigot/22/293/277 :align: center :alt: Eric O. LEBIGOT (EOL) How to cite this package ======================== If you use this package for a publication (in a journal, on the web, etc.), please cite it by including as much information as possible from the following: *Uncertainties: a Python package for calculations with uncertainties*, Eric O. LEBIGOT. Adding the version number is optional. Acknowledgments =============== The author wishes to thank all the people who made generous `donations`_: they help keep this project alive by providing positive feedback. I greatly appreciate having gotten key technical input from Arnaud Delobelle, Pierre Cladé, and Sebastian Walter. Patches by Pierre Cladé, Tim Head, José Sabater Montes, Martijn Pieters, Ram Rachum, Christoph Deil, Gabi Davar, Roman Yurchak and Paul Romano are gratefully acknowledged. I would also like to thank users who contributed with feedback and suggestions, which greatly helped improve this program: Joaquin Abian, Jason Moore, Martin Lutz, Víctor Terrón, Matt Newville, Matthew Peel, Don Peterson, Mika Pflueger, Albert Puig, Abraham Lee, Arian Sanusi, Martin Laloux, Jonathan Whitmore, Federico Vaggi, Marco A. Ferra, Hernan Grecco, David Zwicker, James Hester, Andrew Nelson, and many others. I am grateful to the Anaconda, macOS and Linux distribution maintainers of this package (Jonathan Stickel, David Paleino, Federico Ceratto, Roberto Colistete Jr, Filipe Pires Alvarenga Fernandes, and Felix Yan) and also to Gabi Davar and Pierre Raybaut for including it in `Python(x,y)`_ and to Christoph Gohlke for including it in his Base distribution of `scientific Python packages`_ for Windows. .. index:: license License ======= This software is released under a **dual license**; one of the following options can be chosen: 1. The `Revised BSD License`_ (© 2010–2021, Eric O. LEBIGOT [EOL]). 2. 
Any other license, as long as it is obtained from the creator of this package. .. _Python: http://python.org/ .. _Python(x,y): https://python-xy.github.io/ .. _scientific Python packages: http://www.lfd.uci.edu/~gohlke/pythonlibs/ .. _error propagation theory: http://en.wikipedia.org/wiki/Propagation_of_uncertainty .. _invoking the Python interpreter: http://docs.python.org/tutorial/interpreter.html .. _setuptools: http://pypi.python.org/pypi/setuptools .. _download: http://pypi.python.org/pypi/uncertainties/#downloads .. _donations: https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=4TK7KNDTEDT4S .. _Eric O. LEBIGOT (EOL): http://linkedin.com/pub/eric-lebigot/22/293/277 .. _sent: mailto:eric.lebigot@normalesup.org .. _Revised BSD License: http://opensource.org/licenses/BSD-3-Clause .. _uncertainties package: http://pypi.python.org/pypi/uncertainties/ .. _pydoc: http://docs.python.org/library/pydoc.html .. _NumPy: http://numpy.scipy.org/ .. _donating $10: donations_ .. _version history: https://pypi.python.org/pypi/uncertainties#version-history .. _soerp: https://pypi.python.org/pypi/soerp .. _mcerp: https://pypi.python.org/pypi/mcerp .. _Pint: https://pypi.python.org/pypi/Pint/ .. _future: https://pypi.org/project/future/ uncertainties-3.1.7/doc/index_TOC.rst000066400000000000000000000002031425362552000175110ustar00rootroot00000000000000Table of Contents ================= .. toctree:: :maxdepth: 1 Overview user_guide numpy_guide tech_guide uncertainties-3.1.7/doc/make.bat000066400000000000000000000056671425362552000165730ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation set SPHINXBUILD=sphinx-build set ALLSPHINXOPTS=-d _build/doctrees %SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. 
dirhtml to make HTML files named index.html in directories echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (_build\*) do rmdir /q /s %%i del /q /s _build\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% _build/html echo. echo.Build finished. The HTML pages are in _build/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% _build/dirhtml echo. echo.Build finished. The HTML pages are in _build/dirhtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% _build/pickle echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% _build/json echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% _build/htmlhelp echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in _build/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% _build/qthelp echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in _build/qthelp, like this: echo.^> qcollectiongenerator _build\qthelp\uncertaintiesPythonpackage.qhcp echo.To view the help file: echo.^> assistant -collectionFile _build\qthelp\uncertaintiesPythonpackage.ghc goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% _build/latex echo. echo.Build finished; the LaTeX files are in _build/latex. 
goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% _build/changes echo. echo.The overview file is in _build/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% _build/linkcheck echo. echo.Link check complete; look for any errors in the above output ^ or in _build/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% _build/doctest echo. echo.Testing of doctests in the sources finished, look at the ^ results in _build/doctest/output.txt. goto end ) :end uncertainties-3.1.7/doc/numpy_guide.rst000066400000000000000000000163351425362552000202370ustar00rootroot00000000000000.. index: NumPy support ======================= Uncertainties in arrays ======================= .. index:: unumpy The unumpy package ================== This package contains: 1. utilities that help with the **creation and manipulation** of NumPy_ arrays and matrices of numbers with uncertainties; 2. **generalizations** of multiple NumPy functions so that they also work with arrays that contain numbers with uncertainties. While :ref:`basic operations on arrays ` that contain numbers with uncertainties can be performed without it, the :mod:`unumpy` package is useful for more advanced uses. Operations on arrays (including their cosine, etc.) can thus be performed transparently. These features can be made available with >>> from uncertainties import unumpy .. Here, there is no need to mention unumpy.unlinalg, because it is indeed made available through "import unumpy". Creation and manipulation of arrays and matrices ------------------------------------------------ .. 
index:: single: arrays; creation and manipulation single: creation; arrays Arrays ^^^^^^ Arrays of numbers with uncertainties can be built from values and uncertainties: >>> arr = unumpy.uarray([1, 2], [0.01, 0.002]) >>> print arr [1.0+/-0.01 2.0+/-0.002] NumPy arrays of numbers with uncertainties can also be built directly through NumPy, thanks to NumPy's support of arrays of arbitrary objects: >>> arr = numpy.array([ufloat(1, 0.1), ufloat(2, 0.002)]) .. index:: single: matrices; creation and manipulation single: creation; matrices Matrices ^^^^^^^^ Matrices of numbers with uncertainties are best created in one of two ways. The first way is similar to using :func:`uarray`: >>> mat = unumpy.umatrix([1, 2], [0.01, 0.002]) Matrices can also be built by converting arrays of numbers with uncertainties into matrices through the :class:`unumpy.matrix` class: >>> mat = unumpy.matrix(arr) :class:`unumpy.matrix` objects behave like :class:`numpy.matrix` objects of numbers with uncertainties, but with better support for some operations (such as matrix inversion). For instance, regular NumPy matrices cannot be inverted, if they contain numbers with uncertainties (i.e., ``numpy.matrix([[ufloat(…), …]]).I`` does not work). This is why the :class:`unumpy.matrix` class is provided: both the inverse and the pseudo-inverse of a matrix can be calculated in the usual way: if :data:`mat` is a :class:`unumpy.matrix`, >>> print mat.I does calculate the inverse or pseudo-inverse of :data:`mat` with uncertainties. .. index:: pair: nominal value; uniform access (array) pair: uncertainty; uniform access (array) pair: standard deviation; uniform access (array) Uncertainties and nominal values ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Nominal values and uncertainties in arrays (and matrices) can be directly accessed (through functions that work on pure float arrays too): >>> unumpy.nominal_values(arr) array([ 1., 2.]) >>> unumpy.std_devs(mat) matrix([[ 0.1 , 0.002]]) .. 
index:: mathematical operation; on an array of numbers Mathematical functions ---------------------- This module defines uncertainty-aware mathematical functions that generalize those from :mod:`uncertainties.umath` so that they work on NumPy arrays of numbers with uncertainties instead of just scalars: >>> print unumpy.cos(arr) # Cosine of each array element NumPy's function names are used, and not those from the :mod:`math` module (for instance, :func:`unumpy.arccos` is defined, like in NumPy, and is not named :func:`acos` like in the :mod:`math` module). The definition of the mathematical quantities calculated by these functions is available in the documentation for :mod:`uncertainties.umath` (which is accessible through :func:`help` or ``pydoc``). .. index:: pair: testing and operations (in arrays); NaN NaN testing and NaN-aware operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ One particular function pertains to NaN testing: ``unumpy.isnan()``. It returns true for each NaN *nominal value* (and false otherwise). Since NaN±1 is *not* (the scalar) NaN, functions like ``numpy.nanmean()`` do not skip such values. This is where ``unumpy.isnan()`` is useful, as it can be used for masking out numbers with a NaN nominal value: >>> nan = float("nan") >>> arr = numpy.array([nan, uncertainties.ufloat(nan, 1), uncertainties.ufloat(1, nan), 2]) >>> arr array([nan, nan+/-1.0, 1.0+/-nan, 2], dtype=object) >>> arr[~unumpy.isnan(arr)].mean() 1.5+/-nan or equivalently, by using masked arrays: >>> masked_arr = numpy.ma.array(arr, mask=unumpy.isnan(arr)) >>> masked_arr.mean() 1.5+/-nan In this case the uncertainty is NaN as it should be, because one of the numbers does have an undefined uncertainty, which makes the final uncertainty undefined (but the average is well defined). In general, uncertainties are not NaN and one obtains the mean of the non-NaN values. .. index:: saving to file; array .. 
index:: reading from file; array Storing arrays in text format ============================= Arrays of numbers with uncertainties can be directly :ref:`pickled `, saved to file and read from a file. Pickling has the advantage of preserving correlations between errors. Storing instead arrays in **text format** loses correlations between errors but has the advantage of being both computer- and human-readable. This can be done through NumPy's :func:`savetxt` and :func:`loadtxt`. Writing the array to file can be done by asking NumPy to use the *representation* of numbers with uncertainties (instead of the default float conversion): >>> numpy.savetxt('arr.txt', arr, fmt='%r') This produces a file `arr.txt` that contains a text representation of the array:: 1.0+/-0.01 2.0+/-0.002 The file can then be read back by instructing NumPy to convert all the columns with :func:`uncertainties.ufloat_fromstr`. The number :data:`num_cols` of columns in the input file (1, in our example) must be determined in advance, because NumPy requires a converter for each column separately. For Python 2: >>> converters = dict.fromkeys(range(num_cols), uncertainties.ufloat_fromstr) For Python 3, since :func:`numpy.loadtxt` passes bytes to converters, they must first be converted into a string: >>> converters = dict.fromkeys( range(num_cols), lambda col_bytes: uncertainties.ufloat_fromstr(col_bytes.decode("latin1"))) (Latin 1 appears to in fact be the encoding used in :func:`numpy.savetxt` [as of NumPy 1.12]. This encoding seems to be the one hardcoded in :func:`numpy.compat.asbytes`.) The array can then be loaded: >>> arr = numpy.loadtxt('arr.txt', converters=converters, dtype=object) .. index:: linear algebra; additional functions, ulinalg Additional array functions: unumpy.ulinalg ========================================== The :mod:`unumpy.ulinalg` module contains more uncertainty-aware functions for arrays that contain numbers with uncertainties. 
It currently offers generalizations of two functions from :mod:`numpy.linalg` that work on arrays (or matrices) that contain numbers with uncertainties, the **matrix inverse and pseudo-inverse**: >>> unumpy.ulinalg.inv([[ufloat(2, 0.1)]]) array([[0.5+/-0.025]], dtype=object) >>> unumpy.ulinalg.pinv(mat) matrix([[0.2+/-0.0012419339757], [0.4+/-0.00161789987329]], dtype=object) .. _NumPy: http://numpy.scipy.org/ uncertainties-3.1.7/doc/tech_guide.rst000066400000000000000000000335421425362552000200110ustar00rootroot00000000000000.. index:: technical details =============== Technical Guide =============== Testing whether an object is a number with uncertainty ------------------------------------------------------ The recommended way of testing whether :data:`value` carries an uncertainty handled by this module is by checking whether :data:`value` is an instance of :class:`UFloat`, through ``isinstance(value, uncertainties.UFloat)``. .. index:: pickling .. index:: saving to file; number with uncertainty .. index:: reading from file; number with uncertainty .. _pickling: Pickling -------- The quantities with uncertainties created by the :mod:`uncertainties` package can be `pickled `_ (they can be stored in a file, for instance). If multiple variables are pickled together (including when pickling :doc:`NumPy arrays `), their correlations are preserved: >>> import pickle >>> x = ufloat(2, 0.1) >>> y = 2*x >>> p = pickle.dumps([x, y]) # Pickling to a string >>> (x2, y2) = pickle.loads(p) # Unpickling into new variables >>> y2 - 2*x2 0.0+/-0 The final result is exactly zero because the unpickled variables :data:`x2` and :data:`y2` are completely correlated. 
However, **unpickling necessarily creates new variables that bear no relationship with the original variables** (in fact, the pickled representation can be stored in a file and read from another program after the program that did the pickling is finished: the unpickled variables cannot be correlated to variables that can disappear). Thus >>> x - x2 0.0+/-0.14142135623730953 which shows that the original variable :data:`x` and the new variable :data:`x2` are completely uncorrelated. .. index:: comparison operators; technical details .. _comparison_operators: Comparison operators -------------------- Comparison operations (>, ==, etc.) on numbers with uncertainties have a **pragmatic semantics**, in this package: numbers with uncertainties can be used wherever Python numbers are used, most of the time with a result identical to the one that would be obtained with their nominal value only. This allows code that runs with pure numbers to also work with numbers with uncertainties. .. index:: boolean value The **boolean value** (``bool(x)``, ``if x …``) of a number with uncertainty :data:`x` is defined as the result of ``x != 0``, as usual. However, since the objects defined in this module represent probability distributions and not pure numbers, comparison operators are interpreted in a specific way. The result of a comparison operation is defined so as to be essentially consistent with the requirement that uncertainties be small: the **value of a comparison operation** is True only if the operation yields True for all *infinitesimal* variations of its random variables around their nominal values, *except*, possibly, for an *infinitely small number* of cases. Example: >>> x = ufloat(3.14, 0.01) >>> x == x True because a sample from the probability distribution of :data:`x` is always equal to itself. 
However: >>> y = ufloat(3.14, 0.01) >>> x == y False since :data:`x` and :data:`y` are independent random variables that *almost* always give a different value (put differently, :data:`x`-:data:`y` is not equal to 0, as it can take many different values). Note that this is different from the result of ``z = 3.14; t = 3.14; print z == t``, because :data:`x` and :data:`y` are *random variables*, not pure numbers. Similarly, >>> x = ufloat(3.14, 0.01) >>> y = ufloat(3.00, 0.01) >>> x > y True because :data:`x` is supposed to have a probability distribution largely contained in the 3.14±~0.01 interval, while :data:`y` is supposed to be well in the 3.00±~0.01 one: random samples of :data:`x` and :data:`y` will most of the time be such that the sample from :data:`x` is larger than the sample from :data:`y`. Therefore, it is natural to consider that for all practical purposes, ``x > y``. Since comparison operations are subject to the same constraints as other operations, as required by the :ref:`linear approximation ` method, their result should be essentially *constant* over the regions of highest probability of their variables (this is the equivalent of the linearity of a real function, for boolean values). Thus, it is not meaningful to compare the following two independent variables, whose probability distributions overlap: >>> x = ufloat(3, 0.01) >>> y = ufloat(3.0001, 0.01) In fact the function (x, y) → (x > y) is not even continuous over the region where x and y are concentrated, which violates the assumption of approximate linearity made in this package on operations involving numbers with uncertainties. Comparing such numbers therefore returns a boolean result whose meaning is undefined. 
However, values with largely overlapping probability distributions can sometimes be compared unambiguously: >>> x = ufloat(3, 1) >>> x 3.0+/-1.0 >>> y = x + 0.0002 >>> y 3.0002+/-1.0 >>> y > x True In fact, correlations guarantee that :data:`y` is always larger than :data:`x`: ``y-x`` correctly satisfies the assumption of linearity, since it is a constant "random" function (with value 0.0002, even though :data:`y` and :data:`x` are random). Thus, it is indeed true that :data:`y` > :data:`x`. .. index:: linear propagation of uncertainties .. _linear_method: Linear propagation of uncertainties ----------------------------------- Constraints on the uncertainties ================================ This package calculates the standard deviation of mathematical expressions through the linear approximation of `error propagation theory`_. The standard deviations and nominal values calculated by this package are thus meaningful approximations as long as **uncertainties are "small"**. A more precise version of this constraint is that the final calculated functions must have **precise linear expansions in the region where the probability distribution of their variables is the largest**. Mathematically, this means that the linear terms of the final calculated functions around the nominal values of their variables should be much larger than the remaining higher-order terms over the region of significant probability (because such higher-order contributions are neglected). For example, calculating ``x*10`` with :data:`x` = 5±3 gives a *perfect result* since the calculated function is linear. So does ``umath.atan(umath.tan(x))`` for :data:`x` = 0±1, since only the *final* function counts (not an intermediate function like :func:`tan`). Another example is ``sin(0+/-0.01)``, for which :mod:`uncertainties` yields a meaningful standard deviation since the sine is quite linear over 0±0.01. 
However, ``cos(0+/-0.01)`` yields an approximate standard deviation
These probability distributions are reduced to two numbers: a nominal value and an uncertainty. Thus, both independent variables (:class:`Variable` objects) and the result of mathematical operations (:class:`AffineScalarFunc` objects) contain these two values (respectively in their :attr:`nominal_value` and :attr:`std_dev` attributes). .. index:: uncertainty; definition The **uncertainty** of a number with uncertainty is simply defined in this package as the **standard deviation** of the underlying probability distribution. The numbers with uncertainties manipulated by this package are assumed to have a probability distribution mostly contained around their nominal value, in an interval of about the size of their standard deviation. This should cover most practical cases. .. index:: nominal value; definition A good choice of **nominal value** for a number with uncertainty is thus the median of its probability distribution, the location of highest probability, or the average value. Probability distributions (random variables and calculation results) are printed as:: nominal value +/- standard deviation but this does not imply any property on the nominal value (beyond the fact that the nominal value is normally inside the region of high probability density), or that the probability distribution of the result is symmetrical (this is rarely strictly the case). .. _differentiation method: Differentiation method ---------------------- The :mod:`uncertainties` package automatically calculates the derivatives required by linear error propagation theory. Almost all the derivatives of the fundamental functions provided by :mod:`uncertainties` are obtained through analytical formulas (the few mathematical functions that are instead differentiated through numerical approximation are listed in ``umath_core.num_deriv_funcs``). 
The derivatives of mathematical *expressions* are evaluated through a fast and precise method: :mod:`uncertainties` transparently implements `automatic differentiation`_ with reverse accumulation. This method essentially consists in keeping track of the value of derivatives, and in automatically applying the `chain rule `_. Automatic differentiation is faster than symbolic differentiation and more precise than numerical differentiation. The derivatives of any expression can be obtained with :mod:`uncertainties` in a simple way, as demonstrated in the :ref:`User Guide `. .. _variable_tracking: Tracking of random variables ---------------------------- This package keeps track of all the random variables a quantity depends on, which allows one to perform transparent calculations that yield correct uncertainties. For example: >>> x = ufloat(2, 0.1) >>> a = 42 >>> poly = x**2 + a >>> poly 46.0+/-0.4 >>> poly - x*x 42+/-0 Even though ``x*x`` has a non-zero uncertainty, the result has a zero uncertainty, because it is equal to :data:`a`. If the variable :data:`a` above is modified, the value of :data:`poly` is not modified, as is usual in Python: >>> a = 123 >>> print poly 46.0+/-0.4 # Still equal to x**2 + 42, not x**2 + 123 Random variables can, on the other hand, have their uncertainty updated on the fly, because quantities with uncertainties (like :data:`poly`) keep track of them: >>> x.std_dev = 0 >>> print poly 46+/-0 # Zero uncertainty, now As usual, Python keeps track of objects as long as they are used. Thus, redefining the value of :data:`x` does not change the fact that :data:`poly` depends on the quantity with uncertainty previously stored in :data:`x`: >>> x = 10000 >>> print poly 46+/-0 # Unchanged These mechanisms make quantities with uncertainties behave mostly like regular numbers, while providing a fully transparent way of handling correlations between quantities. .. index:: number with uncertainty; classes, Variable class .. 
index:: AffineScalarFunc class .. _classes: Python classes for variables and functions with uncertainty ----------------------------------------------------------- Numbers with uncertainties are represented through two different classes: 1. a class for independent random variables (:class:`Variable`, which inherits from :class:`UFloat`), 2. a class for functions that depend on independent variables (:class:`AffineScalarFunc`, aliased as :class:`UFloat`). Documentation for these classes is available in their Python docstring, which can for instance displayed through pydoc_. The factory function :func:`ufloat` creates variables and thus returns a :class:`Variable` object: >>> x = ufloat(1, 0.1) >>> type(x) :class:`Variable` objects can be used as if they were regular Python numbers (the summation, etc. of these objects is defined). Mathematical expressions involving numbers with uncertainties generally return :class:`AffineScalarFunc` objects, because they represent mathematical functions and not simple variables; these objects store all the variables they depend on: >>> type(umath.sin(x)) .. _automatic differentiation: http://en.wikipedia.org/wiki/Automatic_differentiation .. _pydoc: http://docs.python.org/library/pydoc.html .. _error propagation theory: http://en.wikipedia.org/wiki/Error_propagation .. _soerp: https://pypi.python.org/pypi/soerp .. _mcerp: https://pypi.python.org/pypi/mcerp uncertainties-3.1.7/doc/user_guide.rst000066400000000000000000000543441425362552000200470ustar00rootroot00000000000000.. index:: user guide .. _user guide: ========== User Guide ========== Basic setup =========== Basic mathematical operations involving numbers with uncertainties only require a simple import: >>> from uncertainties import ufloat The :func:`ufloat` function creates numbers with uncertainties. Existing calculation code can usually run with no or little modification and automatically produce results with uncertainties. .. 
The "import uncertainties" is put here because some examples require uncertainties to have been imported (and not only ufloat).
These mathematical functions are found in the :mod:`uncertainties.umath` module: >>> from uncertainties.umath import * # Imports sin(), etc. >>> sin(x**2) 0.03998933418663417+/-0.003996800426643912 The list of available mathematical functions can be obtained with the ``pydoc uncertainties.umath`` command. .. index:: pair: testing (scalar); NaN NaN testing ----------- NaN values can appear in a number with uncertainty. Care must be taken with such values, as values like NaN±1, 1±NaN and NaN±NaN are by definition *not* NaN, which is a float. Testing whether a number with uncertainty has a **NaN nominal value** can be done with the provided function ``uncertainties.umath.isnan()``, which generalizes the standard ``math.isnan()``. Checking whether the *uncertainty* of ``x`` is NaN can be done directly with the standard function: ``math.isnan(x.std_dev)`` (or equivalently ``math.isnan(x.s)``). .. index:: arrays; simple use, matrices; simple use .. _simple_array_use: Arrays of numbers with uncertainties ==================================== It is possible to put numbers with uncertainties in NumPy_ arrays and matrices: >>> arr = numpy.array([ufloat(1, 0.01), ufloat(2, 0.1)]) >>> 2*arr [2.0+/-0.02 4.0+/-0.2] >>> print arr.sum() 3.00+/-0.10 Thus, usual operations on NumPy arrays can be performed transparently even when these arrays contain numbers with uncertainties. :doc:`More complex operations on NumPy arrays and matrices ` can be performed through the dedicated :mod:`uncertainties.unumpy` module. .. index:: correlations; detailed example Automatic correlations ====================== Correlations between variables are **automatically handled** whatever the number of variables involved, and whatever the complexity of the calculation. 
For example, when :data:`x` is the number with uncertainty defined above, >>> square = x**2 >>> print square 0.040+/-0.004 >>> square - x*x 0.0+/-0 >>> y = x*x + 1 >>> y - square 1.0+/-0 The last two printed results above have a zero uncertainty despite the fact that :data:`x`, :data:`y` and :data:`square` have a non-zero uncertainty: the calculated functions give the same value for all samples of the random variable :data:`x`. Thanks to the automatic correlation handling, calculations can be performed in as many steps as necessary, exactly as with simple floats. When various quantities are combined through mathematical operations, the result is calculated by taking into account all the correlations between the quantities involved. All of this is done completely **transparently**. Access to the uncertainty and to the nominal value ================================================== .. index:: pair: nominal value; scalar pair: uncertainty; scalar The nominal value and the uncertainty (standard deviation) can also be accessed independently: >>> print square 0.040+/-0.004 >>> print square.nominal_value 0.04 >>> print square.n # Abbreviation 0.04 >>> print square.std_dev 0.004 >>> print square.s # Abbreviation 0.004 Access to the individual sources of uncertainty =============================================== The various contributions to an uncertainty can be obtained through the :func:`error_components` method, which maps the **independent variables a quantity depends on** to their **contribution to the total uncertainty**. According to :ref:`linear error propagation theory ` (which is the method followed by :mod:`uncertainties`), the sum of the squares of these contributions is the squared uncertainty. 
The individual contributions to the uncertainty are more easily usable when the variables are **tagged**: >>> u = ufloat(1, 0.1, "u variable") # Tag >>> v = ufloat(10, 0.1, "v variable") >>> sum_value = u+2*v >>> sum_value 21.0+/-0.223606797749979 >>> for (var, error) in sum_value.error_components().items(): ... print "{}: {}".format(var.tag, error) ... u variable: 0.1 v variable: 0.2 The variance (i.e. squared uncertainty) of the result (:data:`sum_value`) is the quadratic sum of these independent uncertainties, as it should be (``0.1**2 + 0.2**2``). The tags *do not have to be distinct*. For instance, *multiple* random variables can be tagged as ``"systematic"``, and their contribution to the total uncertainty of :data:`result` can simply be obtained as: >>> syst_error = math.sqrt(sum( # Error from *all* systematic errors ... error**2 ... for (var, error) in result.error_components().items() ... if var.tag == "systematic")) The remaining contribution to the uncertainty is: >>> other_error = math.sqrt(result.std_dev**2 - syst_error**2) The variance of :data:`result` is in fact simply the quadratic sum of these two errors, since the variables from :func:`result.error_components` are independent. .. index:: comparison operators Comparison operators ==================== Comparison operators behave in a natural way: >>> print x 0.200+/-0.010 >>> y = x + 0.0001 >>> y 0.2001+/-0.01 >>> y > x True >>> y > 0 True One important concept to keep in mind is that :func:`ufloat` creates a random variable, so that two numbers with the same nominal value and standard deviation are generally different: >>> y = ufloat(1, 0.1) >>> z = ufloat(1, 0.1) >>> print y 1.00+/-0.10 >>> print z 1.00+/-0.10 >>> y == y True >>> y == z False In physical terms, two rods of the same nominal length and uncertainty on their length are generally of different sizes: :data:`y` is different from :data:`z`. 
More detailed information on the semantics of comparison operators for numbers with uncertainties can be found in the :ref:`Technical Guide `. .. index:: covariance matrix Covariance and correlation matrices =================================== Covariance matrix ----------------- The covariance matrix between various variables or calculated quantities can be simply obtained: >>> sum_value = u+2*v >>> cov_matrix = uncertainties.covariance_matrix([u, v, sum_value]) has value :: [[0.01, 0.0, 0.01], [0.0, 0.01, 0.02], [0.01, 0.02, 0.05]] In this matrix, the zero covariances indicate that :data:`u` and :data:`v` are independent from each other; the last column shows that :data:`sum_value` does depend on these variables. The :mod:`uncertainties` package keeps track at all times of all correlations between quantities (variables and functions): >>> sum_value - (u+2*v) 0.0+/-0 Correlation matrix ------------------ If the NumPy_ package is available, the correlation matrix can be obtained as well: >>> corr_matrix = uncertainties.correlation_matrix([u, v, sum_value]) >>> corr_matrix array([[ 1. , 0. , 0.4472136 ], [ 0. , 1. , 0.89442719], [ 0.4472136 , 0.89442719, 1. ]]) .. index:: correlations; correlated variables Correlated variables ==================== Reciprocally, **correlated variables can be created** transparently, provided that the NumPy_ package is available. 
Use of a covariance matrix -------------------------- Correlated variables can be obtained through the *covariance* matrix: >>> (u2, v2, sum2) = uncertainties.correlated_values([1, 10, 21], cov_matrix) creates three new variables with the listed nominal values, and the given covariance matrix: >>> sum_value 21.0+/-0.223606797749979 >>> sum2 21.0+/-0.223606797749979 >>> sum2 - (u2+2*v2) 0.0+/-3.83371856862256e-09 The theoretical value of the last expression is exactly zero, like for ``sum - (u+2*v)``, but numerical errors yield a small uncertainty (3e-9 is indeed very small compared to the uncertainty on :data:`sum2`: correlations should in fact cancel the uncertainty on :data:`sum2`). The covariance matrix is the desired one: >>> uncertainties.covariance_matrix([u2, v2, sum2]) reproduces the original covariance matrix :data:`cov_matrix` (up to rounding errors). Use of a correlation matrix --------------------------- Alternatively, correlated values can be defined through: - a sequence of nominal values and standard deviations, and - a *correlation* matrix between each variable of this sequence (the correlation matrix is the covariance matrix normalized with individual standard deviations; it has ones on its diagonal)—in the form of a NumPy array-like object, e.g. a list of lists, or a NumPy array. Example: >>> (u3, v3, sum3) = uncertainties.correlated_values_norm( ... [(1, 0.1), (10, 0.1), (21, 0.22360679774997899)], corr_matrix) >>> print u3 1.00+/-0.10 The three returned numbers with uncertainties have the correct uncertainties and correlations (:data:`corr_matrix` can be recovered through :func:`correlation_matrix`). .. index:: single: C code; wrapping single: Fortran code; wrapping single: wrapping (C, Fortran,…) functions .. index:: printing formatting Printing ======== .. 
Overview: Numbers with uncertainties can be printed conveniently: >>> print x 0.200+/-0.010 The resulting form can generally be parsed back with :func:`ufloat_fromstr` (except for the LaTeX form). .. Precision matching: The nominal value and the uncertainty always have the **same precision**: this makes it easier to compare them. Standard formats ---------------- .. Formatting method: More **control over the format** can be obtained (in Python 2.6+) through the usual :func:`format` method of strings: >>> print 'Result = {:10.2f}'.format(x) Result = 0.20+/- 0.01 (Python 2.6 requires ``'{0:10.2f}'`` instead, with the usual explicit index. In Python 2.5 and earlier versions, :func:`str.format` is not available, but one can use the :func:`format` method of numbers with uncertainties instead: ``'Result = %s' % x.format('10.2f')``.) .. Legacy formats and base syntax of the format specification: **All the float format specifications** are accepted, except those with the ``n`` format type. In particular, a fill character, an alignment option, a sign or zero option, a width, or the ``%`` format type are all supported. The usual **float formats with a precision** retain their original meaning (e.g. ``.2e`` uses two digits after the decimal point): code that works with floats produces similar results when running with numbers with uncertainties. Precision control ----------------- .. 
Precision control: It is possible to **control the number of significant digits of the uncertainty** by adding the precision modifier ``u`` after the precision (and before any valid float format type like ``f``, ``e``, the empty format type, etc.): >>> print '1 significant digit on the uncertainty: {:.1u}'.format(x) 1 significant digit on the uncertainty: 0.20+/-0.01 >>> print '3 significant digits on the uncertainty: {:.3u}'.format(x) 3 significant digits on the uncertainty: 0.2000+/-0.0100 >>> print '1 significant digit, exponent notation: {:.1ue}'.format(x) 1 significant digit, exponent notation: (2.0+/-0.1)e-01 >>> print '1 significant digit, percentage: {:.1u%}'.format(x) 1 significant digit, percentage: (20+/-1)% When :mod:`uncertainties` must **choose the number of significant digits on the uncertainty**, it uses the `Particle Data Group `_ rounding rules (these rules keep the number of digits small, which is convenient for reading numbers with uncertainties, and at the same time prevent the uncertainty from being displayed with too few digits): >>> print 'Automatic number of digits on the uncertainty: {}'.format(x) Automatic number of digits on the uncertainty: 0.200+/-0.010 >>> print x 0.200+/-0.010 Custom options -------------- .. Options: :mod:`uncertainties` provides even more flexibility through custom formatting options. They can be added at the end of the format string: - ``P`` for **pretty-printing**: >>> print '{:.2e}'.format(x) (2.00+/-0.10)e-01 >>> print u'{:.2eP}'.format(x) (2.00±0.10)×10⁻¹ The pretty-printing mode thus uses "±", "×" and superscript exponents. Note that the pretty-printing mode implies using **Unicode format strings** (``u'…'`` in Python 2, but simply ``'…'`` in Python 3). - ``S`` for the **shorthand notation**: >>> print '{:+.1uS}'.format(x) # Sign, 1 digit for the uncertainty, shorthand +0.20(1) In this notation, the digits in parentheses represent the uncertainty on the last digits of the nominal value. 
- ``L`` for a **LaTeX** output: >>> print x*1e7 (2.00+/-0.10)e+06 >>> print '{:L}'.format(x*1e7) # Automatic exponent form, LaTeX \left(2.00 \pm 0.10\right) \times 10^{6} - ``p`` is for requiring that parentheses be always printed around the …±… part (without enclosing any exponent or trailing "%", etc.). This can for instance be useful so as to explicitly factor physical units: >>> print '{:p} kg'.format(x) # Adds parentheses (0.200+/-0.010) kg >>> print("{:p} kg".format(x*1e7)) # No parentheses added (exponent) (2.00+/-0.10)e+06 kg These custom formatting options **can be combined** (when meaningful). Details ------- .. Common exponent: A **common exponent** is automatically calculated if an exponent is needed for the larger of the nominal value (in absolute value) and the uncertainty (the rule is the same as for floats). The exponent is generally **factored**, for increased legibility: >>> print x*1e7 (2.00+/-0.10)e+06 When a *format width* is used, the common exponent is not factored: >>> print 'Result = {:10.1e}'.format(x*1e-10) Result = 2.0e-11+/- 0.1e-11 (Using a (minimal) width of 1 is thus a way of forcing exponents to not be factored.) Thanks to this feature, each part (nominal value and standard deviation) is correctly aligned across multiple lines, while the relative magnitude of the error can still be readily estimated thanks to the common exponent. .. 
Special cases: An uncertainty which is *exactly* **zero** is always formatted as an integer: >>> print ufloat(3.1415, 0) 3.1415+/-0 >>> print ufloat(3.1415e10, 0) (3.1415+/-0)e+10 >>> print ufloat(3.1415, 0.0005) 3.1415+/-0.0005 >>> print '{:.2f}'.format(ufloat(3.14, 0.001)) 3.14+/-0.00 >>> print '{:.2f}'.format(ufloat(3.14, 0.00)) 3.14+/-0 **All the digits** of a number with uncertainty are given in its representation: >>> y = ufloat(1.23456789012345, 0.123456789) >>> print y 1.23+/-0.12 >>> print repr(y) 1.23456789012345+/-0.123456789 >>> y 1.23456789012345+/-0.123456789 **More information** on formatting can be obtained with ``pydoc uncertainties.UFloat.__format__`` (customization of the LaTeX output, etc.). Global formatting ----------------- It is sometimes useful to have a **consistent formatting** across multiple parts of a program. Python's `string.Formatter class `_ allows one to do just that. Here is how it can be used to consistently use the shorthand notation for numbers with uncertainties: .. code-block:: python class ShorthandFormatter(string.Formatter): def format_field(self, value, format_spec): if isinstance(value, uncertainties.UFloat): return value.format(format_spec+'S') # Shorthand option added # Special formatting for other types can be added here (floats, etc.) else: # Usual formatting: return super(ShorthandFormatter, self).format_field( value, format_spec) frmtr = ShorthandFormatter() print frmtr.format("Result = {0:.1u}", x) # 1-digit uncertainty prints with the shorthand notation: ``Result = 0.20(1)``. Customizing the pretty-print and LaTeX outputs ---------------------------------------------- The pretty print and LaTeX outputs themselves can be customized. 
For example, the pretty-print representation of numbers with uncertainty can display multiplication with a centered dot (⋅) instead of the default symbol (×), like in ``(2.00±0.10)⋅10⁻¹``; this is easily done through the global setting ``uncertainties.core.MULT_SYMBOLS["pretty-print"] = "⋅"``. Beyond this multiplication symbol, the "±" symbol, the parentheses and the exponent representations can also be customized globally. The details can be found in the documentation of :func:`uncertainties.core.format_num`. Making custom functions accept numbers with uncertainties ========================================================= This package allows **code which is not meant to be used with numbers with uncertainties to handle them anyway**. This is for instance useful when calling external functions (which are out of the user's control), including functions written in C or Fortran. Similarly, **functions that do not have a simple analytical form** can be automatically wrapped so as to also work with arguments that contain uncertainties. It is thus possible to take a function :func:`f` *that returns a single float*, and to automatically generalize it so that it also works with numbers with uncertainties: >>> wrapped_f = uncertainties.wrap(f) The new function :func:`wrapped_f` (optionally) *accepts a number with uncertainty* in place of any float *argument* of :func:`f` (note that floats contained instead *inside* arguments of :func:`f`, like in a list or a NumPy array, *cannot* be replaced by numbers with uncertainties). :func:`wrapped_f` returns the same values as :func:`f`, but with uncertainties. With a simple wrapping call like above, uncertainties in the function result are automatically calculated numerically. **Analytical uncertainty calculations can be performed** if derivatives are provided to :func:`wrap`. More details are available in the documentation string of :func:`wrap` (accessible through the ``pydoc`` command, or Python's :func:`help` shell function). 
Miscellaneous utilities ======================= .. index:: standard deviation; on the fly modification It is sometimes useful to modify the error on certain parameters so as to study its impact on a final result. With this package, the **uncertainty of a variable can be changed** on the fly: >>> sum_value = u+2*v >>> sum_value 21.0+/-0.223606797749979 >>> prev_uncert = u.std_dev >>> u.std_dev = 10 >>> sum_value 21.0+/-10.00199980003999 >>> u.std_dev = prev_uncert The relevant concept is that :data:`sum_value` does depend on the variables :data:`u` and :data:`v`: the :mod:`uncertainties` package keeps track of this fact, as detailed in the :ref:`Technical Guide `, and uncertainties can thus be updated at any time. .. index:: pair: nominal value; uniform access (scalar) pair: uncertainty; uniform access (scalar) pair: standard deviation; uniform access (scalar) When manipulating ensembles of numbers, *some* of which contain uncertainties while others are simple floats, it can be useful to access the **nominal value and uncertainty of all numbers in a uniform manner**. This is what the :func:`nominal_value` and :func:`std_dev` functions do: >>> print uncertainties.nominal_value(x) 0.2 >>> print uncertainties.std_dev(x) 0.01 >>> uncertainties.nominal_value(3) 3 >>> uncertainties.std_dev(3) 0.0 Finally, a utility method is provided that directly yields the `standard score `_ (number of standard deviations) between a number and a result with uncertainty: with :data:`x` equal to 0.20±0.01, >>> x.std_score(0.17) -3.0 .. index:: derivatives .. 
_derivatives: Derivatives =========== Since the application of :ref:`linear error propagation theory ` involves the calculation of **derivatives**, this package automatically performs such calculations; users can thus easily get the derivative of an expression with respect to any of its variables: >>> u = ufloat(1, 0.1) >>> v = ufloat(10, 0.1) >>> sum_value = u+2*v >>> sum_value.derivatives[u] 1.0 >>> sum_value.derivatives[v] 2.0 These values are obtained with a :ref:`fast differentiation algorithm `. Additional information ====================== The capabilities of the :mod:`uncertainties` package in terms of array handling are detailed in :doc:`numpy_guide`. Details about the theory behind this package and implementation information are given in the :doc:`tech_guide`. .. _NumPy: http://numpy.scipy.org/ .. |minus2html| raw:: html -2 uncertainties-3.1.7/setup.py000077500000000000000000000374041425362552000161300ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # !! This program must run with all version of Python since 2.3 included. import os import sys # setuptools has python_requires, but distutils doesn't, so we test the # Python version manually: min_version = (2, 7) error_msg = ("Sorry, this package is for Python %d.%d and higher only." % min_version) try: if sys.version_info < min_version: sys.exit(error_msg) except AttributeError: # sys.version_info was introduced in Python 2.0 sys.exit(error_msg) # Common options for distutils/setuptools's setup(): setup_options = dict( name='uncertainties', version='3.1.7', author='Eric O. 
LEBIGOT (EOL)', author_email='eric.lebigot@normalesup.org', url='http://uncertainties-python-package.readthedocs.io/', license='Revised BSD License', description=('Transparent calculations with uncertainties on the' ' quantities involved (aka error propagation);' ' fast calculation of derivatives'), long_description='''\ Overview ======== ``uncertainties`` allows **calculations** such as (2 +/- 0.1)*2 = 4 +/- 0.2 to be **performed transparently**. Much more complex mathematical expressions involving numbers with uncertainties can also be evaluated directly. The ``uncertainties`` package **takes the pain and complexity out** of uncertainty calculations. **Detailed information** about this package can be found on its `main website`_. Basic examples ============== .. code-block:: python >>> from uncertainties import ufloat >>> x = ufloat(2, 0.25) >>> x 2.0+/-0.25 >>> square = x**2 # Transparent calculations >>> square 4.0+/-1.0 >>> square.nominal_value 4.0 >>> square.std_dev # Standard deviation 1.0 >>> square - x*x 0.0 # Exactly 0: correlations taken into account >>> from uncertainties.umath import * # sin(), etc. >>> sin(1+x**2) -0.95892427466313845+/-0.2836621854632263 >>> print (2*x+1000).derivatives[x] # Automatic calculation of derivatives 2.0 >>> from uncertainties import unumpy # Array manipulation >>> random_vars = unumpy.uarray([1, 2], [0.1, 0.2]) >>> print random_vars [1.0+/-0.1 2.0+/-0.2] >>> print random_vars.mean() 1.50+/-0.11 >>> print unumpy.cos(random_vars) [0.540302305868+/-0.0841470984808 -0.416146836547+/-0.181859485365] Main features ============= - **Transparent calculations with uncertainties**: **no or little modification of existing code** is needed. Similarly, the Python_ (or IPython_) shell can be used as **a powerful calculator** that handles quantities with uncertainties (``print`` statements are optional, which is convenient). - **Correlations** between expressions are correctly taken into account. 
Thus, ``x-x`` is exactly zero, for instance (most implementations found on the web yield a non-zero uncertainty for ``x-x``, which is incorrect). - **Almost all mathematical operations** are supported, including most functions from the standard math_ module (sin,...). Comparison operators (``>``, ``==``, etc.) are supported too. - Many **fast operations on arrays and matrices** of numbers with uncertainties are supported. - **Extensive support for printing** numbers with uncertainties (including LaTeX support and pretty-printing). - Most uncertainty calculations are performed **analytically**. - This module also gives access to the **derivatives** of any mathematical expression (they are used by error propagation theory, and are thus automatically calculated by this module). Installation or upgrade ======================= Installation instructions are available on the `main web site `_ for this package. Contact ======= Please send **feature requests, bug reports, or feedback** to `Eric O. LEBIGOT (EOL)`_. Version history =============== Main changes: - 3.1.6: The pretty-print and LaTeX format can now be customized. - 3.1.5: Added a "p" formatting option, that makes sure that there are always parentheses around the … ± … part of printed numbers. - 3.1.4: Python 2.7+ is now required. - 3.1.2: Fix for NumPy 1.17 and ``unumpy.ulinalg.pinv()``. - 3.1: Variables built through a correlation or covariance matrix, and that have uncertainties that span many orders of magnitude are now calculated more accurately (improved ``correlated_values()`` and ``correlated_values_norm()`` functions). - 3.0: Massive speedup for some operations involving large numbers of numbers with uncertainty, like ``sum(ufloat(1, 1) for _ in xrange(100000))`` (this is about 5,000 times faster than before). - 2.4.8: Friendlier completions in Python shells, etc.: internal functions should not appear anymore (for the user modules: ``uncertainties``, ``uncertainties.umath`` and ``uncertainties.unumpy``). 
Parsing the shorthand notation (e.g. ``3.1(2)``) now works with infinite values (e.g. ``-inf(inf)``); this mirrors the ability to print such numbers with uncertainty. The Particle Data Group rounding rule is applied in more cases (e.g. printing 724.2±26.2 now gives ``724±26``). The shorthand+LaTeX formatting of numbers with an infinite nominal value is fixed. ``uncertainties.unumpy.matrix`` now uses ``.std_devs`` instead of ``.std_devs()``, for consistency with floats with uncertainty (automatic conversion of code added to ``uncertainties.1to2``). - 2.4.7: String formatting now works for ``(-)inf+/-...`` numbers. - 2.4.5: String formatting now works for ``NaN+/-...`` numbers. - 2.4.4: The documentation license now allows its commercial use. - 2.4.2: `NumPy 1.8 compatibility `_. - 2.4.1: In ``uncertainties.umath``, functions ``ceil()``, ``floor()``, ``isinf()``, ``isnan()`` and ``trunc()`` now return values of the same type as the corresponding ``math`` module function (instead of generally returning a value with a zero uncertainty ``...+/-0``). - 2.4: Extensive support for the formatting_ of numbers with uncertainties. A zero uncertainty is now explicitly displayed as the integer 0. The new formats are generally understood by ``ufloat_fromstr()``. Abbreviations for the nominal value (``n``) and the standard deviation (``s``) are now available. - 2.3.6: Full support for limit cases of the power operator ``umath.pow()``. - 2.3.5: Uncertainties and derivatives can now be NaN (not-a-number). Full support for numbers with a zero uncertainty (``sqrt(ufloat(0, 0))`` now works). Full support for limit cases of the power operator (``x**y``). - 2.3: Functions wrapped so that they accept numbers with uncertainties instead of floats now have full keyword arguments support (improved ``wrap()`` function). Incompatible change: ``wrap(..., None)`` should be replaced by ``wrap(...)`` or ``wrap(..., [])``. 
- 2.2: Creating arrays and matrices of numbers with uncertainties with ``uarray()`` and ``umatrix()`` now requires two simple arguments (nominal values and standard deviations) instead of a tuple argument. This is consistent with the new, simpler ``ufloat()`` interface. The previous usage will be supported for some time. Users are encouraged to update their code, for instance through the newly provided `code updater`_, which in addition now automatically converts ``.set_std_dev(v)`` to ``.std_dev = v``. - 2.1: Numbers with uncertainties are now created more directly like ``ufloat(3, 0.1)``, ``ufloat(3, 0.1, "pi")``, ``ufloat_fromstr("3.0(1)")``, or ``ufloat_fromstr("3.0(1)", "pi")``. The previous ``ufloat((3, 0.1))`` and ``ufloat("3.0(1)")`` forms will be supported for some time. Users are encouraged to update their code, for instance through the newly provided `code updater`_. - 2.0: The standard deviation is now obtained more directly without an explicit call (``x.std_dev`` instead of ``x.std_dev()``). ``x.std_dev()`` will be supported for some time. Users are encouraged to update their code. The standard deviation of a variable can now be directly updated with ``x.std_dev = 0.1``. As a consequence, ``x.set_std_dev()`` is deprecated. - 1.9.1: Support added for pickling subclasses of ``UFloat`` (= ``Variable``). - 1.9: Added functions for handling correlation matrices: ``correlation_matrix()`` and ``correlated_values_norm()``. (These new functions mirror the covariance-matrix based ``covariance_matrix()`` and ``correlated_values()``.) ``UFloat.position_in_sigmas()`` is now named ``UFloat.std_score()``, so as to follow the common naming convention (`standard score `_). Obsolete functions were removed (from the main module: ``NumberWithUncert``, ``num_with_uncert``, ``array_u``, ``nominal_values``, ``std_devs``). - 1.8: Compatibility with Python 3.2 added. - 1.7.2: Compatibility with Python 2.3, Python 2.4, Jython 2.5.1 and Jython 2.5.2 added. 
- 1.7.1: New semantics: ``ufloat("12.3(78)")`` now represents 12.3+/-7.8 instead of 12.3+/-78. - 1.7: ``ufloat()`` now raises ValueError instead of a generic Exception, when given an incorrect string representation, like ``float()`` does. - 1.6: Testing whether an object is a number with uncertainty should now be done with ``isinstance(..., UFloat)``. ``AffineScalarFunc`` is not imported by ``from uncertainties import *`` anymore, but its new alias ``UFloat`` is. - 1.5.5: The first possible license is now the Revised BSD License instead of GPLv2, which makes it easier to include this package in other projects. - 1.5.4.2: Added ``umath.modf()`` and ``umath.frexp()``. - 1.5.4: ``ufloat`` does not accept a single number (nominal value) anymore. This removes some potential confusion about ``ufloat(1.1)`` (zero uncertainty) being different from ``ufloat("1.1")`` (uncertainty of 1 on the last digit). - 1.5.2: ``float_u``, ``array_u`` and ``matrix_u`` renamed ``ufloat``, ``uarray`` and ``umatrix``, for ease of typing. - 1.5: Added functions ``nominal_value`` and ``std_dev``, and modules ``unumpy`` (additional support for NumPy_ arrays and matrices) and ``unumpy.ulinalg`` (generalization of some functions from ``numpy.linalg``). Memory footprint of arrays of numbers with uncertainties divided by 3. Function ``array_u`` is 5 times faster. Main function ``num_with_uncert`` renamed ``float_u``, for consistency with ``unumpy.array_u`` and ``unumpy.matrix_u``, with the added benefit of a shorter name. - 1.4.5: Added support for the standard ``pickle`` module. - 1.4.2: Added support for the standard ``copy`` module. - 1.4: Added utilities for manipulating NumPy_ arrays of numbers with uncertainties (``array_u``, ``nominal_values`` and ``std_devs``). - 1.3: Numbers with uncertainties are now constructed with ``num_with_uncert()``, which replaces ``NumberWithUncert()``. This simplifies the class hierarchy by removing the ``NumberWithUncert`` class. 
- 1.2.5: Numbers with uncertainties can now be entered as ``NumberWithUncert("1.23+/-0.45")`` too. - 1.2.3: ``log(x, base)`` is now supported by ``umath.log()``, in addition to ``log(x)``. - 1.2.2: Values with uncertainties are now output like 3+/-1, in order to avoid confusing 3+-1 with 3+(-1). - 1.2: A new function, ``wrap()``, is exposed, which allows non-Python functions (e.g. Fortran or C used through a module such as SciPy) to handle numbers with uncertainties. - 1.1: Mathematical functions (such as cosine, etc.) are in a new uncertainties.umath module; they do not override functions from the ``math`` module anymore. - 1.0.12: Main class (``Number_with_uncert``) renamed ``NumberWithUncert`` so as to follow `PEP 8`_. - 1.0.11: ``origin_value`` renamed more appropriately as ``nominal_value``. - 1.0.9: ``correlations()`` renamed more appropriately as ``covariance_matrix()``. .. _Python: http://docs.python.org/tutorial/interpreter.html .. _IPython: http://ipython.readthedocs.io/en/stable/ .. _NumPy: http://numpy.scipy.org/ .. _math: http://docs.python.org/library/math.html .. _PEP 8: http://www.python.org/dev/peps/pep-0008/ .. _error propagation theory: http://en.wikipedia.org/wiki/Propagation_of_uncertainty .. _Eric O. LEBIGOT (EOL): mailto:eric.lebigot@normalesup.org .. _PayPal: https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=4TK7KNDTEDT4S .. _main website: http://uncertainties-python-package.readthedocs.io/ .. _code updater: http://uncertainties-python-package.readthedocs.io/en/latest/index.html#migration-from-version-1-to-version-2 .. 
_formatting: http://uncertainties-python-package.readthedocs.io/en/latest/user_guide.html#printing''', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Other Audience', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', # Python 3.1 failed because of a problem with NumPy 1.6.1 (whereas # everything was fine with Python 3.2 and Python 2.7). 'Programming Language :: Python :: 3.1', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: Implementation :: Jython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Education', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Mathematics', 'Topic :: Scientific/Engineering :: Physics', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Utilities' ], keywords=[ 'error propagation', 'uncertainties', 'uncertainty calculations', 'standard deviation', 'derivatives', 'partial derivatives', 'differentiation' ], # Files are defined in MANIFEST (which is automatically created by # python setup.py sdist bdist_wheel): packages=[ 'uncertainties', 'uncertainties.unumpy', 'uncertainties.lib1to2', 'uncertainties.lib1to2.fixes' ], # The code runs with both Python 2 and Python 3: options={"bdist_wheel": {"universal": True}} ) # The best available setup() is used (some users do not have # setuptools): try: from 
setuptools import setup # Some setuptools-specific options can be added: addtl_setup_options = dict( project_urls={ 'Documentation': 'https://uncertainties-python-package.readthedocs.io/', 'Source': 'https://github.com/lebigot/uncertainties' }, install_requires=['future'], tests_require=['nose', 'numpy'], # Optional dependencies install using: # `easy_install uncertainties[optional]` extras_require={ 'optional': ['numpy'], 'docs': ['sphinx'], } ) # easy_install uncertainties[tests] option: addtl_setup_options['extras_require']['tests'] = ( addtl_setup_options['tests_require']) # easy_install uncertainties[all] option: all dependencies are # gathered addtl_setup_options['extras_require']['all'] = set( sum(addtl_setup_options['extras_require'].values(), [])) setup_options.update(addtl_setup_options) except ImportError: from distutils.core import setup # End of setup definition setup(**setup_options) uncertainties-3.1.7/uncertainties/000077500000000000000000000000001425362552000172605ustar00rootroot00000000000000uncertainties-3.1.7/uncertainties/1to2.py000077500000000000000000000006001425362552000204160ustar00rootroot00000000000000#!/usr/bin/env python ''' Fixes code like the 2to3 Python utility, but with fixers from the local fixes directory. (c) 2013 by Eric O. LEBIGOT (EOL). ''' # Code inspired by the 2to3 Python code. import sys if sys.version_info < (2, 6): sys.exit("Please run this program with Python 2.6+.") import lib2to3.main sys.exit(lib2to3.main.main('uncertainties.lib1to2.fixes')) uncertainties-3.1.7/uncertainties/__init__.py000066400000000000000000000222131425362552000213710ustar00rootroot00000000000000#!! Whenever the documentation below is updated, setup.py should be # checked for consistency. ''' Calculations with full error propagation for quantities with uncertainties. Derivatives can also be calculated. Web user guide: https://pythonhosted.org/uncertainties/. Example of possible calculation: (0.2 +/- 0.01)**2 = 0.04 +/- 0.004. 
Correlations between expressions are correctly taken into account (for instance, with x = 0.2+/-0.01, 2*x-x-x is exactly zero, as is y-x-x with y = 2*x). Examples: import uncertainties from uncertainties import ufloat from uncertainties.umath import * # sin(), etc. # Mathematical operations: x = ufloat(0.20, 0.01) # x = 0.20+/-0.01 x = ufloat_fromstr("0.20+/-0.01") # Other representation x = ufloat_fromstr("0.20(1)") # Other representation # Implicit uncertainty of +/-1 on the last digit: x = ufloat_fromstr("0.20") print x**2 # Square: prints "0.040+/-0.004" print sin(x**2) # Prints "0.0399...+/-0.00399..." print x.std_score(0.17) # Prints "-3.0": deviation of -3 sigmas # Access to the nominal value, and to the uncertainty: square = x**2 # Square print square # Prints "0.040+/-0.004" print square.nominal_value # Prints "0.04" print square.std_dev # Prints "0.004..." print square.derivatives[x] # Partial derivative: 0.4 (= 2*0.20) # Correlations: u = ufloat(1, 0.05, "u variable") # Tag v = ufloat(10, 0.1, "v variable") sum_value = u+v u.std_dev = 0.1 # Standard deviations can be updated on the fly print sum_value - u - v # Prints "0+/-0" (exact result) # List of all sources of error: print sum_value # Prints "11.00+/-0.14" for (var, error) in sum_value.error_components().iteritems(): print "%s: %f" % (var.tag, error) # Individual error components # Covariance matrices: cov_matrix = uncertainties.covariance_matrix([u, v, sum_value]) print cov_matrix # 3x3 matrix # Correlated variables can be constructed from a covariance matrix, if # NumPy is available: (u2, v2, sum2) = uncertainties.correlated_values([1, 10, 11], cov_matrix) print u2 # Value and uncertainty of u: correctly recovered (1.00+/-0.10) print uncertainties.covariance_matrix([u2, v2, sum2]) # == cov_matrix - The main function provided by this module is ufloat, which creates numbers with uncertainties (Variable objects). Variable objects can be used as if they were regular Python numbers. 
The main attributes and methods of Variable objects are defined in the documentation of the Variable class. - Valid operations on numbers with uncertainties include basic mathematical functions (addition, etc.). Most operations from the standard math module (sin, etc.) can be applied on numbers with uncertainties by using their generalization from the uncertainties.umath module: from uncertainties.umath import sin print sin(ufloat_fromstr("1+/-0.01")) # 0.841+/-0.005 print sin(1) # umath.sin() also works on floats, exactly like math.sin() Logical operations (>, ==, etc.) are also supported. Basic operations on NumPy arrays or matrices of numbers with uncertainties can be performed: 2*numpy.array([ufloat(1, 0.01), ufloat(2, 0.1)]) More complex operations on NumPy arrays can be performed through the dedicated uncertainties.unumpy sub-module (see its documentation). Calculations that are performed through non-Python code (Fortran, C, etc.) can handle numbers with uncertainties instead of floats through the provided wrap() wrapper: import uncertainties # wrapped_f is a version of f that can take arguments with # uncertainties, even if f only takes floats: wrapped_f = uncertainties.wrap(f) If some derivatives of the wrapped function f are known (analytically, or numerically), they can be given to wrap()--see the documentation for wrap(). - Utility functions are also provided: the covariance matrix between random variables can be calculated with covariance_matrix(), or used as input for the definition of correlated quantities (correlated_values() function--defined only if the NumPy module is available). - Mathematical expressions involving numbers with uncertainties generally return AffineScalarFunc objects, which also print as a value with uncertainty. Their most useful attributes and methods are described in the documentation for AffineScalarFunc. Note that Variable objects are also AffineScalarFunc objects. 
UFloat is an alias for AffineScalarFunc, provided as a convenience: testing whether a value carries an uncertainty handled by this module should be done with insinstance(my_value, UFloat). - Mathematically, numbers with uncertainties are, in this package, probability distributions. These probabilities are reduced to two numbers: a nominal value and an uncertainty. Thus, both variables (Variable objects) and the result of mathematical operations (AffineScalarFunc objects) contain these two values (respectively in their nominal_value and std_dev attributes). The uncertainty of a number with uncertainty is simply defined in this package as the standard deviation of the underlying probability distribution. The numbers with uncertainties manipulated by this package are assumed to have a probability distribution mostly contained around their nominal value, in an interval of about the size of their standard deviation. This should cover most practical cases. A good choice of nominal value for a number with uncertainty is thus the median of its probability distribution, the location of highest probability, or the average value. - When manipulating ensembles of numbers, some of which contain uncertainties, it can be useful to access the nominal value and uncertainty of all numbers in a uniform manner: x = ufloat_fromstr("3+/-0.1") print nominal_value(x) # Prints 3 print std_dev(x) # Prints 0.1 print nominal_value(3) # Prints 3: nominal_value works on floats print std_dev(3) # Prints 0: std_dev works on floats - Probability distributions (random variables and calculation results) are printed as: nominal value +/- standard deviation but this does not imply any property on the nominal value (beyond the fact that the nominal value is normally inside the region of high probability density), or that the probability distribution of the result is symmetrical (this is rarely strictly the case). 
- Linear approximations of functions (around the nominal values) are used for the calculation of the standard deviation of mathematical expressions with this package. The calculated standard deviations and nominal values are thus meaningful approximations as long as the functions involved have precise linear expansions in the region where the probability distribution of their variables is the largest. It is therefore important that uncertainties be small. Mathematically, this means that the linear term of functions around the nominal values of their variables should be much larger than the remaining higher-order terms over the region of significant probability. For instance, sin(0+/-0.01) yields a meaningful standard deviation since it is quite linear over 0+/-0.01. However, cos(0+/-0.01) yields an approximate standard deviation of 0 (because the cosine is not well approximated by a line around 0), which might not be precise enough for all applications. - Comparison operations (>, ==, etc.) on numbers with uncertainties have a pragmatic semantics, in this package: numbers with uncertainties can be used wherever Python numbers are used, most of the time with a result identical to the one that would be obtained with their nominal value only. However, since the objects defined in this module represent probability distributions and not pure numbers, comparison operator are interpreted in a specific way. The result of a comparison operation ("==", ">", etc.) is defined so as to be essentially consistent with the requirement that uncertainties be small: the value of a comparison operation is True only if the operation yields True for all infinitesimal variations of its random variables, except, possibly, for an infinitely small number of cases. Example: "x = 3.14; y = 3.14" is such that x == y but x = ufloat(3.14, 0.01) y = ufloat(3.14, 0.01) is not such that x == y, since x and y are independent random variables that almost never give the same value. 
However, x == x still holds. The boolean value (bool(x), "if x...") of a number with uncertainty x is the result of x != 0. - The uncertainties package is for Python 2.3 and above. - This package contains tests. They can be run either manually or automatically with the nose unit testing framework (nosetests). (c) 2009-2016 by Eric O. LEBIGOT (EOL) . Please send feature requests, bug reports, or feedback to this address. Please support future development by donating $10 or more through PayPal! This software is released under a dual license. (1) The BSD license. (2) Any other license, as long as it is obtained from the original author.''' from builtins import map from .core import * from .core import __all__ # For a correct help(uncertainties) # Numerical version: __version_info__ = (3, 1, 7) __version__ = '.'.join(map(str, __version_info__)) __author__ = 'Eric O. LEBIGOT (EOL) ' uncertainties-3.1.7/uncertainties/core.py000066400000000000000000003715001425362552000205700ustar00rootroot00000000000000# coding=utf-8 """ Main module for the uncertainties package, with internal functions. """ # The idea behind this module is to replace the result of mathematical # operations by a local approximation of the defining function. For # example, sin(0.2+/-0.01) becomes the affine function # (AffineScalarFunc object) whose nominal value is sin(0.2) and # whose variations are given by sin(0.2+delta) = 0.98...*delta. # Uncertainties can then be calculated by using this local linear # approximation of the original function. 
from __future__ import division # Many analytical derivatives depend on this from builtins import str, next, map, zip, range, object import math from math import sqrt, log, isnan, isinf # Optimization: no attribute look-up import re import sys if sys.version_info < (3,): from past.builtins import basestring else: # Avoid importing from past in Python 3 since it utilizes the builtin # 'imp' module, which is deprecated as of Python 3.4, see # https://docs.python.org/3/library/imp.html. The 2to3 tool replaces # basestring with str, so that's what we effectively do here as well: basestring = str try: from math import isinfinite # !! Python 3.2+ except ImportError: def isinfinite(x): return isinf(x) or isnan(x) import copy import warnings import itertools import inspect import numbers import collections # The following restricts the local function getargspec() to the common # features of inspect.getargspec() and inspect.getfullargspec(): if sys.version_info < (3,): # !! Could be removed when moving to Python 3 only from inspect import getargspec else: from inspect import getfullargspec as getargspec # Attributes that are always exported (some other attributes are # exported only if the NumPy module is available...): __all__ = [ # All sub-modules and packages are not imported by default, # in particular because NumPy might be unavailable. 'ufloat', # Main function: returns a number with uncertainty 'ufloat_fromstr', # Important function: returns a number with uncertainty # Uniform access to nominal values and standard deviations: 'nominal_value', 'std_dev', # Utility functions (more are exported if NumPy is present): 'covariance_matrix', # Class for testing whether an object is a number with # uncertainty. Not usually created by users (except through the # Variable subclass), but possibly manipulated by external code # ['derivatives()' method, etc.]. 
'UFloat', # Wrapper for allowing non-pure-Python function to handle # quantitities with uncertainties: 'wrap' ] ############################################################################### def set_doc(doc_string): """ Decorator function that sets the docstring to the given text. It is useful for functions whose docstring is calculated (including string substitutions). """ def set_doc_string(func): func.__doc__ = doc_string return func return set_doc_string # Some types known to not depend on Variable objects are put in # CONSTANT_TYPES. The most common types can be put in front, as this # may slightly improve the execution speed. FLOAT_LIKE_TYPES = (numbers.Number,) CONSTANT_TYPES = FLOAT_LIKE_TYPES+(complex,) ############################################################################### # Utility for issuing deprecation warnings def deprecation(message): ''' Warns the user with the given message, by issuing a DeprecationWarning. ''' # stacklevel = 3 points to the original user call (not to the # function from this module that called deprecation()). # DeprecationWarning is ignored by default: not used. warnings.warn('Obsolete: %s Code can be automatically updated with' ' python -m uncertainties.1to2 -w ProgramDirectory.' % message, stacklevel=3) ############################################################################### ## Definitions that depend on the availability of NumPy: try: import numpy except ImportError: pass else: # NumPy numbers do not depend on Variable objects: FLOAT_LIKE_TYPES += (numpy.generic,) CONSTANT_TYPES += FLOAT_LIKE_TYPES[-1:] # Entering variables as a block of correlated values. Only available # if NumPy is installed. #! It would be possible to dispense with NumPy, but a routine should be # written for obtaining the eigenvectors of a symmetric matrix. See # for instance Numerical Recipes: (1) reduction to tri-diagonal # [Givens or Householder]; (2) QR / QL decomposition. 
    def correlated_values(nom_values, covariance_mat, tags=None):
        """
        Return numbers with uncertainties (AffineScalarFunc objects)
        that correctly reproduce the given covariance matrix, and have
        the given (float) values as their nominal value.

        The correlated_values_norm() function returns the same result,
        but takes a correlation matrix instead of a covariance matrix.

        The list of values and the covariance matrix must have the
        same length, and the matrix must be a square (symmetric) one.

        The numbers with uncertainties returned depend on newly
        created, independent variables (Variable objects).

        nom_values -- sequence with the nominal (real) values of the
        numbers with uncertainties to be returned.

        covariance_mat -- full covariance matrix of the returned numbers with
        uncertainties. For example, the first element of this matrix is the
        variance of the first number with uncertainty. This matrix must be a
        NumPy array-like (list of lists, NumPy array, etc.).

        tags -- if 'tags' is not None, it must list the tag of each new
        independent variable.
        """

        # !!! It would in principle be possible to handle 0 variance
        # variables by first selecting the sub-matrix that does not contain
        # such variables (with the help of numpy.ix_()), and creating
        # them separately.

        std_devs = numpy.sqrt(numpy.diag(covariance_mat))

        # For numerical stability reasons, we go through the correlation
        # matrix, because it is insensitive to any change of scale in the
        # quantities returned. However, care must be taken with 0 variance
        # variables: calculating the correlation matrix cannot be simply done
        # by dividing by standard deviations. We thus use specific
        # normalization values, with no null value:
        norm_vector = std_devs.copy()
        norm_vector[norm_vector==0] = 1

        return correlated_values_norm(
            # !! The following zip() is a bit suboptimal: correlated_values()
            # separates back the nominal values and the standard deviations:
            list(zip(nom_values, std_devs)),
            covariance_mat/norm_vector/norm_vector[:,numpy.newaxis],
            tags)

    __all__.append('correlated_values')

    def correlated_values_norm(values_with_std_dev, correlation_mat,
                               tags=None):
        '''
        Return correlated values like correlated_values(), but takes
        instead as input:

        - nominal (float) values along with their standard deviation, and
        - a correlation matrix (i.e. a normalized covariance matrix).

        values_with_std_dev -- sequence of (nominal value, standard
        deviation) pairs. The returned, correlated values have these
        nominal values and standard deviations.

        correlation_mat -- correlation matrix between the given values, except
        that any value with a 0 standard deviation must have its correlations
        set to 0, with a diagonal element set to an arbitrary value (something
        close to 0-1 is recommended, for a better numerical precision). When
        no value has a 0 variance, this is the covariance matrix normalized by
        standard deviations, and thus a symmetric matrix with ones on its
        diagonal. This matrix must be a NumPy array-like (list of lists,
        NumPy array, etc.).

        tags -- like for correlated_values().
        '''

        # If no tags were given, we prepare tags for the newly created
        # variables:
        if tags is None:
            tags = (None,) * len(values_with_std_dev)

        (nominal_values, std_devs) = numpy.transpose(values_with_std_dev)

        # We diagonalize the correlation matrix instead of the
        # covariance matrix, because this is generally more stable
        # numerically. In fact, the covariance matrix can have
        # coefficients with arbitrary values, through changes of units
        # of its input variables. This creates numerical instabilities.
        #
        # The covariance matrix is diagonalized in order to define
        # the independent variables that model the given values:
        (variances, transform) = numpy.linalg.eigh(correlation_mat)

        # Numerical errors might make some variances negative: we set
        # them to zero:
        variances[variances < 0] = 0.

        # Creation of new, independent variables:

        # We use the fact that the eigenvectors in 'transform' are
        # special: 'transform' is unitary: its inverse is its transpose:

        variables = tuple(
            # The variables represent "pure" uncertainties:
            Variable(0, sqrt(variance), tag)
            for (variance, tag) in zip(variances, tags))

        # The coordinates of each new uncertainty as a function of the
        # new variables must include the variable scale (standard deviation):
        transform *= std_devs[:, numpy.newaxis]

        # Representation of the initial correlated values:
        values_funcs = tuple(
            AffineScalarFunc(
                value,
                LinearCombination(dict(zip(variables, coords))))
            for (coords, value) in zip(transform, nominal_values))

        return values_funcs

    __all__.append('correlated_values_norm')

###############################################################################

# Mathematical operations with local approximations (affine scalar
# functions)

class NotUpcast(Exception):
    'Raised when an object cannot be converted to a number with uncertainty'

def to_affine_scalar(x):
    """
    Transforms x into a constant affine scalar function
    (AffineScalarFunc), unless it is already an AffineScalarFunc (in
    which case x is returned unchanged).

    Raises an exception unless x belongs to some specific classes of
    objects that are known not to depend on AffineScalarFunc objects
    (which then cannot be considered as constants).
    """

    if isinstance(x, AffineScalarFunc):
        return x

    if isinstance(x, CONSTANT_TYPES):
        # No variable => no derivative:
        return AffineScalarFunc(x, LinearCombination({}))

    # Case of lists, etc.
    raise NotUpcast("%s cannot be converted to a number with"
                    " uncertainty" % type(x))

# Step constant for numerical derivatives in
# partial_derivative().
Value chosen so as to get better numerical
# results:
STEP_SIZE = sqrt(sys.float_info.epsilon)

# !! It would be possible to split the partial derivative calculation
# into two functions: one for positional arguments (case of integer
# arg_ref) and one for keyword arguments (case of string
# arg_ref). However, this would either duplicate the code for the
# numerical differentiation, or require a call, which is probably more
# expensive in time than the tests done here.

def partial_derivative(f, arg_ref):
    """
    Return a function that numerically calculates the partial
    derivative of function f with respect to its argument arg_ref.

    arg_ref -- describes which variable to use for the
    differentiation. If f is called with f(*args, **kwargs) arguments,
    an integer represents the index of an argument in args, and a
    string represents the name of an argument in kwargs.
    """

    # Which set of function parameter contains the variable to be
    # changed? the positional or the optional keyword arguments?
    change_kwargs = isinstance(arg_ref, basestring)

    def partial_derivative_of_f(*args, **kwargs):
        """
        Partial derivative, calculated with the (-epsilon, +epsilon)
        method, which is more precise than the (0, +epsilon) method.
        """

        # args_with_var contains the arguments (either args or kwargs)
        # that contain the variable that must be shifted, as a mutable
        # object (because the variable contents will be modified):

        # The values in args need to be modified, for the
        # differentiation: it is converted to a list:
        if change_kwargs:
            args_with_var = kwargs
        else:
            args_with_var = list(args)

        # The step is relative to the parameter being varied, so that
        # shifting it does not suffer from finite precision limitations:
        step = STEP_SIZE*abs(args_with_var[arg_ref])
        if not step:
            # Arbitrary, but "small" with respect to 1:
            step = STEP_SIZE

        args_with_var[arg_ref] += step

        if change_kwargs:
            shifted_f_plus = f(*args, **args_with_var)
        else:
            shifted_f_plus = f(*args_with_var, **kwargs)

        args_with_var[arg_ref] -= 2*step  # Optimization: only 1 list copy

        if change_kwargs:
            shifted_f_minus = f(*args, **args_with_var)
        else:
            shifted_f_minus = f(*args_with_var, **kwargs)

        # Central difference quotient:
        return (shifted_f_plus - shifted_f_minus)/2/step

    return partial_derivative_of_f

class NumericalDerivatives(object):
    """
    Convenient access to the partial derivatives of a function,
    calculated numerically.
    """
    # This is not a list because the number of arguments of the
    # function is not known in advance, in general.

    def __init__(self, function):
        """
        'function' is the function whose derivatives can be computed.
        """
        self._function = function

    def __getitem__(self, n):
        """
        Return the n-th numerical derivative of the function.
        """
        return partial_derivative(self._function, n)

class IndexableIter(object):
    '''
    Iterable whose values can also be accessed through indexing.

    The input iterable values are cached.

    Some attributes:

    iterable -- iterable used for returning the elements one by one.

    returned_elements -- list with the elements directly accessible
    through indexing. Additional elements are obtained from
    self.iterable.

    none_converter -- function that takes an index and returns the
    value to be returned when None is obtained from the iterable
    (instead of None).
    '''

    def __init__(self, iterable, none_converter=lambda index: None):
        '''
        iterable -- iterable whose values will be returned.

        none_converter -- function applied to None returned values.
        The value that replaces None is none_converter(index), where
        index is the index of the element.
        '''
        self.iterable = iterable
        self.returned_elements = []
        self.none_converter = none_converter

    def __getitem__(self, index):

        returned_elements = self.returned_elements

        try:

            return returned_elements[index]

        except IndexError:  # Element not yet cached

            # All the elements up to (and including) 'index' are
            # pulled from the iterable and cached:
            for pos in range(len(returned_elements), index+1):

                value = next(self.iterable)

                if value is None:
                    value = self.none_converter(pos)

                returned_elements.append(value)

            return returned_elements[index]

    def __str__(self):
        return '<%s: [%s...]>' % (
            self.__class__.__name__,
            ', '.join(map(str, self.returned_elements)))

# NOTE(review): the mutable default arguments below are only read
# (iterated over / .items()), never mutated, so sharing the defaults
# between calls is harmless here.
def wrap(f, derivatives_args=[], derivatives_kwargs={}):
    """
    Wraps a function f into a function that also accepts numbers with
    uncertainties (UFloat objects); the wrapped function returns the
    value of f with the correct uncertainty and correlations. The
    wrapped function is intended to be used as a drop-in replacement
    for the original function: they can be called in the exact same
    way, the only difference being that numbers with uncertainties can
    be given to the wrapped function where f accepts float arguments.

    Doing so may be necessary when function f cannot be expressed
    analytically (with uncertainties-compatible operators and
    functions like +, *, umath.sin(), etc.).

    f must return a float-like (i.e. a float, an int, etc., not a
    list, etc.), unless when called with no number with
    uncertainty. This is because the wrapped function generally
    returns numbers with uncertainties: they represent a probability
    distribution over the real numbers.

    If the wrapped function is called with no argument that has an
    uncertainty, the value of f is returned.

    Parameters: the derivatives_* parameters can be used for defining
    some of the partial derivatives of f.
    All the (non-None) derivatives must have the same signature as f.

    derivatives_args --

        Iterable that, when iterated over, returns either derivatives
        (functions) or None. derivatives_args can in particular be a
        simple sequence (list or tuple) that gives the derivatives of
        the first positional parameters of f.

        Each function must be the partial derivative of f with respect
        to the corresponding positional parameters. These functions
        take the same arguments as f.

        The positional parameters of a function are usually
        positional-or-keyword parameters like in the call func(a,
        b=None). However, they also include var-positional parameters
        given through the func(a, b, *args) *args syntax. In the last
        example, derivatives_args can be an iterable that returns the
        derivative with respect to a, b and then to each optional
        argument in args.

        A value of None (instead of a function) obtained when
        iterating over derivatives_args is automatically replaced by
        the relevant numerical derivative. This derivative is not used
        if the corresponding argument is not a number with
        uncertainty. A None value can therefore be used for non-scalar
        arguments of f (like string arguments).

        If the derivatives_args iterable yields fewer derivatives than
        needed, wrap() automatically sets the remaining unspecified
        derivatives to None (i.e. to the automatic numerical
        calculation of derivatives).

        An indefinite number of derivatives can be specified by having
        derivatives_args be an infinite iterator; this can for
        instance be used for specifying the derivatives of functions
        with an undefined number of arguments (like sum(), whose
        partial derivatives all return 1).

    derivatives_kwargs --

        Dictionary that maps keyword parameters to their derivatives,
        or None (as in derivatives_args).

        Keyword parameters are defined as being those of kwargs when f
        has a signature of the form f(..., **kwargs). In Python 3,
        these keyword parameters also include keyword-only parameters.

        Non-mapped keyword parameters are replaced automatically by
        None: the wrapped function will use, if necessary, numerical
        differentiation for these parameters (as with
        derivatives_args).

        Note that this dictionary only maps keyword *parameters* from
        the *signature* of f. The way the wrapped function is called
        is immaterial: for example, if f has signature f(a, b=None),
        then derivatives_kwargs should be the empty dictionary, even
        if the wrapped f can be called as wrapped_f(a=123, b=42).

    Example (for illustration purposes only, as
    uncertainties.umath.sin() runs faster than the examples that
    follow): wrap(math.sin) is a sine function that can be applied to
    numbers with uncertainties. Its derivative will be calculated
    numerically. wrap(math.sin, [None]) would have produced the same
    result. wrap(math.sin, [math.cos]) is the same function, but with
    an analytically defined derivative.

    Numerically calculated derivatives are meaningless when the
    function is not differentiable (e.g., math.hypot(x, y) in (x, y) =
    (0, 0), and sqrt(x) in x = 0). The corresponding uncertainties are
    either meaningless (case of hypot) or raise an exception when
    calculated (case of sqrt). In such cases, it is recommended (but
    not mandatory) to supply instead a derivative function that
    returns NaN where the function is not differentiable. This
    function can still numerically calculate the derivative where
    defined, for instance by using the
    uncertainties.core.partial_derivative() function.

    The correctness of the supplied analytical derivatives can be
    tested by setting them to None instead and comparing the
    analytical and the numerical differentiation results.

    Note on efficiency: the wrapped function assumes that f cannot
    accept numbers with uncertainties as arguments.
    If f actually does handle some arguments even when they have an
    uncertainty, the wrapped function ignores this fact, which might
    lead to a performance hit: wrapping a function that actually
    accepts numbers with uncertainty is likely to make it slower.
    """

    derivatives_args_index = IndexableIter(
        # Automatic addition of numerical derivatives in case the
        # supplied derivatives_args is shorter than the number of
        # arguments in *args:
        itertools.chain(derivatives_args, itertools.repeat(None)))

    # Derivatives for keyword arguments (includes var-keyword
    # parameters **kwargs, but also var-or-keyword parameters, and
    # keyword-only parameters (Python 3):

    derivatives_all_kwargs = {}

    for (name, derivative) in derivatives_kwargs.items():

        # Optimization: None keyword-argument derivatives are converted
        # right away to derivatives (instead of doing this every time a
        # None derivative is encountered when calculating derivatives):

        if derivative is None:
            derivatives_all_kwargs[name] = partial_derivative(f, name)
        else:
            derivatives_all_kwargs[name] = derivative

    # When the wrapped function is called with keyword arguments that
    # map to positional-or-keyword parameters, their derivative is
    # looked for in derivatives_all_kwargs. We define these
    # additional derivatives:

    try:
        argspec = getargspec(f)
    except TypeError:
        # Some functions do not provide meta-data about their
        # arguments (see PEP 362). One cannot use keyword arguments
        # for positional-or-keyword parameters with them: nothing has
        # to be done:
        pass
    else:
        # With Python 3, there is no need to handle keyword-only
        # arguments (and therefore to use inspect.getfullargspec())
        # because they are already handled by derivatives_kwargs.
        for (index, name) in enumerate(argspec.args):

            # The following test handles the case of
            # positional-or-keyword parameter for which automatic
            # numerical differentiation is used: when the wrapped
            # function is called with a keyword argument for this
            # parameter, the numerical derivative must be calculated
            # with respect to the parameter name. In the other case,
            # where the wrapped function is called with a positional
            # argument, the derivative with respect to its index must
            # be used:

            derivative = derivatives_args_index[index]

            if derivative is None:
                derivatives_all_kwargs[name] = partial_derivative(f, name)
            else:
                derivatives_all_kwargs[name] = derivative

    # Optimization: None derivatives for the positional arguments are
    # converted to the corresponding numerical differentiation
    # function (instead of doing this over and over later every time a
    # None derivative is found):

    none_converter = lambda index: partial_derivative(f, index)

    for (index, derivative) in enumerate(
        derivatives_args_index.returned_elements):
        if derivative is None:
            derivatives_args_index.returned_elements[index] = (
                none_converter(index))

    # Future None values are also automatically converted:
    derivatives_args_index.none_converter = none_converter

    ## Wrapped function:

    #! Setting the doc string after "def f_with...()" does not
    # seem to work. We define it explicitly:
    @set_doc("""\
    Version of %s(...) that returns an affine approximation
    (AffineScalarFunc object), if its result depends on variables
    (Variable objects). Otherwise, returns a simple constant (when
    applied to constant arguments).

    Warning: arguments of the function that are not AffineScalarFunc
    objects must not depend on uncertainties.Variable objects in any
    way. Otherwise, the dependence of the result in
    uncertainties.Variable objects will be incorrect.
    Original documentation:
    %s""" % (f.__name__, f.__doc__))
    def f_with_affine_output(*args, **kwargs):

        ########################################
        # The involved random variables must first be gathered, so
        # that they can be independently updated.

        # The arguments that contain an uncertainty (AffineScalarFunc
        # objects) are gathered, as positions or names; they will be
        # replaced by their nominal value in order to calculate
        # the necessary derivatives of f.

        pos_w_uncert = [index for (index, value) in enumerate(args)
                        if isinstance(value, AffineScalarFunc)]
        names_w_uncert = [key for (key, value) in kwargs.items()
                          if isinstance(value, AffineScalarFunc)]

        ########################################
        # Value of f() at the nominal value of the arguments with
        # uncertainty:

        # The usual behavior of f() is kept, if no number with
        # uncertainty is provided:
        if (not pos_w_uncert) and (not names_w_uncert):
            return f(*args, **kwargs)

        ### Nominal values of the (scalar) arguments:

        # !! Possible optimization: If pos_w_uncert is empty, there
        # is actually no need to create a mutable version of args and
        # one could do args_values = args. However, the wrapped
        # function is typically called with numbers with uncertainties
        # as positional arguments (i.e., pos_w_uncert is not empty),
        # so this "optimization" is not implemented here.

        ## Positional arguments:
        args_values = list(args)  # Now mutable: modified below

        # Arguments with an uncertainty are converted to their nominal
        # value:
        for index in pos_w_uncert:
            args_values[index] = args[index].nominal_value

        ## Keyword arguments:

        # For efficiency reasons, kwargs is not copied. Instead, its
        # values with uncertainty are modified:

        # The original values with uncertainties are needed: they are
        # saved in the following dictionary (which only contains
        # values with uncertainty):

        kwargs_uncert_values = {}

        for name in names_w_uncert:

            value_with_uncert = kwargs[name]
            # Saving for future use:
            kwargs_uncert_values[name] = value_with_uncert

            # The original dictionary is modified (for efficiency reasons):
            kwargs[name] = value_with_uncert.nominal_value

        f_nominal_value = f(*args_values, **kwargs)

        # If the value is not a float, then this code cannot provide
        # the result, as it returns a UFloat, which represents a
        # random real variable. This happens for instance when
        # ufloat()*numpy.array() is calculated: the
        # AffineScalarFunc.__mul__ operator, obtained through wrap(),
        # returns a NumPy array, not a float:
        if not isinstance(f_nominal_value, FLOAT_LIKE_TYPES):
            return NotImplemented

        ########################################

        # Calculation of the linear part of the function value,
        # defined by (coefficient, argument) pairs, where 'argument'
        # is an AffineScalarFunc (for all AffineScalarFunc found as
        # argument of f):
        linear_part = []

        for pos in pos_w_uncert:
            linear_part.append((
                # Coefficient:
                derivatives_args_index[pos](*args_values, **kwargs),
                # Linear part of the AffineScalarFunc expression:
                args[pos]._linear_part))

        for name in names_w_uncert:

            # Optimization: caching of the automatic numerical
            # derivatives for keyword arguments that are
            # discovered.
            # This gives a speedup when the original
            # function is called repeatedly with the same keyword
            # arguments:
            derivative = derivatives_all_kwargs.setdefault(
                name,
                # Derivative never needed before:
                partial_derivative(f, name))

            linear_part.append((
                # Coefficient:
                derivative(*args_values, **kwargs),
                # Linear part of the AffineScalarFunc expression:
                kwargs_uncert_values[name]._linear_part))

        # The function now returns the necessary linear approximation
        # to the function:
        return AffineScalarFunc(
            f_nominal_value, LinearCombination(linear_part))

    # NOTE(review): the call below applies the same docstring that the
    # @set_doc decorator already applied at the definition of
    # f_with_affine_output; one of the two applications appears
    # redundant -- confirm before removing either.
    f_with_affine_output = set_doc("""\
    Version of %s(...) that returns an affine approximation
    (AffineScalarFunc object), if its result depends on variables
    (Variable objects). Otherwise, returns a simple constant (when
    applied to constant arguments).

    Warning: arguments of the function that are not AffineScalarFunc
    objects must not depend on uncertainties.Variable objects in any
    way. Otherwise, the dependence of the result in
    uncertainties.Variable objects will be incorrect.

    Original documentation:
    %s""" % (f.__name__, f.__doc__))(f_with_affine_output)

    # It is easier to work with f_with_affine_output, which represents
    # a wrapped version of 'f', when it bears the same name as 'f':
    # ! __name__ is read-only, in Python 2.3:
    # NOTE(review): this sets the plain attribute 'name', not
    # '__name__'; the wrapped function's __name__ therefore stays
    # 'f_with_affine_output' -- confirm whether this is intentional.
    f_with_affine_output.name = f.__name__

    return f_with_affine_output

def force_aff_func_args(func):
    """
    Takes an operator op(x, y) and wraps it.

    The constructed operator returns func(x, to_affine_scalar(y)) if y
    can be upcast with to_affine_scalar(); otherwise, it returns
    NotImplemented.

    Thus, func() is only called on two AffineScalarFunc objects, if
    its first argument is an AffineScalarFunc.
    """

    def op_on_upcast_args(x, y):
        """
        Return %s(self, to_affine_scalar(y)) if y can be upcast
        through to_affine_scalar. Otherwise returns NotImplemented.
""" % func.__name__ try: y_with_uncert = to_affine_scalar(y) except NotUpcast: # This module does not know how to handle the comparison: # (example: y is a NumPy array, in which case the NumPy # array will decide that func() should be applied # element-wise between x and all the elements of y): return NotImplemented else: return func(x, y_with_uncert) return op_on_upcast_args ######################################## # Definition of boolean operators, that assume that self and # y_with_uncert are AffineScalarFunc. # The fact that uncertainties must be small is used, here: the # comparison functions are supposed to be constant for most values of # the random variables. # Even though uncertainties are supposed to be small, comparisons # between 3+/-0.1 and 3.0 are handled correctly (even though x == 3.0 is # not a constant function in the 3+/-0.1 interval). The comparison # between x and x is handled too, when x has an uncertainty. In fact, # as explained in the main documentation, it is possible to give a # useful meaning to the comparison operators, in these cases. def eq_on_aff_funcs(self, y_with_uncert): """ __eq__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ difference = self - y_with_uncert # Only an exact zero difference means that self and y are # equal numerically: return not(difference._nominal_value or difference.std_dev) def ne_on_aff_funcs(self, y_with_uncert): """ __ne__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ return not eq_on_aff_funcs(self, y_with_uncert) def gt_on_aff_funcs(self, y_with_uncert): """ __gt__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ return self._nominal_value > y_with_uncert._nominal_value def ge_on_aff_funcs(self, y_with_uncert): """ __ge__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. 
""" return (gt_on_aff_funcs(self, y_with_uncert) or eq_on_aff_funcs(self, y_with_uncert)) def lt_on_aff_funcs(self, y_with_uncert): """ __lt__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ return self._nominal_value < y_with_uncert._nominal_value def le_on_aff_funcs(self, y_with_uncert): """ __le__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ return (lt_on_aff_funcs(self, y_with_uncert) or eq_on_aff_funcs(self, y_with_uncert)) ######################################## def first_digit(value): ''' Return the first digit position of the given value, as an integer. 0 is the digit just before the decimal point. Digits to the right of the decimal point have a negative position. Return 0 for a null value. ''' try: return int(math.floor(math.log10(abs(value)))) except ValueError: # Case of value == 0 return 0 def PDG_precision(std_dev): ''' Return the number of significant digits to be used for the given standard deviation, according to the rounding rules of the Particle Data Group (2010) (http://pdg.lbl.gov/2010/reviews/rpp2010-rev-rpp-intro.pdf). Also returns the effective standard deviation to be used for display. ''' exponent = first_digit(std_dev) # The first three digits are what matters: we get them as an # integer number in [100; 999). # # In order to prevent underflow or overflow when calculating # 10**exponent, the exponent is slightly modified first and a # factor to be applied after "removing" the new exponent is # defined. # # Furthermore, 10**(-exponent) is not used because the exponent # range for very small and very big floats is generally different. 
    if exponent >= 0:
        # The -2 here means "take two additional digits":
        (exponent, factor) = (exponent-2, 1)
    else:
        (exponent, factor) = (exponent+1, 1000)

    digits = int(std_dev/10.**exponent*factor)  # int rounds towards zero

    # Rules (Particle Data Group rounding):
    if digits <= 354:
        return (2, std_dev)
    elif digits <= 949:
        return (1, std_dev)
    else:
        # The parentheses matter, for very small or very large
        # std_dev:
        return (2, 10.**exponent*(1000/factor))

# Definition of a basic (format specification only, no full-feature
# format string) formatting function that works whatever the version
# of Python. This function exists so that the more capable format() is
# used instead of the % formatting operator, if available:
robust_format = format

class CallableStdDev(float):
    '''
    Class for standard deviation results, which used to be
    callable. Provided for compatibility with old code. Issues an
    obsolescence warning upon call.
    '''

    # This class is a float. It must be set to the standard deviation
    # upon construction.

    def __call__ (self):
        deprecation('the std_dev attribute should not be called'
                    ' anymore: use .std_dev instead of .std_dev().')
        return self

# Exponent letter: the keys are the possible main_fmt_type values of
# format_num():
EXP_LETTERS = {'f': 'e', 'F': 'E'}

def robust_align(orig_str, fill_char, align_option, width):
    '''
    Aligns the given string with the given fill character.

    orig_str -- string to be aligned (str or unicode object).

    fill_char -- if empty, space is used.

    align_option -- as accepted by format().

    width -- string that contains the width.
    '''

    # print "ALIGNING", repr(orig_str), "WITH", fill_char+align_option,
    # print "WIDTH", width

    return format(orig_str, fill_char+align_option+width)

# Maps some Unicode code points ("-", "+", and digits) to their
# superscript version:
TO_SUPERSCRIPT = {
    0x2b: u'⁺',
    0x2d: u'⁻',
    0x30: u'⁰',
    0x31: u'¹',
    0x32: u'²',
    0x33: u'³',
    0x34: u'⁴',
    0x35: u'⁵',
    0x36: u'⁶',
    0x37: u'⁷',
    0x38: u'⁸',
    0x39: u'⁹'
    }

# Inverted TO_SUPERSCRIPT table, for use with unicode.translate():
#
#! Python 2.7+ can use a dictionary comprehension instead:
FROM_SUPERSCRIPT = {
    ord(sup): normal for (normal, sup) in TO_SUPERSCRIPT.items()}

def to_superscript(value):
    '''
    Return a (Unicode) string with the given value as superscript
    characters.

    The value is formatted with the %d %-operator format.

    value -- integer.
    '''

    return (u'%d' % value).translate(TO_SUPERSCRIPT)

def nrmlze_superscript(number_str):
    '''
    Return a string with superscript digits transformed into regular
    digits.

    Non-superscript digits are not changed before the conversion. Thus,
    the string can also contain regular digits.

    ValueError is raised if the conversion cannot be done.

    number_str -- string to be converted (of type str, but also
    possibly, for Python 2, unicode, which allows this string to
    contain superscript digits).
    '''
    # !!
    # Python 3 doesn't need this str(), which is only here for giving the
    # .translate() method to str objects in Python 2 (this str() comes
    # from the builtins module of the future package and is therefore
    # a subclass of unicode, in Python 2):
    return int(str(number_str).translate(FROM_SUPERSCRIPT))

# Symbol for the +/- sign, per output mode:
PM_SYMBOLS = {'pretty-print': u'±', 'latex': r' \pm ', 'default': '+/-'}

# Multiplication symbol for pretty printing (so that pretty printing can
# be customized):
MULT_SYMBOLS = {'pretty-print': u'×', 'latex': r'\times'}

# Function that transforms a numerical exponent produced by format_num() into
# the corresponding string notation (for non-default modes):
EXP_PRINT = {
    'pretty-print': lambda common_exp: u'%s10%s' % (
        MULT_SYMBOLS['pretty-print'], to_superscript(common_exp)),
    'latex': lambda common_exp: r' %s 10^{%d}' % (
        MULT_SYMBOLS['latex'], common_exp)}

# Symbols used for grouping (typically between parentheses) in format_num():
GROUP_SYMBOLS = {
    'pretty-print': ('(', ')'),
    # Because of possibly exponents inside the parentheses (case of a
    # specified field width), it is better to use auto-adjusting
    # parentheses. This has the side effect of making the part between
    # the parentheses non-breakable (the text inside parentheses in a
    # LaTeX math expression $...$ can be broken).
    'latex': (r'\left(', r'\right)'),
    'default': ('(', ')')  # Basic text mode
    }

def format_num(nom_val_main, error_main, common_exp,
               fmt_parts, prec, main_pres_type, options):
    u'''
    Return a formatted number with uncertainty.

    Null errors (error_main) are displayed as the integer 0, with
    no decimal point.

    The formatting can be customized globally through the PM_SYMBOLS,
    MULT_SYMBOLS, GROUP_SYMBOLS and EXP_PRINT dictionaries, which
    contain respectively the symbol for ±, for multiplication, for
    parentheses, and a function that maps an exponent to something
    like "×10²" (using MULT_SYMBOLS).
Each of these dictionary has (at least) a 'pretty-print' and a 'latex' key, that define the symbols to be used for these two output formats (the PM_SYMBOLS and GROUP_SYMBOLS also have a 'default' key for the default output format). For example, the defaults for the 'pretty-print' format are: - PM_SYMBOLS['pretty-print'] = '±' - MULT_SYMBOLS['pretty-print'] = '×' - GROUP_SYMBOLS['pretty-print'] = ( '(', ')' ) - EXP_PRINT['pretty-print']: see the source code. Arguments: nom_val_main, error_main -- nominal value and error, before using common_exp (e.g., "1.23e2" would have a main value of 1.23; similarly, "12.3+/-0.01" would have a main value of 12.3). common_exp -- common exponent to use. If None, no common exponent is used. fmt_parts -- mapping that contains at least the following parts of the format specification: fill, align, sign, zero, width, comma, type; the value are strings. These format specification parts are handled. The width is applied to each value, or, if the shorthand notation is used, globally. If the error is special (zero, NaN, inf), the parts are applied as much as possible to the nominal value. prec -- precision to use with the main_pres_type format type (see below). main_pres_type -- format presentation type, either "f" or "F". This defines how the mantissas, exponents and NaN/inf values are represented (in the same way as for float). None, the empty string, or "%" are not accepted. options -- options (as an object that support membership testing, like for instance a string). "P" is for pretty-printing ("±" between the nominal value and the error, superscript exponents, etc.). "L" is for a LaTeX output. "S" is for the shorthand notation 1.23(1). "p" is for making sure that the …±… part is surrounded by parentheses. "%" adds a final percent sign, and parentheses if the shorthand notation is not used. Options can be combined. The P option has priority over the L option (if both are given). 
For details, see the documentation for AffineScalarFunction.__format__(). ''' # print (nom_val_main, error_main, common_exp, # fmt_parts, prec, main_pres_type, options) # If a decimal point were always present in zero rounded errors # that are not zero, the formatting would be difficult, in general # (because the formatting options are very general): an example # is'{:04.0f}'.format(0.1), which gives "0000" and would have to # give "000.". Another example is '{:<4.0f}'.format(0.1), which # gives "0 " but should give "0. ". This is cumbersome to # implement in the general case, because no format prints "0." # for 0. Furthermore, using the .0f format already brings the same # kind of difficulty: non-zero numbers can appear as the exact # integer zero, after rounding. The problem is not larger, for # numbers with an error. # # That said, it is good to indicate null errors explicitly when # possible: printing 3.1±0 with the default format prints 3.1+/-0, # which shows that the uncertainty is exactly zero. # The suffix of the result is calculated first because it is # useful for the width handling of the shorthand notation. # Printing type for parts of the result (exponent, parentheses), # taking into account the priority of the pretty-print mode over # the LaTeX mode. This setting does not apply to everything: for # example, NaN is formatted as \mathrm{nan} (or NAN) if the LaTeX # mode is required. if 'P' in options: print_type = 'pretty-print' elif 'L' in options: print_type = 'latex' else: print_type = 'default' # Exponent part: if common_exp is None: exp_str = '' elif print_type == 'default': # Case of e or E. The same convention as Python 2.7 # to 3.3 is used for the display of the exponent: exp_str = EXP_LETTERS[main_pres_type]+'%+03d' % common_exp else: exp_str = EXP_PRINT[print_type](common_exp) # Possible % sign: percent_str = '' if '%' in options: if 'L' in options: # % is a special character, in LaTeX: it must be escaped. 
# # Using '\\' in the code instead of r'\' so as not to # confuse emacs's syntax highlighting: percent_str += ' \\' percent_str += '%' #################### # Only true if the error should not have an exponent (has priority # over common_exp): special_error = not error_main or isinfinite(error_main) # Nicer representation of the main nominal part, with no trailing # zeros, when the error does not have a defined number of # significant digits: if special_error and fmt_parts['type'] in ('', 'g', 'G'): # The main part is between 1 and 10 because any possible # exponent is taken care of by common_exp, so it is # formatted without an exponent (otherwise, the exponent # would have to be handled for the LaTeX option): fmt_suffix_n = (fmt_parts['prec'] or '')+fmt_parts['type'] else: fmt_suffix_n = '.%d%s' % (prec, main_pres_type) # print "FMT_SUFFIX_N", fmt_suffix_n #################### # Calculation of the mostly final numerical part value_str (no % # sign, no global width applied). # Error formatting: if 'S' in options: # Shorthand notation: # Calculation of the uncertainty part, uncert_str: if error_main == 0: # The error is exactly zero uncert_str = '0' elif isnan(error_main): uncert_str = robust_format(error_main, main_pres_type) if 'L' in options: uncert_str = r'\mathrm{%s}' % uncert_str elif isinf(error_main): if 'L' in options: uncert_str = r'\infty' else: uncert_str = robust_format(error_main, main_pres_type) else: # Error with a meaningful first digit (not 0, and real number) uncert = round(error_main, prec) # The representation uncert_str of the uncertainty (which will # be put inside parentheses) is calculated: # The uncertainty might straddle the decimal point: we # keep it as it is, in this case (e.g. 
1.2(3.4), as this # makes the result easier to read); the shorthand # notation then essentially coincides with the +/- # notation: if first_digit(uncert) >= 0 and prec > 0: # This case includes a zero rounded error with digits # after the decimal point: uncert_str = '%.*f' % (prec, uncert) else: if uncert: # The round is important because 566.99999999 can # first be obtained when 567 is wanted (%d prints the # integer part, not the rounded value): uncert_str = '%d' % round(uncert*10.**prec) else: # The decimal point indicates a truncated float # (this is easy to do, in this case, since # fmt_prefix_e is ignored): uncert_str = '0.' # End of the final number representation (width and alignment # not included). This string is important for the handling of # the width: value_end = '(%s)%s%s' % (uncert_str, exp_str, percent_str) any_exp_factored = True # Single exponent in the output ########## # Nominal value formatting: # Calculation of fmt_prefix_n (prefix for the format of the # main part of the nominal value): if fmt_parts['zero'] and fmt_parts['width']: # Padding with zeros must be done on the nominal value alone: # Remaining width (for the nominal value): nom_val_width = max(int(fmt_parts['width']) - len(value_end), 0) fmt_prefix_n = '%s%s%d%s' % ( fmt_parts['sign'], fmt_parts['zero'], nom_val_width, fmt_parts['comma']) else: # Any 'zero' part should not do anything: it is not # included fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma'] # print "FMT_PREFIX_N", fmt_prefix_n # print "FMT_SUFFIX_N", fmt_suffix_n nom_val_str = robust_format(nom_val_main, fmt_prefix_n+fmt_suffix_n) ########## # Overriding of nom_val_str for LaTeX,; possibly based on the # existing value (for NaN vs nan): if 'L' in options: if isnan(nom_val_main): nom_val_str = r'\mathrm{%s}' % nom_val_str elif isinf(nom_val_main): # !! 
It is wasteful, in this case, to replace # nom_val_str: could this be avoided while avoiding to # duplicate the formula for nom_val_str for the common # case (robust_format(...))? nom_val_str = r'%s\infty' % ('-' if nom_val_main < 0 else '') value_str = nom_val_str+value_end # Global width, if any: if fmt_parts['width']: # An individual alignment is needed: # Default alignment, for numbers: to the right (if no # alignment is specified, a string is aligned to the # left): value_str = robust_align( value_str, fmt_parts['fill'], fmt_parts['align'] or '>', fmt_parts['width']) else: # +/- notation: # The common exponent is factored or not, depending on the # width. This gives nice columns for the nominal values and # the errors (no shift due to a varying exponent), when a need # is given: any_exp_factored = not fmt_parts['width'] # True when the error part has any exponent directly attached # (case of an individual exponent for both the nominal value # and the error, when the error is a non-0, real number). 
# The goal is to avoid the strange notation nane-10, and to # avoid the 0e10 notation for an exactly zero uncertainty, # because .0e can give this for a non-zero error (the goal is # to have a zero uncertainty be very explicit): error_has_exp = not any_exp_factored and not special_error # Like error_has_exp, but only for real number handling # (there is no special meaning to a zero nominal value): nom_has_exp = not any_exp_factored and not isinfinite(nom_val_main) # Prefix for the parts: if fmt_parts['width']: # Individual widths # If zeros are needed, then the width is taken into # account now (before the exponent is added): if fmt_parts['zero']: width = int(fmt_parts['width']) # Remaining (minimum) width after including the # exponent: remaining_width = max(width-len(exp_str), 0) fmt_prefix_n = '%s%s%d%s' % ( fmt_parts['sign'], fmt_parts['zero'], remaining_width if nom_has_exp else width, fmt_parts['comma']) fmt_prefix_e = '%s%d%s' % ( fmt_parts['zero'], remaining_width if error_has_exp else width, fmt_parts['comma']) else: fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma'] fmt_prefix_e = fmt_parts['comma'] else: # Global width fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma'] fmt_prefix_e = fmt_parts['comma'] ## print "ANY_EXP_FACTORED", any_exp_factored ## print "ERROR_HAS_EXP", error_has_exp ## print "NOM_HAS_EXP", nom_has_exp #################### # Nominal value formatting: # !! The following fails with Python < 2.6 when the format is # not accepted by the % operator. This can happen when # special_error is true, as the format used for the nominal # value is essentially the format provided by the user, which # may be empty: # print "FMT_PREFIX_N", fmt_prefix_n # print "FMT_SUFFIX_N", fmt_suffix_n nom_val_str = robust_format(nom_val_main, fmt_prefix_n+fmt_suffix_n) # print "NOM_VAL_STR", nom_val_str #################### # Error formatting: # !! 
Note: .0f applied to a float has no decimal point, but # this does not appear to be documented # (http://docs.python.org/2/library/string.html#format-specification-mini-language). This # feature is used anyway, because it allows a possible comma # format parameter to be handled more conveniently than if the # 'd' format was used. # # The following uses a special integer representation of a # zero uncertainty: if error_main: # The handling of NaN/inf in the nominal value identical to # the handling of NaN/inf in the standard deviation: if (isinfinite(nom_val_main) # Only some formats have a nicer representation: and fmt_parts['type'] in ('', 'g', 'G')): # The error can be formatted independently: fmt_suffix_e = (fmt_parts['prec'] or '')+fmt_parts['type'] else: fmt_suffix_e = '.%d%s' % (prec, main_pres_type) else: fmt_suffix_e = '.0%s' % main_pres_type error_str = robust_format(error_main, fmt_prefix_e+fmt_suffix_e) ########## # Overriding of nom_val_str and error_str for LaTeX: if 'L' in options: if isnan(nom_val_main): nom_val_str = r'\mathrm{%s}' % nom_val_str elif isinf(nom_val_main): nom_val_str = r'%s\infty' % ('-' if nom_val_main < 0 else '') if isnan(error_main): error_str = r'\mathrm{%s}' % error_str elif isinf(error_main): error_str = r'\infty' if nom_has_exp: nom_val_str += exp_str if error_has_exp: error_str += exp_str #################### # Final alignment of each field, if needed: if fmt_parts['width']: # An individual alignment is needed: # Default alignment, for numbers: to the right (if no # alignment is specified, a string is aligned to the # left): effective_align = fmt_parts['align'] or '>' # robust_format() is used because it may handle alignment # options, where the % operator does not: nom_val_str = robust_align( nom_val_str, fmt_parts['fill'], effective_align, fmt_parts['width']) error_str = robust_align( error_str, fmt_parts['fill'], effective_align, fmt_parts['width']) #################### pm_symbol = PM_SYMBOLS[print_type] # Shortcut 
#################### # Construction of the final value, value_str, possibly with # grouping (typically inside parentheses): (LEFT_GROUPING, RIGHT_GROUPING) = GROUP_SYMBOLS[print_type] # The nominal value and the error might have to be explicitly # grouped together with parentheses, so as to prevent an # ambiguous notation. This is done in parallel with the # percent sign handling because this sign may too need # parentheses. if any_exp_factored and common_exp is not None: # Exponent value_str = ''.join(( LEFT_GROUPING, nom_val_str, pm_symbol, error_str, RIGHT_GROUPING, exp_str, percent_str)) else: # No exponent value_str = ''.join([nom_val_str, pm_symbol, error_str]) if percent_str: value_str = ''.join(( LEFT_GROUPING, value_str, RIGHT_GROUPING, percent_str)) elif 'p' in options: value_str = ''.join((LEFT_GROUPING, value_str, RIGHT_GROUPING)) return value_str def signif_dgt_to_limit(value, num_signif_d): ''' Return the precision limit necessary to display value with num_signif_d significant digits. The precision limit is given as -1 for 1 digit after the decimal point, 0 for integer rounding, etc. It can be positive. ''' fst_digit = first_digit(value) limit_no_rounding = fst_digit-num_signif_d+1 # The number of significant digits of the uncertainty, when # rounded at this limit_no_rounding level, can be too large by 1 # (e.g., with num_signif_d = 1, 0.99 gives limit_no_rounding = -1, but # the rounded value at that limit is 1.0, i.e. has 2 # significant digits instead of num_signif_d = 1). We correct for # this effect by adjusting limit if necessary: rounded = round(value, -limit_no_rounding) fst_digit_rounded = first_digit(rounded) if fst_digit_rounded > fst_digit: # The rounded limit is fst_digit_rounded-num_signif_d+1; # but this can only be 1 above the non-rounded limit: limit_no_rounding += 1 return limit_no_rounding class LinearCombination(object): """ Linear combination of Variable differentials. 
    The linear_combo attribute can change formally, but its value
    always remains the same. Typically, the linear combination can
    thus be expanded.

    The expanded form of linear_combo is a mapping from Variables to
    the coefficient of their differential.
    """

    # ! Invariant: linear_combo is represented internally exactly as
    # the linear_combo argument to __init__():
    __slots__ = "linear_combo"

    def __init__(self, linear_combo):
        """
        linear_combo can be modified by the object, during its
        lifetime. This allows the object to change its internal
        representation over time (for instance by expanding the linear
        combination and replacing the original expression with the
        expanded one).

        linear_combo -- if linear_combo is a dict, then it represents
        an expanded linear combination and must map Variables to the
        coefficient of their differential. Otherwise, it should be a
        list of (coefficient, LinearCombination) pairs (that
        represents a linear combination expression).
        """

        self.linear_combo = linear_combo

    def __bool__(self):
        """
        Return True only if the linear combination is non-empty, i.e. if
        the linear combination contains any term.
        """
        return bool(self.linear_combo)

    def expanded(self):
        """
        Return True if and only if the linear combination is expanded.
        """
        return isinstance(self.linear_combo, dict)

    def expand(self):
        """
        Expand the linear combination.

        The expansion is a collections.defaultdict(float).

        This should only be called if the linear combination is not
        yet expanded.
        """

        # The derivatives are built progressively by expanding each
        # term of the linear combination until there is no linear
        # combination to be expanded.

        # Final derivatives, constructed progressively:
        derivatives = collections.defaultdict(float)

        while self.linear_combo:  # The list of terms is emptied progressively

            # One of the terms is expanded or, if no expansion is
            # needed, simply added to the existing derivatives.
            #
            # Optimization note: since Python's operations are
            # left-associative, a long sum of Variables can be built
            # such that the last term is essentially a Variable (and
            # not a NestedLinearCombination): popping from the
            # remaining terms allows this term to be quickly put in
            # the final result, which limits the number of terms
            # remaining (and whose size can temporarily grow):
            (main_factor, main_expr) = self.linear_combo.pop()

            # print "MAINS", main_factor, main_expr

            if main_expr.expanded():
                for (var, factor) in main_expr.linear_combo.items():
                    derivatives[var] += main_factor*factor

            else:  # Non-expanded form
                for (factor, expr) in main_expr.linear_combo:
                    # The main_factor is applied to expr:
                    self.linear_combo.append((main_factor*factor, expr))

            # print "DERIV", derivatives

        self.linear_combo = derivatives

    def __getstate__(self):
        # Not false, otherwise __setstate__() will not be called:
        return (self.linear_combo,)

    def __setstate__(self, state):
        (self.linear_combo,) = state

class AffineScalarFunc(object):
    """
    Affine functions that support basic mathematical operations
    (addition, etc.). Such functions can for instance be used for
    representing the local (linear) behavior of any function.

    This class can also be used to represent constants.

    The variables of affine scalar functions are Variable objects.

    AffineScalarFunc objects include facilities for calculating the
    'error' on the function, from the uncertainties on its variables.

    Main attributes and methods:

    - nominal_value, std_dev: value at the origin / nominal value, and
      standard deviation. The standard deviation can be NaN or
      infinity.

    - n, s: abbreviations for nominal_value and std_dev.

    - error_components(): error_components()[x] is the error due to
      Variable x.

    - derivatives: derivatives[x] is the (value of the) derivative
      with respect to Variable x. This attribute is a Derivatives
      dictionary whose keys are the Variable objects on which the
      function depends. The values are the numerical values of the
      derivatives.
      All the Variable objects on which the function depends are in
      'derivatives'.

    - std_score(x): position of number x with respect to the
      nominal value, in units of the standard deviation.
    """

    # To save memory in large arrays:
    __slots__ = ('_nominal_value', '_linear_part')

    # !! Fix for mean() in NumPy 1.8.0:
    class dtype(object):
        type = staticmethod(lambda value: value)

    #! The code could be modified in order to accommodate for non-float
    # nominal values. This could for instance be done through
    # the operator module: instead of delegating operations to
    # float.__*__ operations, they could be delegated to
    # operator.__*__ functions (while taking care of properly handling
    # reverse operations: __radd__, etc.).

    def __init__(self, nominal_value, linear_part):
        """
        nominal_value -- value of the function when the linear part is
        zero.

        linear_part -- LinearCombination that describes the linear
        part of the AffineScalarFunc.
        """

        # ! A technical consistency requirement is that the
        # linear_part can be nested inside a NestedLinearCombination
        # (because this is how functions on AffineScalarFunc calculate
        # their result: by constructing nested expressions for them).

        # Defines the value at the origin:

        # Only float-like values are handled. One reason is that it
        # does not make sense for a scalar function to be affine to
        # not yield float values. Another reason is that it would not
        # make sense to have a complex nominal value, here (it would
        # not be handled correctly at all): converting to float should
        # be possible.
        self._nominal_value = float(nominal_value)

        # In order to have a linear execution time for long sums, the
        # _linear_part is generally left as is (otherwise, each
        # successive term would expand to a linearly growing sum of
        # terms: efficiently handling such terms [so, without copies]
        # is not obvious, when the algorithm should work for all
        # functions beyond sums).
        self._linear_part = linear_part

    # The following prevents the 'nominal_value' attribute from being
    # modified by the user:
    @property
    def nominal_value(self):
        "Nominal value of the random number."
        return self._nominal_value

    # Abbreviation (for formulas, etc.):
    n = nominal_value

    ############################################################

    # Making derivatives a property gives the user a clean syntax,
    # which is consistent with derivatives becoming a dictionary.
    @property
    def derivatives(self):
        """
        Return a mapping from each Variable object on which the function
        (self) depends to the value of the derivative with respect to
        that variable.

        This mapping should not be modified.

        Derivative values are always floats.

        This mapping is cached, for subsequent calls.
        """

        if not self._linear_part.expanded():
            self._linear_part.expand()
            # Attempts to get the contribution of a variable that the
            # function does not depend on raise a KeyError:
            self._linear_part.linear_combo.default_factory = None

        return self._linear_part.linear_combo

    ############################################################

    ### Operators: operators applied to AffineScalarFunc and/or
    ### float-like objects only are supported. This is why methods
    ### from float are used for implementing these operators.

    # Operators with no reflection:

    ########################################

    # __nonzero__() is supposed to return a boolean value (it is used
    # by bool()). It is for instance used for converting the result
    # of comparison operators to a boolean, in sorted(). If we want
    # to be able to sort AffineScalarFunc objects, __nonzero__ cannot
    # return a AffineScalarFunc object. Since boolean results (such
    # as the result of bool()) don't have a very meaningful
    # uncertainty unless it is zero, this behavior is fine.

    def __bool__(self):
        """
        Equivalent to self != 0.
        """
        #! This might not be relevant for AffineScalarFunc objects
        # that contain values in a linear space which does not convert
        # the float 0 into the null vector (see the __eq__ function:
        # __nonzero__ works fine if subtracting the 0 float from a
        # vector of the linear space works as if 0 were the null
        # vector of that space):
        return self != 0.  # Uses the AffineScalarFunc.__ne__ function

    ########################################

    ## Logical operators: warning: the resulting value cannot always
    ## be differentiated.

    # The boolean operations are not differentiable everywhere, but
    # almost...

    # (1) I can rely on the assumption that the user only has "small"
    # errors on variables, as this is used in the calculation of the
    # standard deviation (which performs linear approximations):

    # (2) However, this assumption is not relevant for some
    # operations, and does not have to hold, in some cases. This
    # comes from the fact that logical operations (e.g. __eq__(x,y))
    # are not differentiable for many usual cases. For instance, it
    # is desirable to have x == x for x = n+/-e, whatever the size of e.
    # Furthermore, n+/-e != n+/-e', if e != e', whatever the size of e or
    # e'.

    # (3) The result of logical operators does not have to be a
    # function with derivatives, as these derivatives are either 0 or
    # don't exist (i.e., the user should probably not rely on
    # derivatives for his code).

    # !! In Python 2.7+, it may be possible to use functools.total_ordering.

    # __eq__ is used in "if data in [None, ()]", for instance. It is
    # therefore important to be able to handle this case too, which is
    # taken care of when force_aff_func_args(eq_on_aff_funcs)
    # returns NotImplemented.
    __eq__ = force_aff_func_args(eq_on_aff_funcs)

    __ne__ = force_aff_func_args(ne_on_aff_funcs)
    __gt__ = force_aff_func_args(gt_on_aff_funcs)

    # __ge__ is not the opposite of __lt__ because these operators do
    # not always yield a boolean (for instance, 0 <= numpy.arange(10)
    # yields an array).
    __ge__ = force_aff_func_args(ge_on_aff_funcs)

    __lt__ = force_aff_func_args(lt_on_aff_funcs)
    __le__ = force_aff_func_args(le_on_aff_funcs)

    ########################################

    # Uncertainties handling:

    def error_components(self):
        """
        Individual components of the standard deviation of the affine
        function (in absolute value), returned as a dictionary with
        Variable objects as keys. The returned variables are the
        independent variables that the affine function depends on.

        This method assumes that the derivatives contained in the
        object take scalar values (and are not a tuple, like what
        math.frexp() returns, for instance).
        """

        # Calculation of the variance:
        error_components = {}

        for (variable, derivative) in self.derivatives.items():

            # print "TYPE", type(variable), type(derivative)

            # Individual standard error due to variable:

            # 0 is returned even for a NaN derivative (in this case no
            # multiplication by the derivative is performed): an exact
            # variable obviously leads to no uncertainty in the
            # functions that depend on it.
            if variable._std_dev == 0:
                # !!! Shouldn't the errors always be floats, as a
                # convention of this module?
                error_components[variable] = 0
            else:
                error_components[variable] = abs(derivative*variable._std_dev)

        return error_components

    @property
    def std_dev(self):
        """
        Standard deviation of the affine function.

        This method assumes that the function returns scalar results.

        This returned standard deviation depends on the current
        standard deviations [std_dev] of the variables (Variable
        objects) involved.
        """
        #! It would be possible to not allow the user to update the
        #std dev of Variable objects, in which case AffineScalarFunc
        #objects could have a pre-calculated or, better, cached
        #std_dev value (in fact, many intermediate AffineScalarFunc do
        #not need to have their std_dev calculated: only the final
        #AffineScalarFunc returned to the user does).
        return CallableStdDev(sqrt(sum(
            delta**2 for delta in self.error_components().values())))

    # Abbreviation (for formulas, etc.):
    s = std_dev

    def __repr__(self):
        # Not putting spaces around "+/-" helps with arrays of
        # Variable, as each value with an uncertainty is a
        # block of signs (otherwise, the standard deviation can be
        # mistaken for another element of the array).

        std_dev = self.std_dev  # Optimization, since std_dev is calculated

        # A zero standard deviation is printed because otherwise,
        # ufloat_fromstr() does not correctly parse back the value
        # ("1.23" is interpreted as "1.23(1)"):

        if std_dev:
            std_dev_str = repr(std_dev)
        else:
            std_dev_str = '0'

        return "%r+/-%s" % (self.nominal_value, std_dev_str)

    def __str__(self):
        # An empty format string and str() usually return the same
        # string
        # (http://docs.python.org/2/library/string.html#format-specification-mini-language):
        return self.format('')

    def __format__(self, format_spec):
        '''
        Formats a number with uncertainty.

        The format specification are the same as for format() for
        floats, as defined for Python 2.6+ (restricted to what the %
        operator accepts, if using an earlier version of Python),
        except that the n presentation type is not supported. In
        particular, the usual precision, alignment, sign flag, etc. can
        be used. The behavior of the various presentation types (e, f,
        g, none, etc.) is similar. Moreover, the format is extended:
        the number of digits of the uncertainty can be controlled, as
        is the way the uncertainty is indicated (with +/- or with the
        short-hand notation 3.14(1), in LaTeX or with a simple text
        string,...).

        Beyond the use of options at the end of the format
        specification, the main difference with floats is that a "u"
        just before the presentation type (f, e, g, none, etc.)
        activates the "uncertainty control" mode (e.g.: ".6u"). This
        mode is also activated when not using any explicit precision
        (e.g.: "g", "10f", "+010,e" format specifications). If the
        uncertainty does not have a meaningful number of significant
        digits (0 and NaN uncertainties), this mode is automatically
        deactivated.

        The nominal value and the uncertainty always use the same
        precision. This implies trailing zeros, in general, even with
        the g format type (contrary to the float case). However, when
        the number of significant digits of the uncertainty is not
        defined (zero or NaN uncertainty), it has no precision, so
        there is no matching. In this case, the original format
        specification is used for the nominal value (any "u" is
        ignored).

        Any precision (".p", where p is a number) is interpreted (if
        meaningful), in the uncertainty control mode, as indicating the
        number p of significant digits of the displayed uncertainty.

        Example: .1uf will return a string with one significant digit
        in the uncertainty (and no exponent).

        If no precision is given, the rounding rules from the
        Particle Data Group are used, if possible
        (http://pdg.lbl.gov/2010/reviews/rpp2010-rev-rpp-intro.pdf). For
        example, the "f" format specification generally does not use
        the default 6 digits after the decimal point, but applies the
        PDG rules.

        A common exponent is used if an exponent is needed for the
        larger of the nominal value (in absolute value) and the
        standard deviation, unless this would result in a zero
        uncertainty being represented as 0e... or a NaN uncertainty as
        NaNe.... Thanks to this common exponent, the quantity that
        best describes the associated probability distribution has a
        mantissa in the usual 1-10 range. The common exponent is
        factored (as in "(1.2+/-0.1)e-5"), unless the format
        specification contains an explicit width (" 1.2e-5+/- 0.1e-5")
        (this allows numbers to be in a single column, when printing
        numbers over many lines). Specifying a minimum width of 1 is a
        way of forcing any common exponent to not be factored out.

        The fill, align, zero and width parameters of the format
        specification are applied individually to each of the nominal
        value and standard deviation or, if the shorthand notation is
        used, globally.

        The sign parameter of the format specification is only applied
        to the nominal value (since the standard deviation is
        positive).

        In the case of a non-LaTeX output, the returned string can
        normally be parsed back with ufloat_fromstr(). This however
        excludes cases where numbers use the "," thousands separator,
        for example.

        Options can be added, at the end of the format
        specification. Multiple options can be specified:

        - When "P" is present, the pretty-printing mode is activated:
          "±" separates the nominal value from the standard deviation,
          exponents use superscript characters, etc.
        - When "S" is present (like in .1uS), the short-hand notation
          1.234(5) is used, indicating an uncertainty on the last
          digits; if the digits of the uncertainty straddle the decimal
          point, it uses a fixed-point notation, like in 12.3(4.5).
        - When "L" is present, the output is formatted with LaTeX.
        - "p" ensures that there are parentheses around the …±… part
          (no parentheses are added if some are already present, for
          instance because of an exponent or of a trailing % sign,
          etc.). This produces outputs like (1.0±0.2) or (1.0±0.2)e7,
          which can be useful for removing any ambiguity if physical
          units are added after the printed number.

        An uncertainty which is exactly zero is represented as the
        integer 0 (i.e. with no decimal point).

        The "%" format type forces the percent sign to be at the end
        of the returned string (it is not attached to each of the
        nominal value and the standard deviation).

        Some details of the formatting can be customized as described
        in format_num().
        '''

        # Convention on limits "between" digits: 0 = exactly at the
        # decimal point, -1 = after the first decimal, 1 = before the
        # units digit, etc.

        # Convention on digits: 0 is units (10**0), 1 is tens, -1 is
        # tenths, etc.

        # This method does the format specification parsing, and
        # calculates the various parts of the displayed value
        # (mantissas, exponent, position of the last digit). The
        # formatting itself is delegated to format_num().

        ########################################

        # Format specification parsing:

        match = re.match(r'''
            (?P<fill>[^{}]??)(?P<align>[<>=^]?)  # fill cannot be { or }
            (?P<sign>[-+ ]?)
            (?P<zero>0?)
            (?P<width>\d*)
            (?P<comma>,?)
            (?:\.(?P<prec>\d+))?
            (?P<uncert_prec>u?)  # Precision for the uncertainty?
            # The type can be omitted. Options must not go here:
            (?P<type>[eEfFgG%]??)  # n not supported
            (?P<options>[PSLp]*)  # uncertainties-specific flags
            $''',
            format_spec,
            re.VERBOSE)

        # Does the format specification look correct?
        if not match:
            raise ValueError(
                'Format specification %r cannot be used with object of type'
                ' %r. Note that uncertainties-specific flags must be put at'
                ' the end of the format string.'
                # Sub-classes handled:
                % (format_spec, self.__class__.__name__))

        # Effective format presentation type: f, e, g, etc., or None,
        # like in
        # https://docs.python.org/3.4/library/string.html#format-specification-mini-language. Contrary
        # to what is written in the documentation, it is not true that
        # None is "the same as 'g'": "{}".format() and "{:g}" do not
        # give the same result, on 31415000000.0. None is thus kept as
        # is instead of being replaced by "g".
        pres_type = match.group('type') or None

        # Shortcut:
        fmt_prec = match.group('prec')  # Can be None

        ########################################

        # Since the '%' (percentage) format specification can change
        # the value to be displayed, this value must first be
        # calculated. Calculating the standard deviation is also an
        # optimization: the standard deviation is generally
        # calculated: it is calculated only once, here:
        nom_val = self.nominal_value
        std_dev = self.std_dev

        # 'options' is the options that must be given to format_num():
        options = set(match.group('options'))

        ########################################

        # The '%' format is treated internally as a display option: it
        # should not be applied individually to each part:
        if pres_type == '%':
            # Because '%' does 0.0055*100, the value
            # 0.5499999999999999 is obtained, which rounds to 0.5. The
            # original rounded value is 0.006. The same behavior is
            # found in Python 2.7: '{:.1%}'.format(0.0055) is '0.5%'.
            # If a different behavior is needed, a solution to this
            # problem would be to do the rounding before the
            # multiplication.
            std_dev *= 100
            nom_val *= 100
            pres_type = 'f'
            options.add('%')

        # At this point, pres_type is in eEfFgG or None (not %).

        ########################################

        # Non-real values (nominal value or standard deviation) must
        # be handled in a specific way:
        real_values = [value for value in [abs(nom_val), std_dev]
                       if not isinfinite(value)]

        # Calculation of digits_limit, which defines the precision of
        # the nominal value and of the standard deviation (it can be
        # None when it does not matter, like for NaN±NaN):

        # Reference value for the calculation of a possible exponent,
        # if needed:
        if pres_type in (None, 'e', 'E', 'g', 'G'):
            # Reference value for the exponent: the largest value
            # defines what the exponent will be (another convention
            # could have been chosen, like using the exponent of the
            # nominal value, irrespective of the standard deviation):
            try:
                exp_ref_value = max(real_values)
            except ValueError:  # No non-NaN value: NaN±NaN…
                # No meaningful common exponent can be obtained:
                pass
            ## else:
            ##     print "EXP_REF_VAL", exp_ref_value

        # Should the precision be interpreted like for a float, or
        # should the number of significant digits on the uncertainty
        # be controlled?
        if ((
            # Default behavior: number of significant digits on the
            # uncertainty controlled (if useful, i.e. only in
            # situations where the nominal value and the standard
            # error digits are truncated at the same place):
            (not fmt_prec and len(real_values)==2)
             or match.group('uncert_prec'))  # Explicit control
            # The number of significant digits of the uncertainty must
            # be meaningful, otherwise the position of the significant
            # digits of the uncertainty does not have a clear
            # meaning. This gives us the *effective* uncertainty
            # control mode:
            and std_dev
            and not isinfinite(std_dev)):

            # The number of significant digits on the uncertainty is
            # controlled.

            # The limit digits_limit on the digits of nom_val and std_dev
            # to be displayed is calculated. If the exponent notation is
            # used, this limit is generally different from the finally
            # displayed limit (e.g. 314.15+/-0.01 has digits_limit=-2, but
            # will be displayed with an exponent as (3.1415+/-0.0001)e+02,
            # which corresponds to 4 decimals after the decimal point, not
            # 2).

            # Number of significant digits to use:
            if fmt_prec:
                num_signif_d = int(fmt_prec)  # Can only be non-negative
                if not num_signif_d:
                    raise ValueError("The number of significant digits"
                                     " on the uncertainty should be positive")
            else:
                (num_signif_d, std_dev) = PDG_precision(std_dev)

            digits_limit = signif_dgt_to_limit(std_dev, num_signif_d)

        else:

            # No control of the number of significant digits on the
            # uncertainty.

            ## print "PRECISION NOT BASED ON UNCERTAINTY"

            # The precision has the same meaning as for floats (it is
            # not the uncertainty that defines the number of digits).

            # The usual default precision is used (this is useful for
            # 3.141592±NaN with an "f" format specification, for
            # example):
            #
            # prec is the precision for the main parts of the final
            # format (in the sense of float formatting):
            #
            # https://docs.python.org/3.4/library/string.html#format-specification-mini-language
            if fmt_prec:
                prec = int(fmt_prec)
            elif pres_type is None:
                prec = 12
            else:
                prec = 6

            if pres_type in ('f', 'F'):

                digits_limit = -prec

            else:  # Format type in None, eEgG

                # We first calculate the number of significant digits
                # to be displayed (if possible):

                if pres_type in ('e', 'E'):
                    # The precision is the number of significant
                    # digits required - 1 (because there is a single
                    # digit before the decimal point, which is not
                    # included in the definition of the precision with
                    # the e/E format type):
                    num_signif_digits = prec+1

                else:  # Presentation type in None, g, G

                    # Effective format specification precision: the rule
                    # of
                    # http://docs.python.org/2.7/library/string.html#format-specification-mini-language
                    # is used:

                    # The final number of significant digits to be
                    # displayed is not necessarily obvious: trailing
                    # zeros are removed (with the gG presentation
                    # type), so num_signif_digits is the number of
                    # significant digits if trailing zeros were not
                    # removed. This quantity is relevant for the
                    # rounding implied by the exponent test of the g/G
                    # format:

                    # 0 is interpreted like 1 (as with floats with a
                    # gG presentation type):
                    num_signif_digits = prec or 1

                # The number of significant digits is important for
                # example for determining the exponent:

                ## print "NUM_SIGNIF_DIGITS", num_signif_digits

                digits_limit = (
                    signif_dgt_to_limit(exp_ref_value, num_signif_digits)
                    if real_values
                    else None)

                ## print "DIGITS_LIMIT", digits_limit

        #######################

        # Common exponent notation: should it be used? use_exp is set
        # accordingly. If a common exponent should be used (use_exp is
        # True), 'common_exp' is set to the exponent that should be
        # used.
if pres_type in ('f', 'F'): use_exp = False elif pres_type in ('e', 'E'): if not real_values: use_exp = False else: use_exp = True # !! This calculation might have been already done, # for instance when using the .0e format: # signif_dgt_to_limit() was called before, which # prompted a similar calculation: common_exp = first_digit(round(exp_ref_value, -digits_limit)) else: # None, g, G # The rules from # https://docs.python.org/3.4/library/string.html#format-specification-mini-language # are applied. # Python's native formatting (whose result could be parsed # in order to determine whether a common exponent should # be used) is not used because there is shared information # between the nominal value and the standard error (same # last digit, common exponent) and extracting this # information from Python would entail parsing its # formatted string, which is in principle inefficient # (internally, Python performs calculations that yield a # string, and the string would be parsed back into # separate parts and numbers, which is in principle # unnecessary). # Should the scientific notation be used? The same rule as # for floats is used ("-4 <= exponent of rounded value < # p"), on the nominal value. if not real_values: use_exp = False else: # Common exponent *if* used: common_exp = first_digit(round(exp_ref_value, -digits_limit)) # print "COMMON EXP TEST VALUE", common_exp # print "LIMIT EXP", common_exp-digits_limit+1 # print "WITH digits_limit", digits_limit # The number of significant digits of the reference value # rounded at digits_limit is exponent-digits_limit+1: if -4 <= common_exp < common_exp-digits_limit+1: use_exp = False else: use_exp = True ######################################## # Calculation of signif_limit (position of the significant # digits limit in the final fixed point representations; this # is either a non-positive number, or None), of # nom_val_mantissa ("mantissa" for the nominal value, # i.e. 
value possibly corrected for a factorized exponent), # and std_dev_mantissa (similarly for the standard # deviation). common_exp is also set to None if no common # exponent should be used. if use_exp: # Not 10.**(-common_exp), for limit values of common_exp: factor = 10.**common_exp nom_val_mantissa = nom_val/factor std_dev_mantissa = std_dev/factor # Limit for the last digit of the mantissas: signif_limit = digits_limit - common_exp else: # No common exponent common_exp = None nom_val_mantissa = nom_val std_dev_mantissa = std_dev signif_limit = digits_limit ## print "SIGNIF_LIMIT", signif_limit ######################################## # Format of the main (i.e. with no exponent) parts (the None # presentation type is similar to the g format type): main_pres_type = 'fF'[(pres_type or 'g').isupper()] # The precision of the main parts must be adjusted so as # to take into account the special role of the decimal # point: if signif_limit is not None: # If signif_limit is pertinent # The decimal point location is always included in the # printed digits (e.g., printing 3456 with only 2 # significant digits requires to print at least four # digits, like in 3456 or 3500). # # The max() is important for example for # 1234567.89123+/-12345.678 with the f format: in this # case, signif_limit is +3 (2 significant digits necessary # for the error, as per the PDG rules), but the (Python # float formatting) precision to be used for the main # parts is 0 (all digits must be shown). # # The 1 for the None pres_type represents "at least one # digit past the decimal point" of Python # (https://docs.python.org/3.4/library/string.html#format-specification-mini-language). This # is only applied for null uncertainties. 
prec = max(-signif_limit, 1 if pres_type is None and not std_dev else 0) ## print "PREC", prec ######################################## # print ( # "FORMAT_NUM parameters: nom_val_mantissa={}," # " std_dev_mantissa={}, common_exp={}," # " match.groupdict()={}, prec={}, main_pres_type={}," # " options={}".format( # nom_val_mantissa, std_dev_mantissa, common_exp, # match.groupdict(), # prec, # main_pres_type, # options)) # Final formatting: return format_num(nom_val_mantissa, std_dev_mantissa, common_exp, match.groupdict(), prec=prec, main_pres_type=main_pres_type, options=options) # Alternate name for __format__, for use with Python < 2.6 (and # other Python versions if the user so chooses: this helps moving # code from Python 2.6 to more recent versions): @set_doc(""" Return the same result as self.__format__(format_spec), or equivalently as the format(self, format_spec) of Python 2.6+. This method is meant to be used for formatting numbers with uncertainties in Python < 2.6, with '... %s ...' % num.format('.2e'). """) def format(*args, **kwargs): return args[0].__format__(*args[1:], **kwargs) def std_score(self, value): """ Return 'value' - nominal value, in units of the standard deviation. Raises a ValueError exception if the standard deviation is zero. """ try: # The ._nominal_value is a float: there is no integer division, # here: return (value - self._nominal_value) / self.std_dev except ZeroDivisionError: raise ValueError("The standard deviation is zero:" " undefined result") def __deepcopy__(self, memo): """ Hook for the standard copy module. The returned AffineScalarFunc is a completely fresh copy, which is fully independent of any variable defined so far. New variables are specially created for the returned AffineScalarFunc object. """ return AffineScalarFunc(self._nominal_value, copy.deepcopy(self._linear_part)) def __getstate__(self): """ Hook for the pickle module. 
        The slot attributes of the parent classes are returned, as
        well as those of the __dict__ attribute of the object (if
        any).
        """

        # In general (case where this class is subclassed), data
        # attributes are stored in two places: possibly in __dict__,
        # and in slots. Data from both locations is returned by this
        # method.

        all_attrs = {}

        # Support for subclasses that do not use __slots__ (except
        # through inheritance): instances have a __dict__
        # attribute. The keys in this __dict__ are shadowed by the
        # slot attribute names (reference:
        # http://stackoverflow.com/questions/15139067/attribute-access-in-python-first-slots-then-dict/15139208#15139208).
        # The method below not only preserves this behavior, but also
        # saves the full contents of __dict__. This is robust:
        # unpickling gives back the original __dict__ even if __dict__
        # contains keys that are shadowed by slot names:

        try:
            all_attrs['__dict__'] = self.__dict__
        except AttributeError:
            pass

        # All the slot attributes are gathered.

        # Classes that do not define __slots__ have the __slots__ of
        # one of their parents (the first parent with their own
        # __slots__ in MRO). This is why the slot names are first
        # gathered (with repetitions removed, in general), and their
        # values obtained later.

        all_slots = set()

        for cls in type(self).mro():

            # In the diamond inheritance pattern, some parent classes
            # may not have __slots__:
            slot_names = getattr(cls, '__slots__', ())

            # Slot names can be given in various forms (string,
            # sequence, iterable):

            if isinstance(slot_names, basestring):
                all_slots.add(slot_names)  # Single name
            else:
                all_slots.update(slot_names)

        # The slot values are stored:
        for name in all_slots:
            try:
                # !! It might happen that '__dict__' is itself a slot
                # name. In this case, its value is saved
                # again.
                # Alternatively, the loop could be done on
                # all_slots - {'__dict__'}:
                all_attrs[name] = getattr(self, name)
            except AttributeError:
                pass  # Undefined slot attribute

        return all_attrs

    def __setstate__(self, data_dict):
        """
        Hook for the pickle module.
        """
        for (name, value) in data_dict.items():
            # Contrary to the default __setstate__(), this does not
            # necessarily save to the instance dictionary (because the
            # instance might contain slots):
            setattr(self, name, value)

# Nicer name, for users: isinstance(ufloat(...), UFloat) is
# True. Also: isinstance(..., UFloat) is the test for "is this a
# number with uncertainties from the uncertainties package?":
UFloat = AffineScalarFunc

###############################################################################

# Some operators can have undefined derivatives but still give
# meaningful values when some of their arguments have a zero
# uncertainty. Such operators return NaN when their derivative is
# not finite. This way, if the uncertainty of the associated
# variable is not 0, a NaN uncertainty is produced, which
# indicates an error; if the uncertainty is 0, then the total
# uncertainty can be returned as 0.

# Exception catching is used so as to not slow down regular
# operation too much:

def nan_if_exception(f):
    '''
    Wrapper around f(x, y) that lets f return NaN when f raises one
    of a few numerical exceptions (ValueError, ZeroDivisionError,
    OverflowError).
    '''
    def wrapped_f(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except (ValueError, ZeroDivisionError, OverflowError):
            return float('nan')

    return wrapped_f

def get_ops_with_reflection():
    """
    Return operators with a reflection, along with their partial
    derivatives.

    Operators are things like +, /, etc. Those considered here have
    two arguments and can be called through Python's reflected methods
    __r…__ (e.g. __radd__).

    See the code for details.
    """

    # Operators with a reflection:

    # We do not include divmod(). This operator could be included, by
    # allowing its result (a tuple) to be differentiated, in
    # derivative_value().
    # However, a similar result can be achieved
    # by the user by calculating separately the division and the
    # result.

    # {operator(x, y): (derivative wrt x, derivative wrt y)}:

    # Note that unknown partial derivatives can be numerically
    # calculated by expressing them as something like
    # "partial_derivative(float.__...__, 1)(x, y)":

    # String expressions are used, so that reversed operators are easy
    # to code, and execute relatively efficiently:

    derivatives_list = {
        'add': ("1.", "1."),
        # 'div' is the '/' operator when __future__.division is not in
        # effect. Since '/' is applied to
        # AffineScalarFunc._nominal_value numbers, it is applied on
        # floats, and is therefore the "usual" mathematical division.
        'div': ("1/y", "-x/y**2"),
        'floordiv': ("0.", "0."),  # Non exact: there is a discontinuity
        # The derivative wrt the 2nd argument is something like (..., x//y),
        # but it is calculated numerically, for convenience:
        'mod': ("1.", "partial_derivative(float.__mod__, 1)(x, y)"),
        'mul': ("y", "x"),
        'sub': ("1.", "-1."),
        'truediv': ("1/y", "-x/y**2")
        }

    # Conversion to Python functions:
    ops_with_reflection = {}
    for (op, derivatives) in derivatives_list.items():
        ops_with_reflection[op] = [
            eval("lambda x, y: %s" % expr) for expr in derivatives]

        # Reflected version: the roles of x and y are swapped:
        ops_with_reflection["r"+op] = [
            eval("lambda y, x: %s" % expr) for expr in reversed(derivatives)]

    # The derivatives of pow() are more complicated:

    # The case x**y is constant on the line x = 0 and on the line
    # y = 0; the corresponding derivatives must be zero in these
    # cases. If the function is actually not defined (e.g. 0**-3),
    # then an exception will be raised when the nominal value is
    # calculated. These derivatives are transformed to NaN if an
    # error happens during their calculation:

    def pow_deriv_0(x, y):
        # Partial derivative of x**y with respect to x:
        if y == 0:
            return 0.
        elif x != 0 or y % 1 == 0:
            return y*x**(y-1)
        else:
            return float('nan')

    def pow_deriv_1(x, y):
        # Partial derivative of x**y with respect to y:
        if x == 0 and y > 0:
            return 0.
else: return log(x)*x**y ops_with_reflection['pow'] = [pow_deriv_0, pow_deriv_1] ops_with_reflection['rpow'] = [lambda y, x: pow_deriv_1(x, y), lambda y, x: pow_deriv_0(x, y)] # Undefined derivatives are converted to NaN when the function # itself can be calculated: for op in ['pow']: ops_with_reflection[op] = [ nan_if_exception(func) for func in ops_with_reflection[op]] ops_with_reflection['r'+op] = [ nan_if_exception(func) for func in ops_with_reflection['r'+op]] return ops_with_reflection # Operators that have a reflection, along with their derivatives: ops_with_reflection = get_ops_with_reflection() # Some effectively modified operators (for the automated tests): modified_operators = [] modified_ops_with_reflection = [] # Custom versions of some operators (instead of extending some float # __*__ operators to AffineScalarFunc, the operators in custom_ops # are used): if sys.version_info < (3,): custom_ops = {} else: # !!! This code is not run by the tests. It would be nice to have # it be tested. def no_complex_result(func): ''' Return a function that does like func, but that raises a ValueError if the result is complex. ''' def no_complex_func(*args, **kwargs): ''' Like %s, but raises a ValueError exception if the result is complex. ''' % func.__name__ value = func(*args, **kwargs) if isinstance(value, complex): raise ValueError('The uncertainties module does not handle' ' complex results') else: return value return no_complex_func # This module does not handle uncertainties on complex numbers: # complex results for the nominal value of some operations cannot # be calculated with an uncertainty: custom_ops = { 'pow': no_complex_result(float.__pow__), 'rpow': no_complex_result(float.__rpow__) } def add_operators_to_AffineScalarFunc(): """ Adds many operators (__add__, etc.) to the AffineScalarFunc class. """ ######################################## #! Derivatives are set to return floats. 
    # For one thing,
    # uncertainties generally involve floats, as they are based on
    # small variations of the parameters. It is also better to
    # protect the user from unexpected integer result that behave
    # badly with the division.

    ## Operators that return a numerical value:

    def _simple_add_deriv(x):
        # Derivative of abs(x) (the value at x == 0 is arbitrarily
        # taken to be 1):
        if x >= 0:
            return 1.
        else:
            return -1.

    # Single-argument operators that should be adapted from floats to
    # AffineScalarFunc objects, associated to their derivative:

    simple_numerical_operators_derivatives = {
        'abs': _simple_add_deriv,
        'neg': lambda x: -1.,
        'pos': lambda x: 1.,
        'trunc': lambda x: 0.
        }

    for (op, derivative) in (
        iter(simple_numerical_operators_derivatives.items())):

        attribute_name = "__%s__" % op

        # float objects don't exactly have the same attributes between
        # different versions of Python (for instance, __trunc__ was
        # introduced with Python 2.6):
        try:
            setattr(AffineScalarFunc, attribute_name,
                    wrap(getattr(float, attribute_name), [derivative]))
        except AttributeError:
            # Version of Python where floats don't have attribute_name:
            pass
        else:
            modified_operators.append(op)

    ########################################
    # Final definition of the operators for AffineScalarFunc objects:

    # Reversed versions (useful for float*AffineScalarFunc, for instance):
    for (op, derivatives) in ops_with_reflection.items():

        attribute_name = '__%s__' % op

        # float objects don't exactly have the same attributes between
        # different versions of Python (for instance, __div__ and
        # __rdiv__ were removed, in Python 3):
        try:
            if op not in custom_ops:
                func_to_wrap = getattr(float, attribute_name)
            else:
                func_to_wrap = custom_ops[op]
        except AttributeError:
            # Version of Python with floats that don't have attribute_name:
            pass
        else:
            setattr(AffineScalarFunc, attribute_name,
                    wrap(func_to_wrap, derivatives))
            modified_ops_with_reflection.append(op)
######################################## # Conversions to pure numbers are meaningless. Note that the # behavior of float(1j) is similar. for coercion_type in ('complex', 'int', 'long', 'float'): def raise_error(self): raise TypeError("can't convert an affine function (%s)" ' to %s; use x.nominal_value' # In case AffineScalarFunc is sub-classed: % (self.__class__, coercion_type)) setattr(AffineScalarFunc, '__%s__' % coercion_type, raise_error) add_operators_to_AffineScalarFunc() # Actual addition of class attributes class NegativeStdDev(Exception): '''Raise for a negative standard deviation''' pass class Variable(AffineScalarFunc): """ Representation of a float-like scalar random variable, along with its uncertainty. Objects are meant to represent variables that are independent from each other (correlations are handled through the AffineScalarFunc class). """ # To save memory in large arrays: __slots__ = ('_std_dev', 'tag') def __init__(self, value, std_dev, tag=None): """ The nominal value and the standard deviation of the variable are set. The value is converted to float. The standard deviation std_dev can be NaN. It should normally be a float or an integer. 'tag' is a tag that the user can associate to the variable. This is useful for tracing variables. The meaning of the nominal value is described in the main module documentation. """ #! The value, std_dev, and tag are assumed by __copy__() not to # be copied. Either this should be guaranteed here, or __copy__ # should be updated. # Only float-like values are handled. One reason is that the # division operator on integers would not produce a # differentiable functions: for instance, Variable(3, 0.1)/2 # has a nominal value of 3/2 = 1, but a "shifted" value # of 3.1/2 = 1.55. value = float(value) # If the variable changes by dx, then the value of the affine # function that gives its value changes by 1*dx: # ! Memory cycles are created. However, they are garbage # collected, if possible. 
        # Using a weakref.WeakKeyDictionary
        # takes much more memory. Thus, this implementation chooses
        # more cycles and a smaller memory footprint instead of no
        # cycles and a larger memory footprint.
        super(Variable, self).__init__(value, LinearCombination({self: 1.}))

        self.std_dev = std_dev  # Assignment through a Python property

        self.tag = tag

    @property
    def std_dev(self):
        return self._std_dev

    # Standard deviations can be modified (this is a feature).
    # AffineScalarFunc objects that depend on the Variable have their
    # std_dev automatically modified (recalculated with the new
    # std_dev of their Variables):
    @std_dev.setter
    def std_dev(self, std_dev):

        # We force the error to be float-like. Since it is considered
        # as a standard deviation, it must be either positive or NaN:
        # (Note: if NaN < 0 is False, there is no need to test
        # separately for NaN. But this is not guaranteed, even if it
        # should work on most platforms.)
        if std_dev < 0 and not isinfinite(std_dev):
            raise NegativeStdDev("The standard deviation cannot be negative")

        self._std_dev = CallableStdDev(std_dev)

    # Support for legacy method:
    def set_std_dev(self, value):  # Obsolete
        deprecation('instead of set_std_dev(), please use'
                    ' .std_dev = ...')
        self.std_dev = value

    # The following method is overridden so that we can represent the tag:
    def __repr__(self):

        num_repr = super(Variable, self).__repr__()

        if self.tag is None:
            return num_repr
        else:
            return "< %s = %s >" % (self.tag, num_repr)

    def __hash__(self):
        # All Variable objects are by definition independent
        # variables, so they never compare equal; therefore, their
        # id() are allowed to differ
        # (http://docs.python.org/reference/datamodel.html#object.__hash__):
        return id(self)

    def __copy__(self):
        """
        Hook for the standard copy module.
        """

        # !!!!!! The comment below might not be valid anymore now that
        # Variables do not contain derivatives anymore.
# This copy implicitly takes care of the reference of the # variable to itself (in self.derivatives): the new Variable # object points to itself, not to the original Variable. # Reference: http://www.doughellmann.com/PyMOTW/copy/index.html #! The following assumes that the arguments to Variable are # *not* copied upon construction, since __copy__ is not supposed # to copy "inside" information: return Variable(self.nominal_value, self.std_dev, self.tag) def __deepcopy__(self, memo): """ Hook for the standard copy module. A new variable is created. """ # This deep copy implicitly takes care of the reference of the # variable to itself (in self.derivatives): the new Variable # object points to itself, not to the original Variable. # Reference: http://www.doughellmann.com/PyMOTW/copy/index.html return self.__copy__() ############################################################################### # Utilities def nominal_value(x): """ Return the nominal value of x if it is a quantity with uncertainty (i.e., an AffineScalarFunc object); otherwise, returns x unchanged. This utility function is useful for transforming a series of numbers, when only some of them generally carry an uncertainty. """ if isinstance(x, AffineScalarFunc): return x.nominal_value else: return x def std_dev(x): """ Return the standard deviation of x if it is a quantity with uncertainty (i.e., an AffineScalarFunc object); otherwise, returns the float 0. This utility function is useful for transforming a series of numbers, when only some of them generally carry an uncertainty. """ if isinstance(x, AffineScalarFunc): return x.std_dev else: return 0. def covariance_matrix(nums_with_uncert): """ Return a matrix that contains the covariances between the given sequence of numbers with uncertainties (AffineScalarFunc objects). The resulting matrix implicitly depends on their ordering in 'nums_with_uncert'. The covariances are floats (never int objects). 
    The returned covariance matrix is the exact linear approximation
    result, if the nominal values of the numbers with uncertainties
    and of their variables are their mean. Otherwise, the returned
    covariance matrix should be close to its linear approximation
    value.

    The returned matrix is a list of lists.
    """
    # See PSI.411 in EOL's notes.

    covariance_matrix = []
    # Only the lower triangle is computed here; the matrix is
    # symmetrized afterwards:
    for (i1, expr1) in enumerate(nums_with_uncert, 1):
        derivatives1 = expr1.derivatives  # Optimization
        vars1 = set(derivatives1)  # !! Python 2.7+: viewkeys() would work
        coefs_expr1 = []

        for expr2 in nums_with_uncert[:i1]:
            derivatives2 = expr2.derivatives  # Optimization
            coefs_expr1.append(sum(
                ((derivatives1[var]*derivatives2[var]*var._std_dev**2)
                 # var is a variable common to both numbers with
                 # uncertainties:
                 for var in vars1.intersection(derivatives2)),
                # The result is always a float (sum() with no terms
                # returns an integer):
                0.))

        covariance_matrix.append(coefs_expr1)

    # We symmetrize the matrix:
    for (i, covariance_coefs) in enumerate(covariance_matrix):
        covariance_coefs.extend(
            [covariance_matrix[j][i]
             for j in range(i+1, len(covariance_matrix))])

    return covariance_matrix

try:
    import numpy
except ImportError:
    pass
else:
    def correlation_matrix(nums_with_uncert):
        '''
        Return the correlation matrix of the given sequence of
        numbers with uncertainties, as a NumPy array of floats.
        '''
        cov_mat = numpy.array(covariance_matrix(nums_with_uncert))

        std_devs = numpy.sqrt(cov_mat.diagonal())

        return cov_mat/std_devs/std_devs[numpy.newaxis].T

    __all__.append('correlation_matrix')

###############################################################################

# Parsing of values with uncertainties:

# Parsing of (part of) numbers. The reason why the decimal part is
# parsed (if any), instead of using the parsing built in float(), is
# that the presence (or not) of a decimal point does matter, in the
# semantics of some representations (e.g. .1(2.)
# = .1+/-2, whereas
# .1(2) = .1+/-0.2), so just getting the numerical value of the part
# in parentheses would not be sufficient.
POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE = r'((\d*)(\.\d*)?|nan|NAN|inf|INF)'

# Regexp for a number with uncertainty (e.g., "-1.234(2)e-6"), where
# the uncertainty is optional (in which case the uncertainty is
# implicit). The uncertainty can also be nan or NAN:
#
# !! WARNING: in Python 2, the code relies on "… % " returning
# a Unicode string (even if the template is not Unicode):
NUMBER_WITH_UNCERT_RE_STR = u'''
    ([+-])?  # Sign
    %s  # Main number
    (?:\\(%s\\))?  # Optional uncertainty
    (?:
        (?:[eE]|\\s*×\\s*10)
        (.*)
    )?  # Optional exponent
    ''' % (POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE,
           POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE)

NUMBER_WITH_UNCERT_RE_MATCH = re.compile(
    u"%s$" % NUMBER_WITH_UNCERT_RE_STR, re.VERBOSE).match

# Number with uncertainty with a factored exponent (e.g., of the form
# (... +/- ...)e10): this is a loose matching, so as to accommodate
# for multiple formats.
# NOTE(review): the group names below were stripped by the text
# extraction; they are reconstructed from their use in
# str_to_number_with_uncert() (match.group('simple_num_with_uncert')
# and match.group('exp_value')):
NUMBER_WITH_UNCERT_GLOBAL_EXP_RE_MATCH = re.compile(u'''
    \\(
    (?P<simple_num_with_uncert>.*)
    \\)
    (?:[eE]|\\s*×\\s*10) (?P<exp_value>.*)
    $''', re.VERBOSE).match

class NotParenUncert(ValueError):
    '''
    Raised when a string representing an exact number or a number with
    an uncertainty indicated between parentheses was expected but not
    found.
    '''

def parse_error_in_parentheses(representation):
    """
    Return (value, error) from a string representing a number with
    uncertainty like 12.34(5), 12.34(142), 12.5(3.4), 12.3(4.2)e3, or
    13.4(nan)e10. If no parenthesis is given, an uncertainty of one
    on the last digit is assumed.

    The exponent can also be given in the pretty-print form
    "×10" followed by a (possibly superscript) exponent, which is
    normalized through nrmlze_superscript().

    The digits between parentheses correspond to the same number of
    digits at the end of the nominal value (the decimal point in the
    uncertainty is optional). Example: 12.34(142) = 12.34±1.42.

    Raises ValueError if the string cannot be parsed.
    """

    match = NUMBER_WITH_UNCERT_RE_MATCH(representation)

    if match:
        # The 'main' part is the nominal value, with 'int'eger part, and
        # 'dec'imal part. The 'uncert'ainty is similarly broken into its
        # integer and decimal parts.
        (sign, main, _, main_dec, uncert, uncert_int, uncert_dec,
         exponent) = match.groups()
    else:
        raise NotParenUncert("Unparsable number representation: '%s'."
                             " See the documentation of ufloat_fromstr()."
                             % representation)

    # Global exponent:
    if exponent:
        factor = 10.**nrmlze_superscript(exponent)
    else:
        factor = 1

    # Nominal value:
    value = float((sign or '')+main)*factor

    if uncert is None:
        # No uncertainty was found: an uncertainty of 1 on the last
        # digit is assumed:
        uncert_int = '1'  # The other parts of the uncertainty are None

    # Do we have a fully explicit uncertainty?
    if uncert_dec is not None or uncert in {'nan', 'NAN', 'inf', 'INF'}:
        uncert_value = float(uncert)
    else:
        # uncert_int represents an uncertainty on the last digits:

        # The number of digits after the period defines the power of
        # 10 that must be applied to the provided uncertainty:
        if main_dec is None:
            num_digits_after_period = 0
        else:
            # main_dec includes the leading decimal point, hence the -1:
            num_digits_after_period = len(main_dec)-1

        uncert_value = int(uncert_int)/10.**num_digits_after_period

    # We apply the exponent to the uncertainty as well:
    uncert_value *= factor

    return (value, uncert_value)

# Regexp for catching the two variable parts of -1.2×10⁻¹²:
PRETTY_PRINT_MATCH = re.compile(u'(.*?)\\s*×\\s*10(.*)').match

def to_float(value_str):
    '''
    Converts a string representing a float to a float.

    The usual valid Python float() representations are correctly
    parsed.

    In addition, the pretty-print notation -1.2×10⁻¹² is also
    converted.

    ValueError is raised if no float can be obtained.
    '''

    try:
        return float(value_str)
    except ValueError:
        pass

    # The pretty-print notation is tried:
    match = PRETTY_PRINT_MATCH(value_str)
    if match:
        try:
            return float(match.group(1))*10.**nrmlze_superscript(match.group(2))
        except ValueError:
            raise ValueError('Mantissa or exponent incorrect in pretty-print'
                             ' form %s' % value_str)
    else:
        raise ValueError('No valid Python float or pretty-print form'
                         ' recognized in %s' % value_str)

cannot_parse_ufloat_msg_pat = (
    'Cannot parse %s: see the documentation for ufloat_fromstr() for a'
    ' list of accepted formats')

# The following function is not exposed because it can in effect be
# obtained by doing x = ufloat_fromstr(representation) and reading
# x.nominal_value and x.std_dev:
def str_to_number_with_uncert(representation):
    """
    Given a string that represents a number with uncertainty, returns
    the nominal value and the uncertainty.

    See the documentation for ufloat_fromstr() for a list of accepted
    formats.

    When no numerical error is given, an uncertainty of 1 on the last
    digit is implied.

    Raises ValueError if the string cannot be parsed.

    representation -- string with no leading or trailing spaces.
    """

    # The "p" format can add parentheses around the whole printed
    # result: we remove them:
    if representation.startswith('(') and representation.endswith(')'):
        representation = representation[1:-1]

    match = NUMBER_WITH_UNCERT_GLOBAL_EXP_RE_MATCH(representation)

    # The representation is simplified, but the global factor is
    # calculated:

    if match:
        # We have a form with a factored exponent: (1.23 +/- 0.01)e10,
        # etc.
        exp_value_str = match.group('exp_value')
        try:
            exponent = nrmlze_superscript(exp_value_str)
        except ValueError:
            raise ValueError(cannot_parse_ufloat_msg_pat % representation)

        factor = 10.**exponent
        # The part between the parentheses is parsed below:
        representation = match.group('simple_num_with_uncert')
    else:
        factor = 1  # No global exponential factor

    match = re.match(u'(.*)(?:\\+/-|±)(.*)', representation)
    if match:

        (nom_value, uncert) = match.groups()

        try:
            # Simple form 1234.45+/-1.2 or 1234.45±1.2, or 1.23e-10+/-1e-23
            # or -1.2×10⁻¹²±1e23:
            parsed_value = (to_float(nom_value)*factor,
                            to_float(uncert)*factor)
        except ValueError:
            raise ValueError(cannot_parse_ufloat_msg_pat % representation)

    else:
        # Form with error parentheses or no uncertainty:
        try:
            parsed_value = parse_error_in_parentheses(representation)
        except NotParenUncert:
            raise ValueError(cannot_parse_ufloat_msg_pat % representation)

    return parsed_value

def ufloat_fromstr(representation, tag=None):
    """
    Return a new random variable (Variable object) from a string.

    Strings 'representation' of the form '12.345+/-0.015',
    '12.345(15)', '12.3' or u'1.2±0.1' (Unicode string) are recognized
    (see more complete list below). In the last case, an uncertainty
    of +/-1 is assigned to the last digit.

    Invalid representations raise a ValueError.

    This function tries to parse back most of the formats that are
    made available by this module. Examples of valid string
    representations:

        12.3e10+/-5e3
        (-3.1415 +/- 0.0001)e+02  # Factored exponent

        # Pretty-print notation (only with a unicode string):
        12.3e10 ± 5e3  # ± symbol
        (12.3 ± 5.0) × 10⁻¹²  # Times symbol, superscript
        12.3 ± 5e3  # Mixed notation (± symbol, but e exponent)

        # Double-exponent values:
        (-3.1415 +/- 1e-4)e+200
        (1e-20 +/- 3)e100

        0.29
        31.
        -31.
        31
        -3.1e10

        -1.23(3.4)
        -1.34(5)
        1(6)
        3(4.2)
        -9(2)
        1234567(1.2)
        12.345(15)
        -12.3456(78)e-6
        12.3(0.4)e-5
        169.0(7)
        169.1(15)
        .123(4)
        .1(.4)

        # NaN uncertainties:
        12.3(nan)
        12.3(NAN)
        3±nan

    Surrounding spaces are ignored.

    About the "shorthand" notation: 1.23(3) = 1.23 ± 0.03 but 1.23(3.)
    = 1.23 ± 3.00. Thus, the presence of a decimal point in
    the uncertainty signals an absolute uncertainty (instead of an
    uncertainty on the last digits of the nominal value).
    """
    # The actual parsing is delegated to the non-public string parser;
    # surrounding whitespace is ignored, as documented above:
    (nominal_value, std_dev) = str_to_number_with_uncert(
        representation.strip())

    return ufloat(nominal_value, std_dev, tag)

def ufloat_obsolete(representation, tag=None):
    '''
    Legacy version of ufloat(). Will eventually be removed.

    representation -- either a (nominal_value, std_dev) tuple, or a
    string representation of a number with uncertainty, in a format
    recognized by ufloat_fromstr().

    tag -- optional tag, forwarded unchanged to ufloat() or
    ufloat_fromstr().
    '''
    # Dispatch on the type of the obsolete representation:
    if isinstance(representation, tuple):
        return ufloat(representation[0], representation[1], tag)
    else:
        return ufloat_fromstr(representation, tag)

# The arguments are named for the new version, instead of bearing
# names that are closer to their obsolete use (e.g., std_dev could be
# instead std_dev_or_tag, since it can be the tag, in the obsolete
# ufloat((3, 0.14), "pi") form). This has the advantage of allowing
# new code to use keyword arguments as in ufloat(nominal_value=3,
# std_dev=0.14), without breaking when the obsolete form is not
# supported anymore.
def ufloat(nominal_value, std_dev=None, tag=None):
    """
    Return a new random variable (Variable object).

    The only non-obsolete use is:

    - ufloat(nominal_value, std_dev),
    - ufloat(nominal_value, std_dev, tag=...).

    Other input parameters are temporarily supported:

    - ufloat((nominal_value, std_dev)),
    - ufloat((nominal_value, std_dev), tag),
    - ufloat(str_representation),
    - ufloat(str_representation, tag).

    Valid string representations str_representation are listed in
    the documentation for ufloat_fromstr().

    nominal_value -- nominal value of the random variable. It is more
    meaningful to use a value close to the central value or to the
    mean. This value is propagated by mathematical operations as if it
    was a float.

    std_dev -- standard deviation of the random variable. The standard
    deviation must be convertible to a positive float, or be NaN.
    tag -- optional string tag for the variable.  Variables don't have
    to have distinct tags.  Tags are useful for tracing what values
    (and errors) enter in a given result (through the
    error_components() method).
    """
    try:
        # Standard case:
        return Variable(nominal_value, std_dev, tag=tag)
    # Exception types raised by, respectively: tuple or string that
    # can be converted through float() (case of a number with no
    # uncertainty), and string that cannot be converted through
    # float():
    except (TypeError, ValueError):
        # Obsolete two-positional-argument forms: the second positional
        # argument, if any, was the tag:
        if tag is not None:
            tag_arg = tag  # tag keyword used:
        else:
            tag_arg = std_dev  # 2 positional arguments form
        try:
            final_ufloat = ufloat_obsolete(nominal_value, tag_arg)
        except:  # The input is incorrect, not obsolete
            raise
        else:
            # Obsolete, two-argument call:
            deprecation(
                'either use ufloat(nominal_value, std_dev),'
                ' ufloat(nominal_value, std_dev, tag), or the'
                ' ufloat_fromstr() function, for string representations.')
            return final_ufloat
uncertainties-3.1.7/uncertainties/lib1to2/000077500000000000000000000000001425362552000205345ustar00rootroot00000000000000uncertainties-3.1.7/uncertainties/lib1to2/__init__.py000066400000000000000000000000001425362552000226330ustar00rootroot00000000000000uncertainties-3.1.7/uncertainties/lib1to2/fixes/000077500000000000000000000000001425362552000216525ustar00rootroot00000000000000uncertainties-3.1.7/uncertainties/lib1to2/fixes/__init__.py000066400000000000000000000000001425362552000237510ustar00rootroot00000000000000uncertainties-3.1.7/uncertainties/lib1to2/fixes/fix_std_dev.py000066400000000000000000000020151425362552000245200ustar00rootroot00000000000000'''
Fixer for lib2to3. Transforms .std_dev() calls into .std_dev
attribute access.

(c) 2013 by Eric O. LEBIGOT.
'''

from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import Name, Assign

class FixStdDev(BaseFix):

    # lib2to3 pattern: matches both the obsolete ".std_dev()" call and
    # the obsolete ".set_std_dev(arg)" call (second alternative, with
    # the argument captured as set_arg):
    PATTERN = """ power< any* trailer< '.' 'std_dev' > trailer< '(' ')' > > | power< any* trailer< '.'
'set_std_dev' > trailer< '(' set_arg=any ')' > > """

    def transform(self, node, results):
        # lib2to3 fixer hook: rewrites the matched node in place.
        if 'set_arg' in results:  # Case of .set_std_dev()
            # set_std_dev => std_dev
            attribute = node.children[-2]  # .set_std_dev
            attribute.children[1].replace(Name('std_dev'))
            # Call "(arg)": removed
            node.children[-1].remove()
            # Replacement by an assignment:
            node.replace(Assign(node.clone(), results['set_arg'].clone()))
        else:
            # '.std_dev' is followed by a call with no argument: the call
            # is removed:
            node.children[-1].remove()
uncertainties-3.1.7/uncertainties/lib1to2/fixes/fix_std_devs.py000066400000000000000000000007521425362552000247110ustar00rootroot00000000000000'''
Fixer for lib2to3. Transform .std_devs() calls into .std_devs
attribute access.

(c) 2016 by Eric O. LEBIGOT.
'''

from lib2to3.fixer_base import BaseFix
# NOTE(review): Name and Assign appear unused in this fixer — confirm
# before removing the import.
from lib2to3.fixer_util import Name, Assign

class FixStdDevs(BaseFix):

    # lib2to3 pattern: matches the obsolete ".std_devs()" call:
    PATTERN = """ power< any* trailer< '.' 'std_devs' > trailer< '(' ')' > > """

    def transform(self, node, results):
        # '.std_dev' is followed by a call with no argument: the call
        # is removed:
        node.children[-1].remove()
uncertainties-3.1.7/uncertainties/lib1to2/fixes/fix_uarray_umatrix.py000066400000000000000000000047741425362552000261540ustar00rootroot00000000000000'''
Fixer for lib2to3. Transforms uarray(tuple) into
uarray(nominal_values, std_devs) and uarray(single_arg) into
uarray(*single_arg).

(c) 2013 by Eric O. LEBIGOT (EOL).
'''

from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import String, ArgList, Comma, syms

###############################################################################
# lib2to3 grammar parts.

#! Warning: indentation is meaningful!

# (tuple):
tuple_call = """ trailer< '(' atom< '(' testlist_gexp< arg0=any ',' arg1=any > ')' > ')' >"""

###############################################################################

class FixUarrayUmatrix(BaseFix):

    # Non dotted access, then dotted access.
# Tuple call, then single-argument call PATTERN = """ power< 'uarray' {tuple_call} any* > | power< object=NAME trailer< '.' 'uarray' > {tuple_call} any* > | power< 'uarray' trailer< '(' args=any ')' > any* > | power< object=NAME trailer< '.' 'uarray' > trailer< '(' args=any ')' > any* > """.format(tuple_call=tuple_call) # Same pattern, for umatrix(): PATTERN = '{}|{}'.format(PATTERN, PATTERN.replace('uarray', 'umatrix')) def transform(self, node, results): if 'object' in results: # If dotted access: unc.uarray() args = node.children[2] else: args = node.children[1] if 'args' in results: # Non-tuple argument # A star will be inserted in from of the single argument: # ! The following keeps spaces in front of the argument, # if any (but this is safer than adding forcefully a star # in front of the value of the argument: the argument can # be a name (where it works), but also anything else, # including a lib2to3.pytree.Node that has no value.) This # is OK, as the syntax f(* (2, 1)) is valid. args_node = results['args'] # We must make sure that there is a single argument: if args_node.type == syms.arglist: return # Nothing modified # Single argument (in position 1): new_args = [String('*'), args.children[1].clone()] else: # Tuple argument # New arguments: new_args = [results['arg0'].clone(), Comma(), results['arg1'].clone()] # Argument list update: args.replace(ArgList(new_args)) uncertainties-3.1.7/uncertainties/lib1to2/fixes/fix_ufloat.py000066400000000000000000000057411425362552000243730ustar00rootroot00000000000000''' Fixer for lib2to3. Transforms ufloat(tuple,...) and ufloat(string,...) into ufloat(nominal_value, std_dev,...) and ufloat_fromstr (c) 2013 by Eric O. LEBIGOT. ''' from lib2to3.fixer_base import BaseFix from lib2to3.fixer_util import ArgList, Call, Comma, Name, syms ############################################################################### # lib2to3 grammar parts. #! Warning: indentation is meaningful! 
# (tuple): tuple_call = """ trailer< '(' atom< '(' testlist_gexp< arg0=any ',' arg1=any > ')' > ')' >""" # (tuple, any): tuple_any_call = """ trailer< '(' arglist< atom< '(' testlist_gexp< arg0=any ',' arg1=any > ')' > ',' tag=any > ')' >""" ############################################################################### class FixUfloat(BaseFix): # Non dotted access, then dotted access. # Tuple call, then string call. # No-tag call, then tag call. PATTERN = """ power< 'ufloat' {tuple_call} any* > | power< 'ufloat' {tuple_any_call} any* > | power< 'ufloat' trailer< '(' string=STRING ')' > any* > | power< 'ufloat' trailer< '(' arglist< string=STRING ',' tag=any > ')' > any* > | power< object=NAME trailer< '.' 'ufloat' > {tuple_call} any* > | power< object=NAME trailer< '.' 'ufloat' > {tuple_any_call} any* > | power< object=NAME trailer< '.' 'ufloat' > trailer< '(' string=STRING ')' > any* > | power< object=NAME trailer< '.' 'ufloat' > trailer< '(' arglist< string=STRING ',' tag=any > ')' > any* > """.format(tuple_call=tuple_call, tuple_any_call=tuple_any_call) def transform(self, node, results): # Handling of the first argument: if 'string' in results: # String as first argument new_func_name = 'ufloat_fromstr' # New arguments: new_args=[results['string'].clone()] else: # Tuple as first argument new_func_name = 'ufloat' # New arguments: new_args = [results['arg0'].clone(), Comma(), results['arg1'].clone()] # Handling of the second argument (call with a tag): if 'tag' in results: new_args.extend([Comma(), results['tag'].clone()]) if 'object' in results: # If dotted access: unc.ufloat() func_name = node.children[1].children[1] args = node.children[2] else: func_name = node.children[0] args = node.children[1] # Function name update: func_name.value = new_func_name #! 
func_name.changed() # Necessary when only .value is changed # Argument list update: args.replace(ArgList(new_args)) uncertainties-3.1.7/uncertainties/lib1to2/test_1to2.py000066400000000000000000000156221425362552000227400ustar00rootroot00000000000000#!/usr/bin/env python ''' Unit tests for the uncertainties.lib1to2 code update package. Meant to be run through nosetests. (c) 2013-2020 by Eric O. LEBIGOT (EOL). ''' # Code inspired by: # # - lib2to3.tests.test_fixers.py from builtins import str import sys import os # !! Would it be possible to use an import hook so as to stop the # import if the Python version is not high enough, instead of having # like here a whole indented block? if sys.version_info < (2, 7) or "TRAVIS" in os.environ or "APPVEYOR" in os.environ: # This package uses lib2to3, which requires Python 2.6+. # lib2to3.tests.support is missing from 2.7.3 Travis Python packages. # !! Nosetests for Python 2.6 also fails (it looks like it tries # to run tests via lib2to3/tests/test_refactor.py): pass else: import os try: # lib2to3 test support seems to have moved to a new place in 2013: import test.test_lib2to3.support as support except ImportError: # Pre-~2013 path for lib2to3 test support import lib2to3.tests.support as support # The lib1to2.fixes package given to lib2to3 is the *local* package # (not to another installed module). This is important for the # __import__() used via support.get_refactorer(). sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir)) def check_refactor(refactorer, source, expected): """ Raises an AssertionError if the given lib2to3.refactor.RefactoringTool does not refactor 'source' into 'expected'. source, expected -- strings (typically with Python code). """ # !! 
str() is from future's builtins and is only needed for Python 2, # where it is mostly equivalent to unicode(): new = str( refactorer.refactor_string(support.reformat(source), '')) assert support.reformat(expected) == new, ( "Refactoring failed: '{}' => '{}' instead of '{}'".format( source, new.strip(), expected)) # print 'Checked:', source, '=>', expected def check_all(fixer, tests): ''' Takes a fixer name (module from fixes) and a mapping that maps code using the obsolete syntax into updated code, and checks whether the code is correctly updated. ''' refactorer = support.get_refactorer( fixer_pkg='lib1to2', fixers=[fixer]) for (input_str, out_str) in tests.items(): check_refactor(refactorer, input_str, out_str) def test_fix_std_dev(): 'Tests the transformation of std_dev() into std_dev.' tests = { 'x.std_dev()': 'x.std_dev', 'y.std_dev(); unc.std_dev(z)': 'y.std_dev; unc.std_dev(z)', 'uncertainties.std_dev(x)': 'uncertainties.std_dev(x)', 'std_dev(x)': 'std_dev(x)', 'obj.x.std_dev()': 'obj.x.std_dev', """ long_name.std_dev( # No argument! )""": """ long_name.std_dev""", # set_std_dev => .std_dev: 'x.set_std_dev(3)': 'x.std_dev = 3', 'y = set_std_dev(3)': 'y = set_std_dev(3)', # None 'func = x.set_std_dev': 'func = x.set_std_dev', 'obj.x.set_std_dev(sin(y))': 'obj.x.std_dev = sin(y)' } check_all('std_dev', tests) def test_ufloat(): ''' Test of the transformation of ufloat(tuple,...) and ufloat(string,...) into ufloat(nominal_value, std_dev, tag=...). 
''' tests = { # Tuples: 'ufloat((3, 0.14))': 'ufloat(3, 0.14)', 'ufloat((3, 0.14), "pi")': 'ufloat(3, 0.14, "pi")', "ufloat((3, 0.14), 'pi')": "ufloat(3, 0.14, 'pi')", "x = ufloat((3, 0.14), tag='pi')": "x = ufloat(3, 0.14, tag='pi')", # Simple expressions that can be transformed: 'ufloat((n, s), tag="var")': 'ufloat(n, s, tag="var")', # Simple expressions that cannot be transformed automatically: 'ufloat(str_repr, tag="var")': 'ufloat(str_repr, tag="var")', 'ufloat(*tuple_repr, tag="var")': 'ufloat(*tuple_repr, tag="var")', 'ufloat(*t[0, 0])': 'ufloat(*t[0, 0])', # Strings: 'ufloat("-1.23(3.4)")': 'ufloat_fromstr("-1.23(3.4)")', "ufloat('-1.23(3.4)')": "ufloat_fromstr('-1.23(3.4)')", 'ufloat("-1.23(3.4)", "var")': 'ufloat_fromstr("-1.23(3.4)", "var")', 'ufloat("-1.23(3.4)", tag="var")': 'ufloat_fromstr("-1.23(3.4)", tag="var")' } # Automatic addition of a dotted access: tests.update(dict( # !! Dictionary comprehension usable with Python 2.7+ (orig.replace('ufloat', 'unc.ufloat'), new.replace('ufloat', 'unc.ufloat')) for (orig, new) in tests.items())) # Test for space consistency: tests[' t = u.ufloat("3")'] = ' t = u.ufloat_fromstr("3")' # Exponentiation test: tests.update(dict( # !! Dictionary comprehension usable with Python 2.7+ (orig+'**2', new+'**2') for (orig, new) in tests.items())) # Exponent test: tests['2**ufloat("3")'] = '2**ufloat_fromstr("3")' # Opposite test: tests['-ufloat("3")'] = '-ufloat_fromstr("3")' check_all('ufloat', tests) def test_uarray_umatrix(): ''' Test of the transformation of uarray(tuple,...) into uarray(nominal_values, std_devs). Also performs the same tests on umatrix(). 
''' tests = { 'uarray((arange(3), std_devs))': 'uarray(arange(3), std_devs)', 'uarray(tuple_arg)': 'uarray(*tuple_arg)', # Unmodified, correct code: 'uarray(values, std_devs)': 'uarray(values, std_devs)', # Spaces tests: 'uarray( ( arange(3), std_devs ) ) ': 'uarray( arange(3), std_devs) ', 'uarray( tuple_arg )': 'uarray(* tuple_arg)' } # Automatic addition of a dotted access: tests.update(dict( # !! Dictionary comprehension usable with Python 2.7+ (orig.replace('uarray', 'un.uarray'), new.replace('uarray', 'un.uarray')) for (orig, new) in tests.items())) # Exponentiation test: tests.update(dict( # !! Dictionary comprehension usable with Python 2.7+ (orig+'**2', new+'**2') for (orig, new) in tests.items())) # Test for space consistency: tests[' t = u.uarray(args)'] = ' t = u.uarray(*args)' # Same tests, but for umatrix: tests.update(dict( (orig.replace('uarray', 'umatrix'), new.replace('uarray', 'umatrix')) for (orig, new) in tests.items())) check_all('uarray_umatrix', tests) uncertainties-3.1.7/uncertainties/test_umath.py000066400000000000000000000271041425362552000220130ustar00rootroot00000000000000""" Tests of the code in uncertainties.umath. These tests can be run through the Nose testing framework. (c) 2010-2016 by Eric O. LEBIGOT (EOL). """ from __future__ import division from __future__ import absolute_import # Standard modules import sys import math # Local modules: from uncertainties import ufloat import uncertainties.core as uncert_core import uncertainties.umath_core as umath_core from . import test_uncertainties ############################################################################### # Unit tests def test_fixed_derivatives_math_funcs(): """ Comparison between function derivatives and numerical derivatives. This comparison is useful for derivatives that are analytical. """ for name in umath_core.many_scalars_to_scalar_funcs: # print "Checking %s..." 
% name func = getattr(umath_core, name) # Numerical derivatives of func: the nominal value of func() results # is used as the underlying function: numerical_derivatives = uncert_core.NumericalDerivatives( lambda *args: func(*args)) test_uncertainties.compare_derivatives(func, numerical_derivatives) # Functions that are not in umath_core.many_scalars_to_scalar_funcs: ## # modf(): returns a tuple: def frac_part_modf(x): return umath_core.modf(x)[0] def int_part_modf(x): return umath_core.modf(x)[1] test_uncertainties.compare_derivatives( frac_part_modf, uncert_core.NumericalDerivatives( lambda x: frac_part_modf(x))) test_uncertainties.compare_derivatives( int_part_modf, uncert_core.NumericalDerivatives( lambda x: int_part_modf(x))) ## # frexp(): returns a tuple: def mantissa_frexp(x): return umath_core.frexp(x)[0] def exponent_frexp(x): return umath_core.frexp(x)[1] test_uncertainties.compare_derivatives( mantissa_frexp, uncert_core.NumericalDerivatives( lambda x: mantissa_frexp(x))) test_uncertainties.compare_derivatives( exponent_frexp, uncert_core.NumericalDerivatives( lambda x: exponent_frexp(x))) def test_compound_expression(): """ Test equality between different formulas. """ x = ufloat(3, 0.1) # Prone to numerical errors (but not much more than floats): assert umath_core.tan(x) == umath_core.sin(x)/umath_core.cos(x) def test_numerical_example(): "Test specific numerical examples" x = ufloat(3.14, 0.01) result = umath_core.sin(x) # In order to prevent big errors such as a wrong, constant value # for all analytical and numerical derivatives, which would make # test_fixed_derivatives_math_funcs() succeed despite incorrect # calculations: assert ("%.6f +/- %.6f" % (result.nominal_value, result.std_dev) == "0.001593 +/- 0.010000") # Regular calculations should still work: assert("%.11f" % umath_core.sin(3) == "0.14112000806") def test_monte_carlo_comparison(): """ Full comparison to a Monte-Carlo calculation. 
Both the nominal values and the covariances are compared between the direct calculation performed in this module and a Monte-Carlo simulation. """ try: import numpy import numpy.random except ImportError: import warnings warnings.warn("Test not performed because NumPy is not available") return # Works on numpy.arrays of Variable objects (whereas umath_core.sin() # does not): sin_uarray_uncert = numpy.vectorize(umath_core.sin, otypes=[object]) # Example expression (with correlations, and multiple variables combined # in a non-linear way): def function(x, y): """ Function that takes two NumPy arrays of the same size. """ # The uncertainty due to x is about equal to the uncertainty # due to y: return 10 * x**2 - x * sin_uarray_uncert(y**3) x = ufloat(0.2, 0.01) y = ufloat(10, 0.001) function_result_this_module = function(x, y) nominal_value_this_module = function_result_this_module.nominal_value # Covariances "f*f", "f*x", "f*y": covariances_this_module = numpy.array(uncert_core.covariance_matrix( (x, y, function_result_this_module))) def monte_carlo_calc(n_samples): """ Calculate function(x, y) on n_samples samples and returns the median, and the covariances between (x, y, function(x, y)). """ # Result of a Monte-Carlo simulation: x_samples = numpy.random.normal(x.nominal_value, x.std_dev, n_samples) y_samples = numpy.random.normal(y.nominal_value, y.std_dev, n_samples) # !! astype() is a fix for median() in NumPy 1.8.0: function_samples = function(x_samples, y_samples).astype(float) cov_mat = numpy.cov([x_samples, y_samples], function_samples) return (numpy.median(function_samples), cov_mat) (nominal_value_samples, covariances_samples) = monte_carlo_calc(1000000) ## Comparison between both results: # The covariance matrices must be close: # We rely on the fact that covariances_samples very rarely has # null elements: # !!! The test could be done directly with NumPy's comparison # tools, no? 
See assert_allclose, assert_array_almost_equal_nulp # or assert_array_max_ulp. This is relevant for all vectorized # occurrences of numbers_close. assert numpy.vectorize(test_uncertainties.numbers_close)( covariances_this_module, covariances_samples, 0.06).all(), ( "The covariance matrices do not coincide between" " the Monte-Carlo simulation and the direct calculation:\n" "* Monte-Carlo:\n%s\n* Direct calculation:\n%s" % (covariances_samples, covariances_this_module) ) # The nominal values must be close: assert test_uncertainties.numbers_close( nominal_value_this_module, nominal_value_samples, # The scale of the comparison depends on the standard # deviation: the nominal values can differ by a fraction of # the standard deviation: math.sqrt(covariances_samples[2, 2]) / abs(nominal_value_samples) * 0.5), ( "The nominal value (%f) does not coincide with that of" " the Monte-Carlo simulation (%f), for a standard deviation of %f." % (nominal_value_this_module, nominal_value_samples, math.sqrt(covariances_samples[2, 2])) ) def test_math_module(): "Operations with the math module" x = ufloat(-1.5, 0.1) # The exponent must not be differentiated, when calculating the # following (the partial derivative with respect to the exponent # is not defined): assert (x**2).nominal_value == 2.25 # Regular operations are chosen to be unchanged: assert isinstance(umath_core.sin(3), float) # factorial() must not be "damaged" by the umath_core module, so as # to help make it a drop-in replacement for math (even though # factorial() does not work on numbers with uncertainties # because it is restricted to integers, as for # math.factorial()): assert umath_core.factorial(4) == 24 # fsum is special because it does not take a fixed number of # variables: assert umath_core.fsum([x, x]).nominal_value == -3 # Functions that give locally constant results are tested: they # should give the same result as their float equivalent: for name in umath_core.locally_cst_funcs: try: func = 
getattr(umath_core, name) except AttributeError: continue # Not in the math module, so not in umath_core either assert func(x) == func(x.nominal_value) # The type should be left untouched. For example, isnan() # should always give a boolean: assert type(func(x)) == type(func(x.nominal_value)) # The same exceptions should be generated when numbers with uncertainties # are used: # The type of the expected exception is first determined, because # it varies between versions of Python (OverflowError in Python # 2.6+, ValueError in Python 2.5,...): try: math.log(0) except Exception as err_math: # Python 3 does not make exceptions local variables: they are # restricted to their except block: err_math_args = err_math.args exception_class = err_math.__class__ try: umath_core.log(0) except exception_class as err_ufloat: assert err_math_args == err_ufloat.args else: raise Exception('%s exception expected' % exception_class.__name__) try: umath_core.log(ufloat(0, 0)) except exception_class as err_ufloat: assert err_math_args == err_ufloat.args else: raise Exception('%s exception expected' % exception_class.__name__) try: umath_core.log(ufloat(0, 1)) except exception_class as err_ufloat: assert err_math_args == err_ufloat.args else: raise Exception('%s exception expected' % exception_class.__name__) def test_hypot(): ''' Special cases where derivatives cannot be calculated: ''' x = ufloat(0, 1) y = ufloat(0, 2) # Derivatives that cannot be calculated simply return NaN, with no # exception being raised, normally: result = umath_core.hypot(x, y) assert test_uncertainties.isnan(result.derivatives[x]) assert test_uncertainties.isnan(result.derivatives[y]) def test_power_all_cases(): ''' Test special cases of umath_core.pow(). 
    '''
    # Delegates to the shared checks defined in test_uncertainties:
    test_uncertainties.power_all_cases(umath_core.pow)

# test_power_special_cases() is similar to
# test_uncertainties.py:test_power_special_cases(), but with small
# differences: the built-in pow() and math.pow() are slightly
# different:
def test_power_special_cases():
    '''
    Checks special cases of umath_core.pow().
    '''
    test_uncertainties.power_special_cases(umath_core.pow)

    # We want the same behavior for numbers with uncertainties and for
    # math.pow() at their nominal values.
    positive = ufloat(0.3, 0.01)
    negative = ufloat(-0.3, 0.01)

    # The type of the expected exception is first determined, because
    # it varies between versions of Python (OverflowError in Python
    # 2.6+, ValueError in Python 2.5,...):
    try:
        math.pow(0, negative.nominal_value)
    except Exception as err_math:
        # Python 3 does not make exceptions local variables: they are
        # restricted to their except block:
        # NOTE(review): err_math_args is saved but not compared below in
        # this test — confirm whether the args comparison was intended.
        err_math_args = err_math.args
        exception_class = err_math.__class__

    # http://stackoverflow.com/questions/10282674/difference-between-the-built-in-pow-and-math-pow-for-floats-in-python
    try:
        umath_core.pow(ufloat(0, 0.1), negative)
    except exception_class as err:  # "as err", for Python 2.6+
        pass
    else:
        raise Exception('%s exception expected' % exception_class.__name__)
    try:
        result = umath_core.pow(negative, positive)
    except exception_class:  # Assumed: same exception as for pow(0, negative)
        # The reason why it should also fail in Python 3 is that the
        # result of Python 3 is a complex number, which uncertainties
        # does not handle (no uncertainties on complex numbers). In
        # Python 2, this should always fail, since Python 2 does not
        # know how to calculate it.
        pass
    else:
        raise Exception('%s exception expected' % exception_class.__name__)

def test_power_wrt_ref():
    '''
    Checks special cases of the umath_core.pow() power operator.
''' test_uncertainties.power_wrt_ref(umath_core.pow, math.pow) uncertainties-3.1.7/uncertainties/test_uncertainties.py000066400000000000000000002457531425362552000235660ustar00rootroot00000000000000# coding=utf-8 """ Tests of the code in uncertainties/__init__.py. These tests can be run through the Nose testing framework. (c) 2010-2016 by Eric O. LEBIGOT (EOL). """ from __future__ import division from __future__ import print_function # Standard modules from builtins import str from builtins import zip from builtins import map from builtins import range import copy import weakref import math from math import isnan, isinf import random import sys # 3rd-party modules # import nose.tools # Local modules import uncertainties.core as uncert_core from uncertainties.core import ufloat, AffineScalarFunc, ufloat_fromstr from uncertainties import umath # The following information is useful for making sure that the right # version of Python is running the tests (for instance with the Travis # Continuous Integration system): print("Testing with Python", sys.version) ############################################################################### # Utilities for unit testing def numbers_close(x, y, tolerance=1e-6): """ Returns True if the given floats are close enough. The given tolerance is the relative difference allowed, or the absolute difference, if one of the numbers is 0. NaN is allowed: it is considered close to itself. """ # !!! Python 3.5+ has math.isclose(): maybe it could be used here. 
# Instead of using a try and ZeroDivisionError, we do a test, # NaN could appear silently: if x != 0 and y != 0: if isinf(x): return isinf(y) elif isnan(x): return isnan(y) else: # Symmetric form of the test: return 2*abs(x-y)/(abs(x)+abs(y)) < tolerance else: # Either x or y is zero return abs(x or y) < tolerance def ufloats_close(x, y, tolerance=1e-6): ''' Tests if two numbers with uncertainties are close, as random variables: this is stronger than testing whether their nominal value and standard deviation are close. The tolerance is applied to both the nominal value and the standard deviation of the difference between the numbers. ''' diff = x-y return (numbers_close(diff.nominal_value, 0, tolerance) and numbers_close(diff.std_dev, 0, tolerance)) class DerivativesDiffer(Exception): pass def compare_derivatives(func, numerical_derivatives, num_args_list=None): """ Checks the derivatives of a function 'func' (as returned by the wrap() wrapper), by comparing them to the 'numerical_derivatives' functions. Raises a DerivativesDiffer exception in case of problem. These functions all take the number of arguments listed in num_args_list. If num_args is None, it is automatically obtained. Tests are done on random arguments. """ try: funcname = func.name except AttributeError: funcname = func.__name__ # print "Testing", func.__name__ if not num_args_list: # Detecting automatically the correct number of arguments is not # always easy (because not all values are allowed, etc.): num_args_table = { 'atanh': [1], 'log': [1, 2] # Both numbers of arguments are tested } if funcname in num_args_table: num_args_list = num_args_table[funcname] else: num_args_list = [] # We loop until we find reasonable function arguments: # We get the number of arguments by trial and error: for num_args in range(10): try: #! Giving integer arguments is good for preventing # certain functions from failing even though num_args # is their correct number of arguments # (e.g. 
math.ldexp(x, i), where i must be an integer) func(*(1,)*num_args) except TypeError: pass # Not the right number of arguments else: # No error # num_args is a good number of arguments for func: num_args_list.append(num_args) if not num_args_list: raise Exception("Can't find a reasonable number of arguments" " for function '%s'." % funcname) for num_args in num_args_list: # Argument numbers that will have a random integer value: integer_arg_nums = set() if funcname == 'ldexp': # The second argument must be an integer: integer_arg_nums.add(1) while True: try: # We include negative numbers, for more thorough tests: args = [] for arg_num in range(num_args): if arg_num in integer_arg_nums: args.append(random.choice(range(-10, 10))) else: args.append( uncert_core.Variable(random.random()*4-2, 0)) # 'args', but as scalar values: args_scalar = [uncert_core.nominal_value(v) for v in args] func_approx = func(*args) # Some functions yield simple Python constants, after # wrapping in wrap(): no test has to be performed. # Some functions also yield tuples... if isinstance(func_approx, AffineScalarFunc): # We compare all derivatives: for (arg_num, (arg, numerical_deriv)) in ( enumerate(zip(args, numerical_derivatives))): # Some arguments might not be differentiable: if isinstance(arg, int): continue fixed_deriv_value = func_approx.derivatives[arg] num_deriv_value = numerical_deriv(*args_scalar) # This message is useful: the user can see that # tests are really performed (instead of not being # performed, silently): print("Testing derivative #%d of %s at %s" % ( arg_num, funcname, args_scalar)) if not numbers_close(fixed_deriv_value, num_deriv_value, 1e-4): # It is possible that the result is NaN: if not isnan(func_approx): raise DerivativesDiffer( "Derivative #%d of function '%s' may be" " wrong: at args = %s," " value obtained = %.16f," " while numerical approximation = %.16f." 
% (arg_num, funcname, args, fixed_deriv_value, num_deriv_value)) except ValueError as err: # Arguments out of range, or of wrong type # Factorial(real) lands here: if str(err).startswith('factorial'): integer_arg_nums = set([0]) continue # We try with different arguments # Some arguments might have to be integers, for instance: except TypeError as err: if len(integer_arg_nums) == num_args: raise Exception("Incorrect testing procedure: unable to " "find correct argument values for %s: %s" % (funcname, err)) # Another argument might be forced to be an integer: integer_arg_nums.add(random.choice(range(num_args))) else: # We have found reasonable arguments, and the test passed: break ############################################################################### def test_value_construction(): ''' Tests the various means of constructing a constant number with uncertainty *without a string* (see test_ufloat_fromstr(), for this). ''' ## Simple construction: x = ufloat(3, 0.14) assert x.nominal_value == 3 assert x.std_dev == 0.14 assert x.tag is None # ... with tag as positional argument: x = ufloat(3, 0.14, 'pi') assert x.nominal_value == 3 assert x.std_dev == 0.14 assert x.tag == 'pi' # ... with tag keyword: x = ufloat(3, 0.14, tag='pi') assert x.nominal_value == 3 assert x.std_dev == 0.14 assert x.tag == 'pi' ## Comparison with the obsolete tuple form: # The following tuple is stored in a variable instead of being # repeated in the calls below, so that the automatic code update # does not replace ufloat((3, 0.14)) by ufloat(3, 14): the goal # here is to make sure that the obsolete form gives the same # result as the new form. 
representation = (3, 0.14) # Obsolete representation x = ufloat(3, 0.14) x2 = ufloat(representation) # Obsolete assert x.nominal_value == x2.nominal_value assert x.std_dev == x2.std_dev assert x.tag is None assert x2.tag is None # With tag as positional argument: x = ufloat(3, 0.14, "pi") x2 = ufloat(representation, "pi") # Obsolete assert x.nominal_value == x2.nominal_value assert x.std_dev == x2.std_dev assert x.tag == 'pi' assert x2.tag == 'pi' # With tag keyword: x = ufloat(3, 0.14, tag="pi") x2 = ufloat(representation, tag="pi") # Obsolete assert x.nominal_value == x2.nominal_value assert x.std_dev == x2.std_dev assert x.tag == 'pi' assert x2.tag == 'pi' # Negative standard deviations should be caught in a nice way # (with the right exception): try: x = ufloat(3, -0.1) except uncert_core.NegativeStdDev: pass try: # Obsolete form: x = ufloat((3, -0.1)) except uncert_core.NegativeStdDev: pass ## Incorrect forms should not raise any deprecation warning, but ## raise an exception: try: ufloat(1) # Form that has never been allowed except: pass else: raise Exception("An exception should be raised") def test_ufloat_fromstr(): "Input of numbers with uncertainties as a string" # String representation, and numerical values: tests = { "-1.23(3.4)": (-1.23, 3.4), # (Nominal value, error) " -1.23(3.4) ": (-1.23, 3.4), # Spaces ignored "-1.34(5)": (-1.34, 0.05), "1(6)": (1, 6), "3(4.2)": (3, 4.2), "-9(2)": (-9, 2), "1234567(1.2)": (1234567, 1.2), "12.345(15)": (12.345, 0.015), "-12.3456(78)e-6": (-12.3456e-6, 0.0078e-6), "0.29": (0.29, 0.01), "31.": (31, 1), "-31.": (-31, 1), # The following tests that the ufloat() routine does # not consider '31' like the tuple ('3', '1'), which would # make it expect two numbers (instead of 2 1-character # strings): "31": (31, 1), "-3.1e10": (-3.1e10, 0.1e10), "169.0(7)": (169, 0.7), "-0.1+/-1": (-0.1, 1), "-13e-2+/-1e2": (-13e-2, 1e2), '-14.(15)': (-14, 15), '-100.0(15)': (-100, 1.5), '14.(15)': (14, 15), # Global exponent: 
'(3.141+/-0.001)E+02': (314.1, 0.1), ## Pretty-print notation: # ± sign, global exponent (not pretty-printed): u'(3.141±0.001)E+02': (314.1, 0.1), # ± sign, individual exponent: u'3.141E+02±0.001e2': (314.1, 0.1), # ± sign, times symbol, superscript (= full pretty-print): u'(3.141 ± 0.001) × 10²': (314.1, 0.1), ## Others # Forced parentheses: '(2 +/- 0.1)': (2, 0.1), # NaN uncertainty: u'(3.141±nan)E+02': (314.1, float('nan')), '3.141e+02+/-nan': (314.1, float('nan')), '3.4(nan)e10': (3.4e10, float('nan')), # NaN value: 'nan+/-3.14e2': (float('nan'), 314), # "Double-floats" '(-3.1415 +/- 1e-4)e+200': (-3.1415e200, 1e196), '(-3.1415e-10 +/- 1e-4)e+200': (-3.1415e190, 1e196), # Special float representation: '-3(0.)': (-3, 0) } for (representation, values) in tests.items(): # We test the fact that surrounding spaces are removed: representation = u' {} '.format(representation) # Without tag: num = ufloat_fromstr(representation) assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) assert num.tag is None # With a tag as positional argument: num = ufloat_fromstr(representation, 'test variable') assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) assert num.tag == 'test variable' # With a tag as keyword argument: num = ufloat_fromstr(representation, tag='test variable') assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) assert num.tag == 'test variable' ## Obsolete forms num = ufloat(representation) # Obsolete assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) assert num.tag is None # Call with a tag list argument: num = ufloat(representation, 'test variable') # Obsolete assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) assert num.tag == 'test variable' # Call with a tag keyword argument: num = ufloat(representation, tag='test variable') # Obsolete 
assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) assert num.tag == 'test variable' ############################################################################### # Test of correctness of the fixed (usually analytical) derivatives: def test_fixed_derivatives_basic_funcs(): """ Pre-calculated derivatives for operations on AffineScalarFunc. """ def check_op(op, num_args): """ Makes sure that the derivatives for function '__op__' of class AffineScalarFunc, which takes num_args arguments, are correct. If num_args is None, a correct value is calculated. """ op_string = "__%s__" % op func = getattr(AffineScalarFunc, op_string) numerical_derivatives = uncert_core.NumericalDerivatives( # The __neg__ etc. methods of AffineScalarFunc only apply, # by definition, to AffineScalarFunc objects: we first map # possible scalar arguments (used for calculating # derivatives) to AffineScalarFunc objects: lambda *args: func(*map(uncert_core.to_affine_scalar, args))) compare_derivatives(func, numerical_derivatives, [num_args]) # Operators that take 1 value: for op in uncert_core.modified_operators: check_op(op, 1) # Operators that take 2 values: for op in uncert_core.modified_ops_with_reflection: check_op(op, 2) # Additional, more complex checks, for use with the nose unit testing # framework. 
def test_copy():
    "Standard copy module integration"
    import gc

    x = ufloat(3, 0.1)
    assert x == x

    y = copy.copy(x)
    assert x != y
    assert not(x == y)
    assert y in y.derivatives.keys()  # y must not copy the dependence on x

    z = copy.deepcopy(x)
    assert x != z

    # Copy tests on expressions:
    t = x + 2*z
    # t depends on x:
    assert x in t.derivatives

    # The relationship between the copy of an expression and the
    # original variables should be preserved:
    t_copy = copy.copy(t)
    # Shallow copy: the variables on which t depends are not copied:
    assert x in t_copy.derivatives
    assert (uncert_core.covariance_matrix([t, z]) ==
            uncert_core.covariance_matrix([t_copy, z]))

    # However, the relationship between a deep copy and the original
    # variables should be broken, since the deep copy created new,
    # independent variables:
    t_deepcopy = copy.deepcopy(t)
    assert x not in t_deepcopy.derivatives
    assert (uncert_core.covariance_matrix([t, z]) !=
            uncert_core.covariance_matrix([t_deepcopy, z]))

    # Test of implementations with weak references:

    # Weak references: destroying a variable should never destroy the
    # integrity of its copies (which would happen if the copy keeps a
    # weak reference to the original, in its derivatives member: the
    # weak reference to the original would become invalid):
    del x

    gc.collect()

    # The copy y must still depend on (the copy of) itself even after
    # the original x has been garbage-collected:
    assert y in list(y.derivatives.keys())

## Classes for the pickling tests (put at the module level, so that
## they can be unpickled):

# Subclass without slots:
class NewVariable_dict(uncert_core.Variable):
    pass

# Subclass with slots defined by a tuple:
class NewVariable_slots_tuple(uncert_core.Variable):
    __slots__ = ('new_attr',)

# Subclass with slots defined by a string:
class NewVariable_slots_str(uncert_core.Variable):
    __slots__ = 'new_attr'

def test_pickling():
    "Standard pickle module integration."
    import pickle

    x = ufloat(2, 0.1)
    x_unpickled = pickle.loads(pickle.dumps(x))

    assert x != x_unpickled  # Pickling creates copies

    ## Tests with correlations and AffineScalarFunc objects:
    f = 2*x
    assert isinstance(f, AffineScalarFunc)
    (f_unpickled, x_unpickled2) = pickle.loads(pickle.dumps((f, x)))
    # Correlations must be preserved: f = 2*x, so f - x - x must be
    # exactly zero if the unpickled objects are still correlated:
    assert f_unpickled - x_unpickled2 - x_unpickled2 == 0

    ## Tests with subclasses:

    for subclass in (NewVariable_dict, NewVariable_slots_tuple,
                     NewVariable_slots_str):

        x = subclass(3, 0.14)

        # Pickling test with possibly uninitialized slots:
        pickle.loads(pickle.dumps(x))

        # Unpickling test:
        x.new_attr = 'New attr value'
        x_unpickled = pickle.loads(pickle.dumps(x))
        # Must exist (from the slots of the parent class):
        x_unpickled.nominal_value
        x_unpickled.new_attr  # Must exist

    ##

    # Corner case test: when an attribute is present both in __slots__
    # and in __dict__, it is first looked up from the slots
    # (references:
    # http://docs.python.org/2/reference/datamodel.html#invoking-descriptors,
    # http://stackoverflow.com/a/15139208/42973). As a consequence,
    # the pickling process must pickle the correct value (i.e., not
    # the value from __dict__):
    x = NewVariable_dict(3, 0.14)
    x._nominal_value = 'in slots'
    # Corner case: __dict__ key which is also a slot name (it is
    # shadowed by the corresponding slot, so this is very unusual,
    # though):
    x.__dict__['_nominal_value'] = 'in dict'
    # Additional __dict__ attribute:
    x.dict_attr = 'dict attribute'

    x_unpickled = pickle.loads(pickle.dumps(x))
    # We make sure that the data is still there and untouched:
    assert x_unpickled._nominal_value == 'in slots'
    assert x_unpickled.__dict__ == x.__dict__

    ##

    # Corner case that should have no impact on the code but which is
    # not prevented by the documentation: case of constant linear
    # terms (the potential gotcha is that if the linear_combo
    # attribute is empty, __getstate__()'s result could be false, and
    # so __setstate__() would not be called and the original empty
    # linear combination would not be set in linear_combo.
    x = uncert_core.LinearCombination({})
    assert pickle.loads(pickle.dumps(x)).linear_combo == {}

def test_int_div():
    "Integer division"
    # We perform all operations on floats, because derivatives can
    # otherwise be meaningless:
    x = ufloat(3.9, 2)//2
    assert x.nominal_value == 1.
    # All errors are supposed to be small, so the ufloat()
    # in x violates the assumption.  Therefore, the following is
    # correct:
    assert x.std_dev == 0.0

def test_comparison_ops():
    "Test of comparison operators"

    import random

    # Operations on quantities equivalent to Python numbers must still
    # be correct:
    a = ufloat(-3, 0)
    b = ufloat(10, 0)
    c = ufloat(10, 0)
    assert a < b
    assert a < 3
    assert 3 < b  # This is first given to int.__lt__()
    assert b == c

    x = ufloat(3, 0.1)

    # One constraint is that usual Python code for inequality testing
    # still work in a reasonable way (for instance, it is generally
    # desirable that functions defined by different formulas on
    # different intervals can still do "if 0 < x < 1:...".  This
    # supposes again that errors are "small" (as for the estimate of
    # the standard error).
    assert x > 1

    # The limit case is not obvious:
    assert not(x >= 3)
    assert not(x < 3)

    assert x == x
    # Comparaison between Variable and AffineScalarFunc:
    assert x == x + 0
    # Comparaison between 2 _different_ AffineScalarFunc objects
    # representing the same value:
    assert x/2 == x/2
    # With uncorrelated result that have the same behavior (value and
    # standard error):
    assert 2*ufloat(1, 0.1) != ufloat(2, 0.2)
    # Comparaison between 2 _different_ Variable objects
    # that are uncorrelated:
    assert x != ufloat(3, 0.1)

    assert x != ufloat(3, 0.2)

    # Comparison to other types should work:
    assert x != None  # Not comparable
    assert x-x == 0  # Comparable, even though the types are different
    assert x != [1, 2]

    ####################

    # Checks of the semantics of logical operations: they return True
    # iff they are always True when the parameters vary in an
    # infinitesimal interval inside sigma (sigma == 0 is a special
    # case):

    def test_all_comparison_ops(x, y):
        """
        Takes two Variable objects.

        Fails if any comparison operation fails to follow the proper
        semantics: a comparison only returns True if the correspond float
        comparison results are True for all the float values taken by
        the variables (of x and y) when they vary in an infinitesimal
        neighborhood within their uncertainty.

        This test is stochastic: it may, exceptionally, fail for
        correctly implemented comparison operators.
        """

        import random

        def random_float(var):
            """
            Returns a random value for Variable var, in an
            infinitesimal interval withing its uncertainty.  The case
            of a zero uncertainty is special.
            """
            return ((random.random()-0.5) * min(var.std_dev, 1e-5)
                    + var.nominal_value)

        # All operations are tested:
        for op in ["__%s__" % name
                   for name in('ne', 'eq', 'lt', 'le', 'gt', 'ge')]:

            try:
                float_func = getattr(float, op)
            except AttributeError:  # Python 2.3's floats don't have __ne__
                continue

            # Determination of the correct truth value of func(x, y):

            sampled_results = []

            # The "main" value is an important particular case, and
            # the starting value for the final result
            # (correct_result):

            sampled_results.append(float_func(x.nominal_value,
                                              y.nominal_value))

            for check_num in range(50):  # Many points checked
                sampled_results.append(float_func(random_float(x),
                                                  random_float(y)))

            min_result = min(sampled_results)
            max_result = max(sampled_results)

            if min_result == max_result:
                correct_result = min_result
            else:

                # Almost all results must be True, for the final value
                # to be True:
                num_min_result = sampled_results.count(min_result)

                # 1 exception is considered OK:
                correct_result = (num_min_result == 1)

            try:
                assert correct_result == getattr(x, op)(y)
            except AssertionError:
                # This message is useful for debugging stochastic
                # failures:
                print("Sampling results:", sampled_results)
                raise Exception("Semantic value of %s %s (%s) %s not"
                                " correctly reproduced."
                                % (x, op, y, correct_result))

    # With different numbers:
    test_all_comparison_ops(ufloat(3, 0.1),
                            ufloat(-2, 0.1))
    test_all_comparison_ops(ufloat(0, 0),  # Special number
                            ufloat(1, 1))
    test_all_comparison_ops(ufloat(0, 0),  # Special number
                            ufloat(0, 0.1))
    # With identical numbers:
    test_all_comparison_ops(ufloat(0, 0),
                            ufloat(0, 0))
    test_all_comparison_ops(ufloat(1, 1),
                            ufloat(1, 1))

def test_logic():
    "Boolean logic: __nonzero__, bool."
    x = ufloat(3, 0)
    y = ufloat(0, 0)
    z = ufloat(0, 0.1)
    t = ufloat(-1, 2)

    assert bool(x) == True
    assert bool(y) == False
    assert bool(z) == True
    assert bool(t) == True  # Only infinitseimal neighborhood are used

def test_obsolete():
    'Tests some obsolete creation of number with uncertainties'
    x = ufloat(3, 0.1)
    # Obsolete function, protected against automatic modification:
    x.set_std_dev.__call__(0.2)  # Obsolete

    x_std_dev = x.std_dev
    assert x_std_dev() == 0.2  # Obsolete call

def test_basic_access_to_data():
    "Access to data from Variable and AffineScalarFunc objects."

    x = ufloat(3.14, 0.01, "x var")
    assert x.tag == "x var"
    assert x.nominal_value == 3.14
    assert x.std_dev == 0.01

    # Case of AffineScalarFunc objects:
    y = x + 0
    assert type(y) == AffineScalarFunc
    assert y.nominal_value == 3.14
    assert y.std_dev == 0.01

    # Details on the sources of error:
    a = ufloat(-1, 0.001)
    y = 2*x + 3*x + 2 + a
    error_sources = y.error_components()
    assert len(error_sources) == 2  # 'a' and 'x'
    assert error_sources[x] == 0.05
    assert error_sources[a] == 0.001

    # Derivative values should be available:
    assert y.derivatives[x] == 5

    # Modification of the standard deviation of variables:
    x.std_dev = 1
    assert y.error_components()[x] == 5  # New error contribution!

    # Calculated values with uncertainties should not have a settable
    # standard deviation:
    y = 2*x
    try:
        y.std_dev = 1
    except AttributeError:
        pass
    else:
        raise Exception(
            "std_dev should not be settable for calculated results")

    # Calculation of deviations in units of the standard deviations:
    assert 10/x.std_dev == x.std_score(10 + x.nominal_value)

    # "In units of the standard deviation" is not always meaningful:
    x.std_dev = 0
    try:
        x.std_score(1)
    except ValueError:
        pass  # Normal behavior

def test_correlations():
    "Correlations between variables"

    a = ufloat(1, 0)
    x = ufloat(4, 0.1)
    y = x*2 + a
    # Correlations cancel "naive" additions of uncertainties:
    assert y.std_dev != 0
    normally_zero = y - (x*2 + 1)
    assert normally_zero.nominal_value == 0
    assert normally_zero.std_dev == 0

def test_no_coercion():
    """
    Coercion of Variable object to a simple float.

    The coercion should be impossible, like for complex numbers.
    """

    x = ufloat(4, 1)
    try:
        assert float(x) == 4
    except TypeError:
        pass
    else:
        raise Exception("Conversion to float() should fail with TypeError")

def test_wrapped_func_no_args_no_kwargs():
    '''
    Wrap a function that takes only positional-or-keyword parameters.
    '''

    def f_auto_unc(x, y):
        return 2*x+umath.sin(y)

    # Like f_auto_unc, but does not accept numbers with uncertainties:
    def f(x, y):
        assert not isinstance(x, uncert_core.UFloat)
        assert not isinstance(y, uncert_core.UFloat)
        return f_auto_unc(x, y)

    x = uncert_core.ufloat(1, 0.1)
    y = uncert_core.ufloat(10, 2)

    ### Automatic numerical derivatives:

    ## Fully automatic numerical derivatives:
    f_wrapped = uncert_core.wrap(f)
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))

    ## Automatic additional derivatives for non-defined derivatives,
    ## and explicit None derivative:
    f_wrapped = uncert_core.wrap(f, [None])  # No derivative for y
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))

    ### Explicit derivatives:

    ## Fully defined derivatives:
    f_wrapped = uncert_core.wrap(f, [lambda x, y: 2,
                                     lambda x, y: math.cos(y)])
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))

    ## Automatic additional derivatives for non-defined derivatives:
    f_wrapped = uncert_core.wrap(f, [lambda x, y: 2])  # No derivative for y
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))

def test_wrapped_func_args_no_kwargs():
    '''
    Wrap a function that takes only positional-or-keyword and
    var-positional parameters.
    '''

    def f_auto_unc(x, y, *args):
        return 2*x+umath.sin(y)+3*args[1]

    # Like f_auto_unc, but does not accept numbers with uncertainties:
    def f(x, y, *args):
        assert not any(isinstance(value, uncert_core.UFloat)
                       for value in [x, y] + list(args))
        return f_auto_unc(x, y, *args)

    x = uncert_core.ufloat(1, 0.1)
    y = uncert_core.ufloat(10, 2)
    s = 'string arg'
    z = uncert_core.ufloat(100, 3)

    args = [s, z, s]  # var-positional parameters

    ### Automatic numerical derivatives:

    ## Fully automatic numerical derivatives:
    f_wrapped = uncert_core.wrap(f)
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))

    ## Automatic additional derivatives for non-defined derivatives,
    ## and explicit None derivative:
    f_wrapped = uncert_core.wrap(f, [None])  # No derivative for y
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))

    ### Explicit derivatives:

    ## Fully defined derivatives:
    f_wrapped = uncert_core.wrap(f,
                                 [lambda x, y, *args: 2,
                                  lambda x, y, *args: math.cos(y),
                                  None,
                                  lambda x, y, *args: 3])

    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))

    ## Automatic additional derivatives for non-defined derivatives:

    # No derivative for y:
    f_wrapped = uncert_core.wrap(f, [lambda x, y, *args: 2])
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))

def test_wrapped_func_no_args_kwargs():
    '''
    Wrap a function that takes only positional-or-keyword and
    var-keyword parameters.
''' def f_auto_unc(x, y, **kwargs): return 2*x+umath.sin(y)+3*kwargs['z'] # Like f_auto_unc, but does not accept numbers with uncertainties: def f(x, y, **kwargs): assert not any(isinstance(value, uncert_core.UFloat) for value in [x, y] + list(kwargs.values())) return f_auto_unc(x, y, **kwargs) x = uncert_core.ufloat(1, 0.1) y = uncert_core.ufloat(10, 2) s = 'string arg' z = uncert_core.ufloat(100, 3) kwargs = {'s': s, 'z': z} # Arguments not in signature ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: f_wrapped = uncert_core.wrap(f) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None]) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None], {'z': None}) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) # No derivative for positional-or-keyword parameter y, derivative # for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None], {'z': lambda x, y, **kwargs: 3}) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) ### Explicit derivatives: ## Fully defined derivatives: f_wrapped = uncert_core.wrap( f, 
[lambda x, y, **kwargs: 2, lambda x, y, **kwargs: math.cos(y)], {'z:': lambda x, y, **kwargs: 3}) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) ## Automatic additional derivatives for non-defined derivatives: # No derivative for y or z: f_wrapped = uncert_core.wrap(f, [lambda x, y, **kwargs: 2]) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) def test_wrapped_func_args_kwargs(): ''' Wrap a function that takes positional-or-keyword, var-positional and var-keyword parameters. ''' def f_auto_unc(x, y, *args, **kwargs): return 2*x+umath.sin(y)+4*args[1]+3*kwargs['z'] # Like f_auto_unc, but does not accept numbers with uncertainties: def f(x, y, *args, **kwargs): assert not any(isinstance(value, uncert_core.UFloat) for value in [x, y]+list(args)+list(kwargs.values())) return f_auto_unc(x, y, *args, **kwargs) x = uncert_core.ufloat(1, 0.1) y = uncert_core.ufloat(10, 2) t = uncert_core.ufloat(1000, 4) s = 'string arg' z = uncert_core.ufloat(100, 3) args = [s, t, s] kwargs = {'u': s, 'z': z} # Arguments not in signature ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: f_wrapped = uncert_core.wrap(f) assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None, None, None, lambda x, y, *args, **kwargs: 4]) assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) # No derivative for positional-or-keyword parameter y, no # derivative 
for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None], {'z': None}) assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) # No derivative for positional-or-keyword parameter y, derivative # for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None], {'z': lambda x, y, *args, **kwargs: 3}) assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) ### Explicit derivatives: ## Fully defined derivatives: f_wrapped = uncert_core.wrap( f, [lambda x, y, *args, **kwargs: 2, lambda x, y, *args, **kwargs: math.cos(y)], {'z:': lambda x, y, *args, **kwargs: 3}) assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) ## Automatic additional derivatives for non-defined derivatives: # No derivative for y or z: f_wrapped = uncert_core.wrap(f, [lambda x, y, *args, **kwargs: 2]) assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) def test_wrapped_func(): """ Test uncertainty-aware functions obtained through wrapping. 
""" ######################################## # Function which can automatically handle numbers with # uncertainties: def f_auto_unc(angle, *list_var): return umath.cos(angle) + sum(list_var) def f(angle, *list_var): # We make sure that this function is only ever called with # numbers with no uncertainty (since it is wrapped): assert not isinstance(angle, uncert_core.UFloat) assert not any(isinstance(arg, uncert_core.UFloat) for arg in list_var) return f_auto_unc(angle, *list_var) f_wrapped = uncert_core.wrap(f) my_list = [1, 2, 3] ######################################## # Test of a wrapped function that only calls the original # function: it should obtain the exact same result: assert f_wrapped(0, *my_list) == f(0, *my_list) # 1 == 1 +/- 0, so the type must be checked too: assert type(f_wrapped(0, *my_list)) == type(f(0, *my_list)) ######################################## # Call with uncertainties: angle = uncert_core.ufloat(1, 0.1) list_value = uncert_core.ufloat(3, 0.2) # The random variables must be the same (full correlation): assert ufloats_close(f_wrapped(angle, *[1, angle]), f_auto_unc(angle, *[1, angle])) assert ufloats_close(f_wrapped(angle, *[list_value, angle]), f_auto_unc(angle, *[list_value, angle])) ######################################## # Non-numerical arguments, and explicit and implicit derivatives: def f(x, y, z, t, u): return x+2*z+3*t+4*u f_wrapped = uncert_core.wrap( f, [lambda *args: 1, None, lambda *args:2, None]) # No deriv. for u assert f_wrapped(10, 'string argument', 1, 0, 0) == 12 x = uncert_core.ufloat(10, 1) assert numbers_close(f_wrapped(x, 'string argument', x, x, x).std_dev, (1+2+3+4)*x.std_dev) def test_wrap_with_kwargs(): ''' Tests wrap() on functions with keyword arguments. Includes both wrapping a function that takes optional keyword arguments and calling a wrapped function with keyword arguments (optional or not). 
    '''

    # Version of f() that automatically works with numbers with
    # uncertainties:
    def f_auto_unc(x, y, *args, **kwargs):
        return x + umath.sin(y) + 2*args[0] + 3*kwargs['t']

    # We also add keyword arguments in the function which is wrapped:
    def f(x, y, *args, **kwargs):
        # We make sure that f is not called directly with a number with
        # uncertainty:
        for value in [x, y]+list(args)+list(kwargs.values()):
            assert not isinstance(value, uncert_core.UFloat)
        return f_auto_unc(x, y, *args, **kwargs)

    f_wrapped = uncert_core.wrap(f)

    x = ufloat(1, 0.1)
    y = ufloat(10, 0.11)
    z = ufloat(100, 0.111)
    t = ufloat(0.1, 0.1111)

    assert ufloats_close(f_wrapped(x, y, z, t=t),
                         f_auto_unc(x, y, z, t=t), tolerance=1e-5)

    ########################################

    # We make sure that analytical derivatives are indeed used. We
    # also test the automatic handling of additional *args arguments
    # beyond the number of supplied derivatives.

    f_wrapped2 = uncert_core.wrap(
        f, [None, lambda x, y, *args, **kwargs: math.cos(y)])

    # The derivatives must be perfectly identical:

    # The *args parameter of f() is given as a keyword argument, so as
    # to try to confuse the code:

    assert (f_wrapped2(x, y, z, t=t).derivatives[y]
            == f_auto_unc(x, y, z, t=t).derivatives[y])

    # Derivatives supplied through the keyword-parameter dictionary of
    # derivatives, and also derivatives supplied for the
    # var-positional arguments (*args[0]):

    f_wrapped3 = uncert_core.wrap(
        f,
        [None, None, lambda x, y, *args, **kwargs: 2],
        {'t': lambda x, y, *args, **kwargs: 3})

    # The derivatives should be exactly the same, because they are
    # obtained with the exact same analytic formula:
    assert (f_wrapped3(x, y, z, t=t).derivatives[z]
            == f_auto_unc(x, y, z, t=t).derivatives[z])
    assert (f_wrapped3(x, y, z, t=t).derivatives[t]
            == f_auto_unc(x, y, z, t=t).derivatives[t])

    ########################################
    # Making sure that user-supplied derivatives are indeed called:

    class FunctionCalled(Exception):
        '''
        Raised to signal that a function is indeed called.
        '''
        pass

    def failing_func(x, y, *args, **kwargs):
        raise FunctionCalled

    f_wrapped4 = uncert_core.wrap(
        f,
        [None, failing_func],
        {'t': failing_func})

    try:
        f_wrapped4(x, 3.14, z, t=t)
    except FunctionCalled:
        pass
    else:
        raise Exception('User-supplied derivative should be called')

    try:
        f_wrapped4(x, y, z, t=3.14)
    except FunctionCalled:
        pass
    else:
        raise Exception('User-supplied derivative should be called')

    try:
        f_wrapped4(x, 3.14, z, t=3.14)
    except FunctionCalled:
        raise Exception('User-supplied derivative should *not* be called')

###############################################################################

def test_access_to_std_dev():
    "Uniform access to the standard deviation"

    x = ufloat(1, 0.1)
    y = 2*x

    # std_dev for Variable and AffineScalarFunc objects:
    assert uncert_core.std_dev(x) == x.std_dev
    assert uncert_core.std_dev(y) == y.std_dev

    # std_dev for other objects:
    assert uncert_core.std_dev([]) == 0
    assert uncert_core.std_dev(None) == 0

###############################################################################

def test_covariances():
    "Covariance matrix"

    x = ufloat(1, 0.1)
    y = -2*x+10
    z = -3*x
    covs = uncert_core.covariance_matrix([x, y, z])
    # Diagonal elements are simple:
    assert numbers_close(covs[0][0], 0.01)
    assert numbers_close(covs[1][1], 0.04)
    assert numbers_close(covs[2][2], 0.09)
    # Non-diagonal elements:
    assert numbers_close(covs[0][1], -0.02)

###############################################################################

def test_power_all_cases():
    '''
    Checks all cases for the value and derivatives of x**p.
    '''
    power_all_cases(pow)

def power_all_cases(op):
    '''
    Checks all cases for the value and derivatives of power-like
    operator op (op is typically the built-in pow(), or math.pow()).

    Checks only the details of special results like 0, 1 or NaN).
    Different cases for the value of x**p and its derivatives are
    tested by dividing the (x, p) plane with:

    - x < 0, x = 0, x > 0
    - p integer or not, p < 0, p = 0, p > 0

    (not all combinations are distinct: for instance x > 0 gives
    identical formulas for all p).
    '''

    zero = ufloat(0, 0.1)
    zero2 = ufloat(0, 0.1)
    one = ufloat(1, 0.1)
    positive = ufloat(0.3, 0.01)
    positive2 = ufloat(0.3, 0.01)
    negative = ufloat(-0.3, 0.01)
    integer = ufloat(-3, 0)
    non_int_larger_than_one = ufloat(3.1, 0.01)
    positive_smaller_than_one = ufloat(0.3, 0.01)

    ## negative**integer

    result = op(negative, integer)
    assert not isnan(result.derivatives[negative])
    assert isnan(result.derivatives[integer])

    # Limit cases:
    result = op(negative, one)
    assert result.derivatives[negative] == 1
    assert isnan(result.derivatives[one])

    result = op(negative, zero)
    assert result.derivatives[negative] == 0
    assert isnan(result.derivatives[zero])

    ## negative**non-integer

    ## zero**...

    result = op(zero, non_int_larger_than_one)
    assert isnan(result.derivatives[zero])
    assert result.derivatives[non_int_larger_than_one] == 0

    # Special cases:
    result = op(zero, one)
    assert result.derivatives[zero] == 1
    assert result.derivatives[one] == 0

    result = op(zero, 2*one)
    assert result.derivatives[zero] == 0
    assert result.derivatives[one] == 0

    result = op(zero, positive_smaller_than_one)
    assert isnan(result.derivatives[zero])
    assert result.derivatives[positive_smaller_than_one] == 0

    result = op(zero, zero2)
    assert result.derivatives[zero] == 0
    assert isnan(result.derivatives[zero2])

    ## positive**...: this is a quite regular case where the value and
    ## the derivatives are all defined.

    result = op(positive, positive2)
    assert not isnan(result.derivatives[positive])
    assert not isnan(result.derivatives[positive2])

    result = op(positive, zero)
    assert result.derivatives[positive] == 0
    assert not isnan(result.derivatives[zero])

    result = op(positive, negative)
    assert not isnan(result.derivatives[positive])
    assert not isnan(result.derivatives[negative])

###############################################################################

def test_power_special_cases():
    '''
    Checks special cases of x**p.
    '''
    power_special_cases(pow)

    # We want the same behavior for numbers with uncertainties and for
    # math.pow() at their nominal values:
    positive = ufloat(0.3, 0.01)
    negative = ufloat(-0.3, 0.01)

    # http://stackoverflow.com/questions/10282674/difference-between-the-built-in-pow-and-math-pow-for-floats-in-python

    try:
        pow(ufloat(0, 0), negative)
    except ZeroDivisionError:
        pass
    else:
        raise Exception("A proper exception should have been raised")

    try:
        pow(ufloat(0, 0.1), negative)
    except ZeroDivisionError:
        pass
    else:
        raise Exception('A proper exception should have been raised')

    try:
        result = pow(negative, positive)
    except ValueError:
        # The reason why it should also fail in Python 3 is that the
        # result of Python 3 is a complex number, which uncertainties
        # does not handle (no uncertainties on complex numbers). In
        # Python 2, this should always fail, since Python 2 does not
        # know how to calculate it.
        pass
    else:
        raise Exception('A proper exception should have been raised')

def power_special_cases(op):
    '''
    Checks special cases of the uncertainty power operator op (where
    op is typically the built-in pow or uncertainties.umath.pow).

    The values x = 0, x = 1 and x = NaN are special, as are null,
    integral and NaN values of p.
''' zero = ufloat(0, 0) one = ufloat(1, 0) p = ufloat(0.3, 0.01) assert op(0, p) == 0 assert op(zero, p) == 0 # The outcome of 1**nan and nan**0 was undefined before Python # 2.6 (http://docs.python.org/library/math.html#math.pow): assert op(float('nan'), zero) == 1.0 assert op(one, float('nan')) == 1.0 # …**0 == 1.0: assert op(p, 0) == 1.0 assert op(zero, 0) == 1.0 assert op((-p), 0) == 1.0 # …**zero: assert op((-10.3), zero) == 1.0 assert op(0, zero) == 1.0 assert op(0.3, zero) == 1.0 assert op((-p), zero) == 1.0 assert op(zero, zero) == 1.0 assert op(p, zero) == 1.0 # one**… == 1.0 assert op(one, -3) == 1.0 assert op(one, -3.1) == 1.0 assert op(one, 0) == 1.0 assert op(one, 3) == 1.0 assert op(one, 3.1) == 1.0 # … with two numbers with uncertainties: assert op(one, (-p)) == 1.0 assert op(one, zero) == 1.0 assert op(one, p) == 1.0 # 1**… == 1.0: assert op(1., (-p)) == 1.0 assert op(1., zero) == 1.0 assert op(1., p) == 1.0 def test_power_wrt_ref(): ''' Checks special cases of the built-in pow() power operator. ''' power_wrt_ref(pow, pow) def power_wrt_ref(op, ref_op): ''' Checks special cases of the uncertainty power operator op (where op is typically the built-in pow or uncertainties.umath.pow), by comparing its results to the reference power operator ref_op (which is typically the built-in pow or math.pow). ''' # Negative numbers with uncertainty can be exponentiated to an # integral power: assert op(ufloat(-1.1, 0.1), -9).nominal_value == ref_op(-1.1, -9) # Case of numbers with no uncertainty: should give the same result # as numbers with uncertainties: assert op(ufloat(-1, 0), 9) == ref_op(-1, 9) assert op(ufloat(-1.1, 0), 9) == ref_op(-1.1, 9) ############################################################################### def test_PDG_precision(): ''' Test of the calculation of the number of significant digits for the uncertainty. 
    '''

    # The 3 cases of the rounding rules are covered in each case:
    # Each entry maps an uncertainty (std_dev) to the expected
    # (number of significant digits, rounded uncertainty) pair:
    tests = {
        # Very big floats:
        1.7976931348623157e308: (2, 1.7976931348623157e308),
        0.5e308: (1, 0.5e308),
        0.9976931348623157e+308: (2, 1e308),
        # Very small floats:
        1.3e-323: (2, 1.3e-323),
        5e-324: (1, 5e-324),
        9.99e-324: (2, 1e-323)
    }

    for (std_dev, result) in tests.items():
        assert uncert_core.PDG_precision(std_dev) == result

def test_repr():
    '''Test the representation of numbers with uncertainty.'''

    # The uncertainty is a power of 2, so that it can be exactly
    # represented:
    x = ufloat(3.14159265358979, 0.25)
    assert repr(x) == '3.14159265358979+/-0.25'

    # Zero uncertainty is displayed without a decimal point:
    x = ufloat(3.14159265358979, 0)
    assert repr(x) == '3.14159265358979+/-0'

    # Tagging:
    x = ufloat(3, 1, "length")
    assert repr(x) == '< length = 3.0+/-1.0 >'

def test_format():
    '''Test the formatting of numbers with uncertainty.'''

    # The way NaN is formatted with F, E and G depends on the version
    # of Python (NAN for Python 2.5+ at least):
    NaN_EFG = '%F' % float('nan')

    # !! The way NaN is formatted with F, E and G might depend on the
    # version of Python, if it is like NaN (could be tested with
    # Python 2.3 or 2.4 vs Python 2.7):
    Inf_EFG = '%F' % float('inf')

    # Tests of each point of the docstring of
    # AffineScalarFunc.__format__() in turn, mostly in the same order.

    # The LaTeX tests do not use the customization of
    # uncert_core.GROUP_SYMBOLS and uncert_core.EXP_PRINT: this
    # way, problems in the customization themselves are caught.
tests = { # (Nominal value, uncertainty): {format: result,...} # Usual float formatting, and individual widths, etc.: (3.1415, 0.0001): { '*^+7.2f': '*+3.14*+/-*0.00**', '+07.2f': '+003.14+/-0000.00', # 0 fill '>10f': ' 3.141500+/- 0.000100', # Width and align '11.3e': ' 3.142e+00+/- 0.000e+00', # Duplicated exponent '0.4e': '3.1415e+00+/-0.0000e+00' # Forced double exponent }, # Full generalization of float formatting: (3.1415, 0.0001): { '+09.2uf': '+03.14150+/-000.00010', # Alignment is not available with the % formatting # operator of Python < 2.6: '*^+9.2uf': '+3.14150*+/-*0.00010*', '>9f': ' 3.14150+/- 0.00010' # Width and align }, # Number of digits of the uncertainty fixed: (123.456789, 0.00123): { '.1uf': '123.457+/-0.001', '.2uf': '123.4568+/-0.0012', '.3uf': '123.45679+/-0.00123', '.2ue': '(1.234568+/-0.000012)e+02' }, # Sign handling: (-123.456789, 0.00123): { '.1uf': '-123.457+/-0.001', '.2uf': '-123.4568+/-0.0012', '.3uf': '-123.45679+/-0.00123', '.2ue': '(-1.234568+/-0.000012)e+02' }, # Uncertainty larger than the nominal value: (12.3, 456.78): { '': '12+/-457', '.1uf': '12+/-457', '.4uf': '12.3+/-456.8' }, # ... 
Same thing, but with an exponent: (12.3, 456.78): { '.1ue': '(0+/-5)e+02', '.4ue': '(0.123+/-4.568)e+02', '.4ueS': '0.123(4.568)e+02' }, (23456.789123, 1234.56789123): { '.6gS': '23456.8(1234.6)' }, # Test of the various float formats: the nominal value should # have a similar representation as if it were directly # represented as a float: (1234567.89, 0.1): { '.0e': '(1+/-0)e+06', 'e': '(1.23456789+/-0.00000010)e+06', 'E': '(1.23456789+/-0.00000010)E+06', 'f': '1234567.89+/-0.10', 'F': '1234567.89+/-0.10', 'g': '1234567.89+/-0.10', 'G': '1234567.89+/-0.10', '%': '(123456789+/-10)%' }, (1234567.89, 4.3): { 'g': '1234568+/-4' }, (1234567.89, 43): { # Case where g triggers the exponent notation 'g': '(1.23457+/-0.00004)e+06', 'G': '(1.23457+/-0.00004)E+06' }, (3.1415, 0.0001): { '+09.2uf': '+03.14150+/-000.00010' }, (1234.56789, 0.1): { '.0f': '(1234+/-0.)', # Approximate error indicated with "." 'e': '(1.23456+/-0.00010)e+03', 'E': '(1.23456+/-0.00010)E+03', 'f': '1234.57+/-0.10', 'F': '1234.57+/-0.10', 'f': '1234.57+/-0.10', 'F': '1234.57+/-0.10', '%': '123457+/-10%' }, # Percent notation: (0.42, 0.0055): { # Because '%' does 0.0055*100, the value # 0.5499999999999999 is obtained, which rounds to 0.5. The # original rounded value is 0.006. The same behavior is # found in Python 2.7: '{:.1%}'.format(0.0055) is '0.5%'. '.1u%': '(42.0+/-0.5)%', '.1u%S': '42.0(5)%', '%P': u'(42.0±0.5)%' }, # Particle Data Group automatic convention, including limit cases: (1.2345678, 0.354): {'': '1.23+/-0.35'}, (1.2345678, 0.3549): {'': '1.23+/-0.35'}, (1.2345678, 0.355): {'': '1.2+/-0.4'}, (1.5678, 0.355): {'': '1.6+/-0.4'}, (1.2345678, 0.09499): {'': '1.23+/-0.09'}, (1.2345678, 0.095): {'': '1.23+/-0.10'}, # Automatic extension of the uncertainty up to the decimal # point: (1000, 123): { '.1uf': '1000+/-123', # The nominal value has 1 <= mantissa < 10. 
The precision # is the number of significant digits of the uncertainty: '.1ue': '(1.0+/-0.1)e+03' }, # Spectroscopic notation: (-1.23, 3.4): { 'S': '-1.2(3.4)', '.2ufS': '-1.2(3.4)', '.3ufS': '-1.23(3.40)', }, (-123.456, 0.123): { 'S': '-123.46(12)', '.1ufS': '-123.5(1)', '.2ufS': '-123.46(12)', '.3ufS': '-123.456(123)', }, (-123.456, 0.567): { 'S': '-123.5(6)', '.1ufS': '-123.5(6)', '.2ufS': '-123.46(57)', '.3ufS': '-123.456(567)', }, (-123.456, 0.004): { # The decimal point shows that the uncertainty is not # exact: '.2fS': '-123.46(0.00)' }, # LaTeX notation: # (1234.56789, 0.1): { 'eL': r'\left(1.23457 \pm 0.00010\right) \times 10^{3}', 'EL': r'\left(1.23457 \pm 0.00010\right) \times 10^{3}', 'fL': '1234.57 \pm 0.10', 'FL': '1234.57 \pm 0.10', 'fL': '1234.57 \pm 0.10', 'FL': '1234.57 \pm 0.10', '%L': r'\left(123457 \pm 10\right) \%' }, # # ... combined with the spectroscopic notation: (-1.23, 3.4): { 'SL': '-1.2(3.4)', 'LS': '-1.2(3.4)', '.2ufSL': '-1.2(3.4)', '.2ufLS': '-1.2(3.4)' }, # Special cases for the uncertainty (0, nan) and format # strings (extension S, L, U,..., global width, etc.). # # Python 3.2 and 3.3 give 1.4e-12*1e+12 = 1.4000000000000001 # instead of 1.4 for Python 3.1. The problem does not appear # with 1.2, so 1.2 is used. (-1.2e-12, 0): { '12.2gPL': u' -1.2×10⁻¹²± 0', # Pure "width" formats are not accepted by the % operator, # and only %-compatible formats are accepted, for Python < # 2.6: '13S': ' -1.2(0)e-12', '10P': u'-1.2×10⁻¹²± 0', 'L': r'\left(-1.2 \pm 0\right) \times 10^{-12}', # No factored exponent, LaTeX '1L': r'-1.2 \times 10^{-12} \pm 0', 'SL': r'-1.2(0) \times 10^{-12}', 'SP': u'-1.2(0)×10⁻¹²' }, # Python 3.2 and 3.3 give 1.4e-12*1e+12 = 1.4000000000000001 # instead of 1.4 for Python 3.1. The problem does not appear # with 1.2, so 1.2 is used. 
(-1.2e-12, float('nan')): { '.2uG': '(-1.2+/-%s)E-12' % NaN_EFG, # u ignored, format used '15GS': ' -1.2(%s)E-12' % NaN_EFG, 'SL': r'-1.2(\mathrm{nan}) \times 10^{-12}', # LaTeX NaN # Pretty-print priority, but not for NaN: 'PSL': u'-1.2(\mathrm{nan})×10⁻¹²', 'L': r'\left(-1.2 \pm \mathrm{nan}\right) \times 10^{-12}', # Uppercase NaN and LaTeX: '.1EL': (r'\left(-1.2 \pm \mathrm{%s}\right) \times 10^{-12}' % NaN_EFG), '10': ' -1.2e-12+/- nan', '15S': ' -1.2(nan)e-12' }, (3.14e-10, 0.01e-10): { # Character (Unicode) strings: u'P': u'(3.140±0.010)×10⁻¹⁰', # PDG rules: 2 digits u'PL': u'(3.140±0.010)×10⁻¹⁰', # Pretty-print has higher priority # Truncated non-zero uncertainty: '.1e': '(3.1+/-0.0)e-10', '.1eS': '3.1(0.0)e-10' }, # Some special cases: (1, float('nan')): { 'g': '1+/-nan', 'G': '1+/-%s' % NaN_EFG, '%': '(100.000000+/-nan)%', # The % format type is like f # Should be the same as '+05', for floats, but is not, in # Python 2.7: '+05g': '+0001+/-00nan', # 5 is the *minimal* width, 6 is the default number of # digits after the decimal point: '+05%': '(+100.000000+/-00nan)%', # There is a difference between '{}'.format(1.) and # '{:g}'.format(1.), which is not fully obvious in the # documentation, which indicates that a None format type # is like g. The reason is that the empty format string is # actually interpreted as str(), and that str() does not # have to behave like g ('{}'.format(1.234567890123456789) # and '{:g}'.format(1.234567890123456789) are different). '': '1.0+/-nan', # This is ugly, but consistent with # '{:+05}'.format(float('nan')) and format(1.) 
(which # differs from format(1)!): '+05': '+01.0+/-00nan' }, (9.9, 0.1): { '.1ue': '(9.9+/-0.1)e+00', '.0fS': '10(0.)' }, (9.99, 0.1): { # The precision has an effect on the exponent, like for # floats: '.2ue': '(9.99+/-0.10)e+00', # Same exponent as for 9.99 alone '.1ue': '(1.00+/-0.01)e+01' # Same exponent as for 9.99 alone }, # 0 uncertainty: nominal value displayed like a float: (1.2345, 0): { '.2ue': '(1.23+/-0)e+00', '1.2ue': '1.23e+00+/-0', # No factored exponent '.2uf': '1.23+/-0', '.2ufS': '1.23(0)', '.2fS': '1.23(0)', 'g': '1.2345+/-0', '': '1.2345+/-0' }, # Alignment and filling characters (supported in Python 2.6+): (3.1415e10, 0): { '<15': '31415000000.0 +/-0 ', '<20S': '31415000000.0(0) ', # Trying to trip the format parsing with a fill character # which is an alignment character: '=>15': '==31415000000.0+/-==============0' }, (1234.56789, 0): { '1.2ue': '1.23e+03+/-0', # u ignored '1.2e': '1.23e+03+/-0', # Default precision = 6 'eL': r'\left(1.234568 \pm 0\right) \times 10^{3}', 'EL': r'\left(1.234568 \pm 0\right) \times 10^{3}', 'fL': '1234.567890 \pm 0', 'FL': '1234.567890 \pm 0', '%L': r'\left(123456.789000 \pm 0\right) \%' }, (1e5, 0): { 'g': '100000+/-0' }, (1e6, 0): { # A default precision of 6 is used because the uncertainty # cannot be used for defining a default precision (it does # not have a magnitude): 'g': '(1+/-0)e+06' }, (1e6+10, 0): { # A default precision of 6 is used because the uncertainty # cannot be used for defining a default precision (it does # not have a magnitude): 'g': '(1.00001+/-0)e+06' }, # Rounding of the uncertainty that "changes" the number of # significant digits: (1, 0.994): { '.3uf': '1.000+/-0.994', '.2uf': '1.00+/-0.99', '.1uf': '1+/-1' # Discontinuity in the number of digits }, (12.3, 2.3): { '.2ufS': '12.3(2.3)' # Decimal point on the uncertainty }, (12.3, 2.3): { '.1ufS': '12(2)' # No decimal point on the uncertainty }, (0, 0): { # Make defining the first significant digit problematic '.1f': '0.0+/-0', # 
Simple float formatting 'g': '0+/-0' }, (1.2e-34, 5e-67): { '.6g': '(1.20000+/-0.00000)e-34', '13.6g': ' 1.20000e-34+/- 0.00000e-34', '13.6G': ' 1.20000E-34+/- 0.00000E-34', '.6GL': r'\left(1.20000 \pm 0.00000\right) \times 10^{-34}', '.6GLp': r'\left(1.20000 \pm 0.00000\right) \times 10^{-34}', }, (float('nan'), 100): { # NaN *nominal value* '': 'nan+/-100.0', # Like '{}'.format(100.) 'g': 'nan+/-100', # Like '{:g}'.format(100.) '.1e': '(nan+/-1.0)e+02', # Similar to 1±nan '.1E': '(%s+/-1.0)E+02' % NaN_EFG, '.1ue': '(nan+/-1)e+02', '10.1e': ' nan+/- 1.0e+02' }, (float('nan'), 1e8): { # NaN *nominal value* '': 'nan+/-100000000.0', # Like '{}'.format(1e8) 'g': '(nan+/-1)e+08', # Like '{:g}'.format(1e8) '.1e': '(nan+/-1.0)e+08', '.1E': '(%s+/-1.0)E+08' % NaN_EFG, '.1ue': '(nan+/-1)e+08', '10.1e': ' nan+/- 1.0e+08' # 'nane+08' would be strange }, (float('nan'), 123456789): { # NaN *nominal value* '': 'nan+/-123456789.0', # Similar to '{}'.format(123456789.) 'g': '(nan+/-1.23457)e+08', # Similar to '{:g}'.format(123456789.) '.1e': '(nan+/-1.2)e+08', '.1E': '(%s+/-1.2)E+08' % NaN_EFG, '.1ue': '(nan+/-1)e+08', '.1ueL': r'\left(\mathrm{nan} \pm 1\right) \times 10^{8}', '10.1e': ' nan+/- 1.2e+08', '10.1eL': r'\mathrm{nan} \pm 1.2 \times 10^{8}' }, (float('nan'), float('nan')): { # *Double* NaN '': 'nan+/-nan', '.1e': 'nan+/-nan', '.1E': '%s+/-%s' % (NaN_EFG, NaN_EFG), '.1ue': 'nan+/-nan', 'EL': r'\mathrm{%s} \pm \mathrm{%s}' % (NaN_EFG, NaN_EFG) }, (float('inf'), 100): { # Inf *nominal value* '': 'inf+/-100.0', # Like '{}'.format(100.) 'g': 'inf+/-100', # Like '{:g}'.format(100.) 
'.1e': '(inf+/-1.0)e+02', # Similar to 1±inf '.1E': '(%s+/-1.0)E+02' % Inf_EFG, '.1ue': '(inf+/-1)e+02', '10.1e': ' inf+/- 1.0e+02' }, (float('inf'), 1e8): { # Inf *nominal value* '': 'inf+/-100000000.0', # Like '{}'.format(1e8) 'g': '(inf+/-1)e+08', # Like '{:g}'.format(1e8) '.1e': '(inf+/-1.0)e+08', '.1E': '(%s+/-1.0)E+08' % Inf_EFG, '.1ue': '(inf+/-1)e+08', '10.1e': ' inf+/- 1.0e+08' # 'infe+08' would be strange }, (float('inf'), 123456789): { # Inf *nominal value* '': 'inf+/-123456789.0', # Similar to '{}'.format(123456789.) 'g': '(inf+/-1.23457)e+08', # Similar to '{:g}'.format(123456789.) '.1e': '(inf+/-1.2)e+08', '.1ep': '(inf+/-1.2)e+08', '.1E': '(%s+/-1.2)E+08' % Inf_EFG, '.1ue': '(inf+/-1)e+08', '.1ueL': r'\left(\infty \pm 1\right) \times 10^{8}', '.1ueLp': r'\left(\infty \pm 1\right) \times 10^{8}', '10.1e': ' inf+/- 1.2e+08', '10.1eL': r' \infty \pm 1.2 \times 10^{8}' }, (float('inf'), float('inf')): { # *Double* Inf '': 'inf+/-inf', '.1e': 'inf+/-inf', '.1E': '%s+/-%s' % (Inf_EFG, Inf_EFG), '.1ue': 'inf+/-inf', 'EL': r'\infty \pm \infty', 'ELp': r'\left(\infty \pm \infty\right)', }, # Like the tests for +infinity, but for -infinity: (float('-inf'), 100): { # Inf *nominal value* '': '-inf+/-100.0', # Like '{}'.format(100.) 'g': '-inf+/-100', # Like '{:g}'.format(100.) '.1e': '(-inf+/-1.0)e+02', # Similar to 1±inf '.1E': '(-%s+/-1.0)E+02' % Inf_EFG, '.1ue': '(-inf+/-1)e+02', '10.1e': ' -inf+/- 1.0e+02' }, (float('-inf'), 1e8): { # Inf *nominal value* '': '-inf+/-100000000.0', # Like '{}'.format(1e8) 'g': '(-inf+/-1)e+08', # Like '{:g}'.format(1e8) '.1e': '(-inf+/-1.0)e+08', '.1E': '(-%s+/-1.0)E+08' % Inf_EFG, '.1ue': '(-inf+/-1)e+08', '10.1e': ' -inf+/- 1.0e+08' # 'infe+08' would be strange }, (float('-inf'), 123456789): { # Inf *nominal value* '': '-inf+/-123456789.0', # Similar to '{}'.format(123456789.) 'g': '(-inf+/-1.23457)e+08', # Similar to '{:g}'.format(123456789.) 
'.1e': '(-inf+/-1.2)e+08', '.1E': '(-%s+/-1.2)E+08' % Inf_EFG, '.1ue': '(-inf+/-1)e+08', '.1ueL': r'\left(-\infty \pm 1\right) \times 10^{8}', '10.1e': ' -inf+/- 1.2e+08', '10.1eL': r' -\infty \pm 1.2 \times 10^{8}' }, (float('-inf'), float('inf')): { # *Double* Inf '': '-inf+/-inf', '.1e': '-inf+/-inf', '.1E': '-%s+/-%s' % (Inf_EFG, Inf_EFG), '.1ue': '-inf+/-inf', 'EL': r'-\infty \pm \infty' }, # The Particle Data Group convention trumps the "at least one # digit past the decimal point" for Python floats, but only # with a non-zero uncertainty: (724.2, 26.4): { '': '724+/-26', 'p': '(724+/-26)' }, (724, 0): { '': '724.0+/-0' }, # More NaN and infinity, in particular with LaTeX and various # options: (float('-inf'), float('inf')): { 'S': '-inf(inf)', 'LS': '-\infty(\infty)', 'L': '-\infty \pm \infty', 'LP': u'-\infty±\infty', # The following is consistent with Python's own # formatting, which depends on the version of Python: # formatting float("-inf") with format(..., "020") gives # '-0000000000000000inf' with Python 2.7, but # '-00000000000000.0inf' with Python 2.6. However, Python # 2.6 gives the better, Python 2.7 form when format()ting # with "020g" instead, so this formatting would be better, # in principle, and similarly for "%020g" % ... Thus, # Python's format() breaks the official rule according to # which no format type is equivalent to "g", for # floats. If the better behavior was needed, internal # formatting could in principle force the "g" formatting # type when none is given; however, Python does not # actually fully treat the none format type in the same # was as the "g" format, so this solution cannot be used, # as it would break other formatting behaviors in this # code. It is thus best to mimic the native behavior of # none type formatting (even if it does not look so good # in Python 2.6). 
'020S': format(float("-inf"), '015')+'(inf)' }, (-float('nan'), float('inf')): { 'S': 'nan(inf)', 'LS': '\mathrm{nan}(\infty)', 'L': '\mathrm{nan} \pm \infty', 'LP': u'\mathrm{nan}±\infty' }, # Leading zeroes in the shorthand notation: (-2, 3): { "020S": "-000000000002.0(3.0)" } } # ',' format option: introduced in Python 2.7 if sys.version_info >= (2, 7): tests.update({ (1234.56789, 0.012): { ',.1uf': '1,234.57+/-0.01' }, (123456.789123, 1234.5678): { ',f': '123,457+/-1,235', # Particle Data Group convention ',.4f': '123,456.7891+/-1,234.5678' } }) # True if we can detect that the Jython interpreter is running this code: try: jython_detected = sys.subversion[0] == 'Jython' except AttributeError: jython_detected = False for (values, representations) in tests.items(): value = ufloat(*values) for (format_spec, result) in representations.items(): # print "FORMATTING {} WITH '{}'".format(repr(value), format_spec) # Jython 2.5.2 does not always represent NaN as nan or NAN # in the CPython way: for example, '%.2g' % float('nan') # is '\ufffd'. The test is skipped, in this case: if jython_detected and ( isnan(value.std_dev) or isnan(value.nominal_value)): continue # Call that works with Python < 2.6 too: representation = value.format(format_spec) assert representation == result, ( # The representation is used, for terminal that do not # support some characters like ±, and superscripts: 'Incorrect representation %r for format %r of %r:' ' %r expected.' 
% (representation, format_spec, value, result)) # An empty format string is like calling str() # (http://docs.python.org/2/library/string.html#formatspec): if not format_spec: assert representation == str(value), ( 'Empty format should give the same thing as str():' ' %s obtained instead of %s' % (representation, str(value))) # Parsing back into a number with uncertainty (unless the # LaTeX or comma notation is used): if (not set(format_spec).intersection('L,*%') # * = fill with * # "0nan" and '0nan' not in representation.lower() # "0inf" and '0inf' not in representation.lower() # Specific case: and '=====' not in representation): value_back = ufloat_fromstr(representation) # The original number and the new one should be consistent # with each other: try: # The nominal value can be rounded to 0 when the # uncertainty is larger (because p digits on the # uncertainty can still show 0.00... for the # nominal value). The relative error is infinite, # so this should not cause an error: if value_back.nominal_value: assert numbers_close(value.nominal_value, value_back.nominal_value, 2.4e-1) # If the uncertainty is zero, then the relative # change can be large: assert numbers_close(value.std_dev, value_back.std_dev, 3e-1) except AssertionError: # !! 
The following string formatting requires # str() to work (to not raise an exception) on the # values (which have a non-standard class): raise AssertionError( 'Original value %s and value %s parsed from %r' ' (obtained through format specification %r)' ' are not close enough' % (value, value_back, representation, format_spec)) def test_unicode_format(): '''Test of the unicode formatting of numbers with uncertainties''' x = ufloat(3.14159265358979, 0.25) assert isinstance(u'Résultat = %s' % x.format(''), str) assert isinstance(u'Résultat = %s' % x.format('P'), str) def test_custom_pretty_print_and_latex(): '''Test of the pretty-print and LaTeX format customizations''' x = ufloat(2, 0.1)*1e-11 # We will later restore the defaults: PREV_CUSTOMIZATIONS = { var: getattr(uncert_core, var).copy() for var in ['PM_SYMBOLS', 'MULT_SYMBOLS', 'GROUP_SYMBOLS']} # Customizations: for format in ["pretty-print", "latex"]: uncert_core.PM_SYMBOLS[format] = u" ± " uncert_core.MULT_SYMBOLS[format] = u"⋅" uncert_core.GROUP_SYMBOLS[format] = ( "[", "]" ) assert u"{:P}".format(x) == u'[2.00 ± 0.10]⋅10⁻¹¹' assert u"{:L}".format(x) == u'[2.00 ± 0.10] ⋅ 10^{-11}' # We restore the defaults: for (var, setting) in PREV_CUSTOMIZATIONS.items(): setattr(uncert_core, var, setting) ############################################################################### # The tests below require NumPy, which is an optional package: try: import numpy except ImportError: pass else: def arrays_close(m1, m2, precision=1e-4): """ Returns True iff m1 and m2 are almost equal, where elements can be either floats or AffineScalarFunc objects. Two independent AffineScalarFunc objects are deemed equal if both their nominal value and uncertainty are equal (up to the given precision). m1, m2 -- NumPy arrays. precision -- precision passed through to uncertainties.test_uncertainties.numbers_close(). """ # ! 
numpy.allclose() is similar to this function, but does not # work on arrays that contain numbers with uncertainties, because # of the isinf() function. for (elmt1, elmt2) in zip(m1.flat, m2.flat): # For a simpler comparison, both elements are # converted to AffineScalarFunc objects: elmt1 = uncert_core.to_affine_scalar(elmt1) elmt2 = uncert_core.to_affine_scalar(elmt2) if not numbers_close(elmt1.nominal_value, elmt2.nominal_value, precision): return False if not numbers_close(elmt1.std_dev, elmt2.std_dev, precision): return False return True def test_numpy_comparison(): "Comparison with a NumPy array." x = ufloat(1, 0.1) # Comparison with a different type: assert x != [x, x] # NumPy arrays can be compared, through element-wise # comparisons. Numbers with uncertainties should yield the # same kind of results as pure floats (i.e., a NumPy array, # etc.). # We test the comparison operators both for the uncertainties # package *and* the NumPy package: # Equalities, etc.: assert len(x == numpy.arange(10)) == 10 assert len(numpy.arange(10) == x) == 10 assert len(x != numpy.arange(10)) == 10 assert len(numpy.arange(10) != x) == 10 assert len(x == numpy.array([x, x, x])) == 3 assert len(numpy.array([x, x, x]) == x) == 3 assert numpy.all(x == numpy.array([x, x, x])) # Inequalities: assert len(x < numpy.arange(10)) == 10 assert len(numpy.arange(10) > x) == 10 assert len(x <= numpy.arange(10)) == 10 assert len(numpy.arange(10) >= x) == 10 assert len(x > numpy.arange(10)) == 10 assert len(numpy.arange(10) < x) == 10 assert len(x >= numpy.arange(10)) == 10 assert len(numpy.arange(10) <= x) == 10 # More detailed test, that shows that the comparisons are # meaningful (x >= 0, but not x <= 1): assert numpy.all((x >= numpy.arange(3)) == [True, False, False]) def test_correlated_values(): """ Correlated variables. Test through the input of the (full) covariance matrix. 
""" u = uncert_core.ufloat(1, 0.1) cov = uncert_core.covariance_matrix([u]) # "1" is used instead of u.nominal_value because # u.nominal_value might return a float. The idea is to force # the new variable u2 to be defined through an integer nominal # value: u2, = uncert_core.correlated_values([1], cov) expr = 2*u2 # Calculations with u2 should be possible, like with u #################### # Covariances between output and input variables: x = ufloat(1, 0.1) y = ufloat(2, 0.3) z = -3*x+y covs = uncert_core.covariance_matrix([x, y, z]) # Test of the diagonal covariance elements: assert arrays_close( numpy.array([v.std_dev**2 for v in (x, y, z)]), numpy.array(covs).diagonal()) # "Inversion" of the covariance matrix: creation of new # variables: (x_new, y_new, z_new) = uncert_core.correlated_values( [x.nominal_value, y.nominal_value, z.nominal_value], covs, tags = ['x', 'y', 'z']) # Even the uncertainties should be correctly reconstructed: assert arrays_close(numpy.array((x, y, z)), numpy.array((x_new, y_new, z_new))) # ... and the covariances too: assert arrays_close( numpy.array(covs), numpy.array(uncert_core.covariance_matrix([x_new, y_new, z_new]))) assert arrays_close( numpy.array([z_new]), numpy.array([-3*x_new+y_new])) #################### # ... 
as well as functional relations: u = ufloat(1, 0.05) v = ufloat(10, 0.1) sum_value = u+2*v # Covariance matrices: cov_matrix = uncert_core.covariance_matrix([u, v, sum_value]) # Correlated variables can be constructed from a covariance # matrix, if NumPy is available: (u2, v2, sum2) = uncert_core.correlated_values( [x.nominal_value for x in [u, v, sum_value]], cov_matrix) # arrays_close() is used instead of numbers_close() because # it compares uncertainties too: assert arrays_close(numpy.array([u]), numpy.array([u2])) assert arrays_close(numpy.array([v]), numpy.array([v2])) assert arrays_close(numpy.array([sum_value]), numpy.array([sum2])) assert arrays_close(numpy.array([0]), numpy.array([sum2-(u2+2*v2)])) # Spot checks of the correlation matrix: corr_matrix = uncert_core.correlation_matrix([u, v, sum_value]) assert numbers_close(corr_matrix[0,0], 1) assert numbers_close(corr_matrix[1,2], 2*v.std_dev/sum_value.std_dev) #################### # Test of numerical robustness despite wildly different # orders of magnitude (see # https://github.com/lebigot/uncertainties/issues/95): cov = numpy.diag([1e-70, 1e-70, 1e10]) cov[0, 1] = cov[1, 0] = 0.9e-70 cov[[0, 1], 2] = -3e-34 cov[2, [0, 1]] = -3e-34 variables = uncert_core.correlated_values([0]*3, cov) # Since the numbers are very small, we need to compare them # in a stricter way, that handles the case of a 0 variance # in `variables`: assert numbers_close( 1e66*cov[0,0], 1e66*variables[0].s**2, tolerance=1e-5) assert numbers_close( 1e66*cov[1,1], 1e66*variables[1].s**2, tolerance=1e-5) #################### # 0 variances are a bit special, since the correlation matrix # cannot be calculated naively, so we test that there is no # specific problem in this case: cov = numpy.diag([0, 0, 10]) nom_values = [1, 2, 3] variables = uncert_core.correlated_values(nom_values, cov) for (variable, nom_value, variance) in zip( variables, nom_values, cov.diagonal()): assert numbers_close(variable.n, nom_value) assert 
numbers_close(variable.s**2, variance) assert arrays_close( cov, numpy.array(uncert_core.covariance_matrix(variables))) def test_correlated_values_correlation_mat(): ''' Tests the input of correlated value. Test through their correlation matrix (instead of the covariance matrix). ''' x = ufloat(1, 0.1) y = ufloat(2, 0.3) z = -3*x+y cov_mat = uncert_core.covariance_matrix([x, y, z]) std_devs = numpy.sqrt(numpy.array(cov_mat).diagonal()) corr_mat = cov_mat/std_devs/std_devs[numpy.newaxis].T # We make sure that the correlation matrix is indeed diagonal: assert (corr_mat-corr_mat.T).max() <= 1e-15 # We make sure that there are indeed ones on the diagonal: assert (corr_mat.diagonal()-1).max() <= 1e-15 # We try to recover the correlated variables through the # correlation matrix (not through the covariance matrix): nominal_values = [v.nominal_value for v in (x, y, z)] std_devs = [v.std_dev for v in (x, y, z)] x2, y2, z2 = uncert_core.correlated_values_norm( list(zip(nominal_values, std_devs)), corr_mat) # arrays_close() is used instead of numbers_close() because # it compares uncertainties too: # Test of individual variables: assert arrays_close(numpy.array([x]), numpy.array([x2])) assert arrays_close(numpy.array([y]), numpy.array([y2])) assert arrays_close(numpy.array([z]), numpy.array([z2])) # Partial correlation test: assert arrays_close(numpy.array([0]), numpy.array([z2-(-3*x2+y2)])) # Test of the full covariance matrix: assert arrays_close( numpy.array(cov_mat), numpy.array(uncert_core.covariance_matrix([x2, y2, z2]))) uncertainties-3.1.7/uncertainties/umath.py000066400000000000000000000024471425362552000207570ustar00rootroot00000000000000''' Mathematical operations that generalize many operations from the standard math module so that they also work on numbers with uncertainties. Examples: from umath import sin # Manipulation of numbers with uncertainties: x = uncertainties.ufloat(3, 0.1) print sin(x) # prints 0.141120008...+/-0.098999... 
# The umath functions also work on regular Python floats: print sin(3) # prints 0.141120008... This is a Python float. Importing all the functions from this module into the global namespace is possible. This is encouraged when using a Python shell as a calculator. Example: import uncertainties from uncertainties.umath import * # Imports tan(), etc. x = uncertainties.ufloat(3, 0.1) print tan(x) # tan() is the uncertainties.umath.tan function The numbers with uncertainties handled by this module are objects from the uncertainties module, from either the Variable or the AffineScalarFunc class. (c) 2009-2016 by Eric O. LEBIGOT (EOL) . Please send feature requests, bug reports, or feedback to this address. This software is released under a dual license. (1) The BSD license. (2) Any other license, as long as it is obtained from the original author.''' from .umath_core import * from .umath_core import __all__ # For a correct help(umath) uncertainties-3.1.7/uncertainties/umath_core.py000066400000000000000000000347331425362552000217720ustar00rootroot00000000000000# !!!!!!!!!!! Add a header to the documentation, that starts with something # like "uncertainties.UFloat-compatible version of...", for all functions. """ Implementation of umath.py, with internals. """ # This module exists so as to define __all__, which in turn defines # which functions are visible to the user in umath.py through from # umath import * and Python shell completion. from __future__ import division # Many analytical derivatives depend on this # Standard modules from builtins import map import math import sys import itertools # Local modules import uncertainties.core as uncert_core from uncertainties.core import (to_affine_scalar, AffineScalarFunc, LinearCombination) ############################################################################### # We wrap the functions from the math module so that they keep track of # uncertainties by returning a AffineScalarFunc object. 
# Some functions from the math module cannot be adapted in a standard
# way so to work with AffineScalarFunc objects (either as their result
# or as their arguments):

# (1) Some functions return a result of a type whose value and
# variations (uncertainties) cannot be represented by AffineScalarFunc
# (e.g., math.frexp, which returns a tuple). The exception raised
# when not wrapping them with wrap() is more obvious than the
# one obtained when wrapping them (in fact, the wrapped functions
# attempts operations that are not supported, such as calculation a
# subtraction on a result of type tuple).

# (2) Some functions don't take continuous scalar arguments (which can
# be varied during differentiation): math.fsum, math.factorial...
# Such functions can either be:
# - wrapped in a special way.
# - excluded from standard wrapping by adding their name to
# no_std_wrapping

# Math functions that have a standard interface: they take
# one or more float arguments, and return a scalar:
many_scalars_to_scalar_funcs = []

# Some functions require a specific treatment and must therefore be
# excluded from standard wrapping. Functions
# no_std_wrapping = ['modf', 'frexp', 'ldexp', 'fsum', 'factorial']

# Functions with numerical derivatives (set literal: the package
# requires Python 2.7+, so the older set([...]) spelling is not needed):
num_deriv_funcs = {'fmod', 'gamma', 'lgamma'}

# Functions are by definition locally constant (on real
# numbers): their value does not depend on the uncertainty (because
# this uncertainty is supposed to lead to a good linear approximation
# of the function in the uncertainty region). The type of their output
# for floats is preserved, as users should not care about deviations
# in their value: their value is locally constant due to the nature of
# the function (0 derivative). This situation is similar to that of
# comparisons (==, >, etc.).
locally_cst_funcs = {'ceil', 'floor', 'isinf', 'isnan', 'trunc'}

# Functions that do not belong in many_scalars_to_scalar_funcs, but
# that have a version that handles uncertainties. These functions are
# also not in numpy (see unumpy/core.py).
non_std_wrapped_funcs = []

# Function that copies the relevant attributes from generalized
# functions from the math module:
# This is a copy&paste job from the functools module, changing
# the default argument for assigned (only __doc__ is copied, since
# e.g. __name__ must keep referring to the wrapper).
def wraps(wrapper,
          wrapped,
          assigned=('__doc__',),
          updated=('__dict__',)):
    """Update a wrapper function to look like the wrapped function.

    wrapper -- function to be updated
    wrapped -- original function
    assigned -- tuple naming the attributes assigned directly from the
    wrapped function to the wrapper function
    updated -- tuple naming the attributes of the wrapper that are updated
    with the corresponding attribute from the wrapped function.
    """
    for attr in assigned:
        setattr(wrapper, attr, getattr(wrapped, attr))
    for attr in updated:
        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
    # Return the wrapper so this can be used as a decorator via partial()
    return wrapper

########################################
# Wrapping of math functions:

# Fixed formulas for the derivatives of some functions from the math
# module (some functions might not be present in all version of
# Python). Singular points are not taken into account. The user
# should never give "large" uncertainties: problems could only appear
# if this assumption does not hold.

# Functions not mentioned in _fixed_derivatives have their derivatives
# calculated numerically.

# Functions that have singularities (possibly at infinity) benefit
# from analytical calculations (instead of the default numerical
# calculation) because their derivatives generally change very fast.
# Even slowly varying functions (e.g., abs()) yield more precise
# results when differentiated analytically, because of the loss of
# precision in numerical calculations.

def log_der0(*args):
    """
    Derivative of math.log() with respect to its first argument.

    Works whether 1 or 2 arguments are given.
    """
    if len(args) == 1:
        return 1/args[0]
    # 2-argument form log(x, base):
    return 1/args[0]/math.log(args[1])

def _deriv_copysign(x, y):
    # Derivative of copysign(x, y) with respect to x (the magnitude
    # argument): +-1, depending on whether the sign is flipped.
    sign = math.copysign(1, y)
    return sign if x >= 0 else -sign

def _deriv_fabs(x):
    # Derivative of fabs(): the sign of x.
    return 1 if x >= 0 else -1

def _deriv_pow_0(x, y):
    # Derivative of pow(x, y) with respect to x.
    if y == 0:
        return 0.
    if x != 0 or y % 1 == 0:
        return y*math.pow(x, y-1)
    # x == 0 with a non-integer exponent: the derivative is undefined:
    return float('nan')

def _deriv_pow_1(x, y):
    # Derivative of pow(x, y) with respect to y.
    if x == 0 and y > 0:
        return 0.
    return math.log(x) * math.pow(x, y)

erf_coef = 2/math.sqrt(math.pi)  # Optimization for erf()

fixed_derivatives = {
    # In alphabetical order, here:
    'acos': [lambda x: -1/math.sqrt(1-x**2)],
    'acosh': [lambda x: 1/math.sqrt(x**2-1)],
    'asin': [lambda x: 1/math.sqrt(1-x**2)],
    'asinh': [lambda x: 1/math.sqrt(1+x**2)],
    'atan': [lambda x: 1/(1+x**2)],
    'atan2': [lambda y, x: x/(x**2+y**2),  # Correct for x == 0
              lambda y, x: -y/(x**2+y**2)],  # Correct for x == 0
    'atanh': [lambda x: 1/(1-x**2)],
    'copysign': [_deriv_copysign,
                 lambda x, y: 0],
    'cos': [lambda x: -math.sin(x)],
    'cosh': [math.sinh],
    'degrees': [lambda x: math.degrees(1)],
    'erf': [lambda x: math.exp(-x**2)*erf_coef],
    'erfc': [lambda x: -math.exp(-x**2)*erf_coef],
    'exp': [math.exp],
    'expm1': [math.exp],
    'fabs': [_deriv_fabs],
    'hypot': [lambda x, y: x/math.hypot(x, y),
              lambda x, y: y/math.hypot(x, y)],
    'log': [log_der0,
            lambda x, y: -math.log(x, y)/y/math.log(y)],
    'log10': [lambda x: 1/x/math.log(10)],
    'log1p': [lambda x: 1/(1+x)],
    'pow': [_deriv_pow_0, _deriv_pow_1],
    'radians': [lambda x: math.radians(1)],
    'sin': [math.cos],
    'sinh': [math.cosh],
    'sqrt': [lambda x: 0.5/math.sqrt(x)],
    'tan': [lambda x: 1+math.tan(x)**2],
    'tanh': [lambda x: 1-math.tanh(x)**2]
    }

# Many built-in functions in the math module are wrapped with a
# version which is uncertainty aware:

this_module = sys.modules[__name__]

def wrap_locally_cst_func(func):
    '''
    Return a function that returns the same arguments as func, but
    after converting any AffineScalarFunc object to its nominal value.

    This function is useful for wrapping functions that are locally
    constant: the uncertainties should have no role in the result
    (since they are supposed to keep the function linear and hence,
    here, constant).
    '''
    def wrapped_func(*args, **kwargs):
        # Only the nominal values matter: a locally constant function
        # has zero derivatives, so uncertainties play no role.
        plain_args = [uncert_core.nominal_value(value) for value in args]
        plain_kwargs = {
            arg_name: uncert_core.nominal_value(value)
            for (arg_name, value) in kwargs.items()}
        return func(*plain_args, **plain_kwargs)
    return wrapped_func

# for (name, attr) in vars(math).items():
for name in dir(math):

    if name in fixed_derivatives:
        # Analytical derivatives have priority over a numerical
        # calculation:
        derivatives = fixed_derivatives[name]
    elif name in num_deriv_funcs:
        # Functions whose derivatives are calculated numerically by
        # this module fall here (isinf, fmod,...). An empty list
        # means: numerical calculation required.
        derivatives = []
    elif name not in locally_cst_funcs:
        continue  # 'name' not wrapped by this module (__doc__, e, etc.)

    func = getattr(math, name)

    if name in locally_cst_funcs:
        wrapped_func = wrap_locally_cst_func(func)
    else:
        # Function with analytical or numerical derivatives. Errors
        # during the calculation of the derivatives are converted to a
        # NaN result: it is assumed that a mathematical calculation
        # that cannot be calculated indicates a non-defined derivative
        # (the derivatives in fixed_derivatives must be written this
        # way):
        wrapped_func = uncert_core.wrap(
            func, map(uncert_core.nan_if_exception, derivatives))

    # !! The same effect could be achieved with globals()[...] = ...
    setattr(this_module, name, wraps(wrapped_func, func))

    many_scalars_to_scalar_funcs.append(name)

###############################################################################

########################################
# Special cases: some of the functions from no_std_wrapping:

##########
# The math.factorial function is not converted to an uncertainty-aware
# function, because it does not handle non-integer arguments: it does
# not make sense to give it an argument with a numerical error
# (whereas this would be relevant for the gamma function).

##########

# fsum takes a single argument, which cannot be differentiated.
# However, each of the arguments inside this single list can
# be a variable.
We handle this in a specific way: # Only for Python 2.6+: # For drop-in compatibility with the math module: factorial = math.factorial non_std_wrapped_funcs.append('factorial') # We wrap math.fsum original_func = math.fsum # For optimization purposes # The function below exists so that temporary variables do not # pollute the module namespace: def wrapped_fsum(): """ Return an uncertainty-aware version of math.fsum, which must be contained in _original_func. """ # The fsum function is flattened, in order to use the # wrap() wrapper: flat_fsum = lambda *args: original_func(args) flat_fsum_wrap = uncert_core.wrap( flat_fsum, itertools.repeat(lambda *args: 1)) return wraps(lambda arg_list: flat_fsum_wrap(*arg_list), original_func) # !!!!!!!! Documented? fsum = wrapped_fsum() non_std_wrapped_funcs.append('fsum') ########## # Some functions that either return multiple arguments (modf, frexp) # or take some non-float arguments (which should not be converted to # numbers with uncertainty). # ! The arguments have the same names as in the math module # documentation, so that the docstrings are consistent with them. @uncert_core.set_doc(math.modf.__doc__) def modf(x): """ Version of modf that works for numbers with uncertainty, and also for regular numbers. """ # The code below is inspired by uncert_core.wrap(). It is # simpler because only 1 argument is given, and there is no # delegation to other functions involved (as for __mul__, etc.). 
aff_func = to_affine_scalar(x) # Uniform treatment of all numbers (frac_part, int_part) = math.modf(aff_func.nominal_value) if aff_func._linear_part: # If not a constant # The derivative of the fractional part is simply 1: the # linear part of modf(x)[0] is the linear part of x: return (AffineScalarFunc(frac_part, aff_func._linear_part), int_part) else: # This function was not called with an AffineScalarFunc # argument: there is no need to return numbers with uncertainties: return (frac_part, int_part) many_scalars_to_scalar_funcs.append('modf') @uncert_core.set_doc(math.ldexp.__doc__) def ldexp(x, i): # Another approach would be to add an additional argument to # uncert_core.wrap() so that some arguments are automatically # considered as constants. aff_func = to_affine_scalar(x) # y must be an integer, for math.ldexp if aff_func._linear_part: return AffineScalarFunc( math.ldexp(aff_func.nominal_value, i), LinearCombination([(2**i, aff_func._linear_part)])) else: # This function was not called with an AffineScalarFunc # argument: there is no need to return numbers with uncertainties: # aff_func.nominal_value is not passed instead of x, because # we do not have to care about the type of the return value of # math.ldexp, this way (aff_func.nominal_value might be the # value of x coerced to a difference type [int->float, for # instance]): return math.ldexp(x, i) many_scalars_to_scalar_funcs.append('ldexp') @uncert_core.set_doc(math.frexp.__doc__) def frexp(x): """ Version of frexp that works for numbers with uncertainty, and also for regular numbers. """ # The code below is inspired by uncert_core.wrap(). It is # simpler because only 1 argument is given, and there is no # delegation to other functions involved (as for __mul__, etc.). 
aff_func = to_affine_scalar(x) if aff_func._linear_part: (mantissa, exponent) = math.frexp(aff_func.nominal_value) return ( AffineScalarFunc( mantissa, # With frexp(x) = (m, e), x = m*2**e, so m = x*2**-e # and therefore dm/dx = 2**-e (as e in an integer that # does not vary when x changes): LinearCombination([2**-exponent, aff_func._linear_part])), # The exponent is an integer and is supposed to be # continuous (errors must be small): exponent) else: # This function was not called with an AffineScalarFunc # argument: there is no need to return numbers with uncertainties: return math.frexp(x) non_std_wrapped_funcs.append('frexp') ############################################################################### # Exported functions: __all__ = many_scalars_to_scalar_funcs + non_std_wrapped_funcs uncertainties-3.1.7/uncertainties/unumpy/000077500000000000000000000000001425362552000206155ustar00rootroot00000000000000uncertainties-3.1.7/uncertainties/unumpy/__init__.py000066400000000000000000000054311425362552000227310ustar00rootroot00000000000000""" Utilities for NumPy arrays and matrices that contain numbers with uncertainties. This package contains: 1) utilities that help with the creation and manipulation of NumPy arrays and matrices of numbers with uncertainties; 2) generalizations of multiple NumPy functions so that they also work with arrays that contain numbers with uncertainties. 
- Arrays of numbers with uncertainties can be built as follows: arr = unumpy.uarray([1, 2], [0.01, 0.002]) # (values, uncertainties) NumPy arrays of numbers with uncertainties can also be built directly through NumPy, thanks to NumPy's support of arrays of arbitrary objects: arr = numpy.array([uncertainties.ufloat(1, 0.1),...]) - Matrices of numbers with uncertainties are best created in one of two ways: mat = unumpy.umatrix(([1, 2], [0.01, 0.002])) # (values, uncertainties) Matrices can also be built by converting arrays of numbers with uncertainties, through the unumpy.matrix class: mat = unumpy.matrix(arr) unumpy.matrix objects behave like numpy.matrix objects of numbers with uncertainties, but with better support for some operations (such as matrix inversion): # The inverse or pseudo-inverse of a unumpy.matrix can be calculated: print mat.I # Would not work with numpy.matrix([[ufloat(...),...]]).I - Nominal values and uncertainties of arrays can be directly accessed: print unumpy.nominal_values(arr) # [ 1. 2.] print unumpy.std_devs(mat) # [ 0.01 0.002] - This module defines uncertainty-aware mathematical functions that generalize those from uncertainties.umath so that they work on NumPy arrays of numbers with uncertainties instead of just scalars: print unumpy.cos(arr) # Array with the cosine of each element NumPy's function names are used, and not those of the math module (for instance, unumpy.arccos is defined, like in NumPy, and is not named acos like in the standard math module). The definitions of the mathematical quantities calculated by these functions are available in the documentation of uncertainties.umath. - The unumpy.ulinalg module contains more uncertainty-aware functions for arrays that contain numbers with uncertainties (see the documentation for this module). This module requires the NumPy package. (c) 2009-2016 by Eric O. LEBIGOT (EOL) . Please send feature requests, bug reports, or feedback to this address. 
This software is released under a dual license. (1) The BSD license. (2) Any other license, as long as it is obtained from the original author.""" # Local modules: from .core import * from . import ulinalg # Local sub-module # __all__ is set so that pydoc shows all important functions: __all__ = core.__all__ # "import numpy" makes numpy.linalg available. This behavior is # copied here, for maximum compatibility: __all__.append('ulinalg') uncertainties-3.1.7/uncertainties/unumpy/core.py000066400000000000000000000671401425362552000221270ustar00rootroot00000000000000""" Core functions used by unumpy and some of its submodules. (c) 2010-2016 by Eric O. LEBIGOT (EOL). """ # The functions found in this module cannot be defined in unumpy or # its submodule: this creates import loops, when unumpy explicitly # imports one of the submodules in order to make it available to the # user. from __future__ import division # Standard modules: from builtins import next from builtins import zip from builtins import range import sys import inspect # 3rd-party modules: import numpy from numpy.core import numeric # Local modules: import uncertainties.umath_core as umath_core import uncertainties.core as uncert_core from uncertainties.core import deprecation __all__ = [ # Factory functions: 'uarray', 'umatrix', # Utilities: 'nominal_values', 'std_devs', # Classes: 'matrix' ] ############################################################################### # Utilities: # nominal_values() and std_devs() are defined as functions (instead of # as additional methods of the unumpy.matrix class) because the user # might well directly build arrays of numbers with uncertainties # without going through the factory functions found in this module # (uarray() and umatrix()). Thus, # numpy.array([uncert_core.ufloat((1, 0.1))]) would not # have a nominal_values() method. 
Adding such a method to, say, # unumpy.matrix, would break the symmetry between NumPy arrays and # matrices (no nominal_values() method), and objects defined in this # module. # ! Warning: the __doc__ is set, but help(nominal_values) does not # display it, but instead displays the documentation for the type of # nominal_values (i.e. the documentation of its class): to_nominal_values = numpy.vectorize( uncert_core.nominal_value, otypes=[float], # Because vectorize() has side effects (dtype setting) doc=("Return the nominal value of the numbers with uncertainties contained" " in a NumPy (or unumpy) array (this includes matrices).")) to_std_devs = numpy.vectorize( uncert_core.std_dev, otypes=[float], # Because vectorize() has side effects (dtype setting) doc=("Return the standard deviation of the numbers with uncertainties" " contained in a NumPy array, or zero for other objects.")) def unumpy_to_numpy_matrix(arr): """ If arr in a unumpy.matrix, it is converted to a numpy.matrix. Otherwise, it is returned unchanged. """ if isinstance(arr, matrix): return arr.view(numpy.matrix) else: return arr def nominal_values(arr): """ Return the nominal values of the numbers in NumPy array arr. Elements that are not numbers with uncertainties (derived from a class from this module) are passed through untouched (because a numpy.array can contain numbers with uncertainties and pure floats simultaneously). If arr is of type unumpy.matrix, the returned array is a numpy.matrix, because the resulting matrix does not contain numbers with uncertainties. """ return unumpy_to_numpy_matrix(to_nominal_values(arr)) def std_devs(arr): """ Return the standard deviations of the numbers in NumPy array arr. Elements that are not numbers with uncertainties (derived from a class from this module) are passed through untouched (because a numpy.array can contain numbers with uncertainties and pure floats simultaneously). 
If arr is of type unumpy.matrix, the returned array is a numpy.matrix, because the resulting matrix does not contain numbers with uncertainties. """ return unumpy_to_numpy_matrix(to_std_devs(arr)) ############################################################################### def derivative(u, var): """ Return the derivative of u along var, if u is an uncert_core.AffineScalarFunc instance, and if var is one of the variables on which it depends. Otherwise, return 0. """ if isinstance(u, uncert_core.AffineScalarFunc): try: return u.derivatives[var] except KeyError: return 0. else: return 0. def wrap_array_func(func): # !!! This function is not used in the code, except in the tests. # # !!! The implementation seems superficially similar to # uncertainties.core.wrap(): is there code/logic duplication # (which should be removed)? """ Return a version of the function func() that works even when func() is given a NumPy array that contains numbers with uncertainties, as first argument. This wrapper is similar to uncertainties.core.wrap(), except that it handles an array argument instead of float arguments, and that the result can be an array. However, the returned function is more restricted: the array argument cannot be given as a keyword argument with the name in the original function (it is not a drop-in replacement). func -- function whose first argument is a single NumPy array, and which returns a NumPy array. """ @uncert_core.set_doc("""\ Version of %s(...) that works even when its first argument is a NumPy array that contains numbers with uncertainties. Warning: elements of the first argument array that are not AffineScalarFunc objects must not depend on uncert_core.Variable objects in any way. Otherwise, the dependence of the result in uncert_core.Variable objects will be incorrect. 
Original documentation: %s""" % (func.__name__, func.__doc__)) def wrapped_func(arr, *args, **kwargs): # Nominal value: arr_nominal_value = nominal_values(arr) func_nominal_value = func(arr_nominal_value, *args, **kwargs) # The algorithm consists in numerically calculating the derivatives # of func: # Variables on which the array depends are collected: variables = set() for element in arr.flat: # floats, etc. might be present if isinstance(element, uncert_core.AffineScalarFunc): # !!!! The following forces an evaluation of the # derivatives!? Isn't this very slow, when # working with a large number of arrays? # # !! set() is only needed for Python 2 compatibility: variables |= set(element.derivatives.keys()) # If the matrix has no variables, then the function value can be # directly returned: if not variables: return func_nominal_value # Calculation of the derivatives of each element with respect # to the variables. Each element must be independent of the # others. The derivatives have the same shape as the output # array (which might differ from the shape of the input array, # in the case of the pseudo-inverse). derivatives = numpy.vectorize(lambda _: {})(func_nominal_value) for var in variables: # A basic assumption of this package is that the user # guarantees that uncertainties cover a zone where # evaluated functions are linear enough. Thus, numerical # estimates of the derivative should be good over the # standard deviation interval. This is true for the # common case of a non-zero standard deviation of var. If # the standard deviation of var is zero, then var has no # impact on the uncertainty of the function func being # calculated: an incorrect derivative has no impact. 
One # scenario can give incorrect results, however, but it # should be extremely uncommon: the user defines a # variable x with 0 standard deviation, sets y = func(x) # through this routine, changes the standard deviation of # x, and prints y; in this case, the uncertainty on y # might be incorrect, because this program had no idea of # the scale on which func() is linear, when it calculated # the numerical derivative. # The standard deviation might be numerically too small # for the evaluation of the derivative, though: we set the # minimum variable shift. shift_var = max(var._std_dev/1e5, 1e-8*abs(var._nominal_value)) # An exceptional case is that of var being exactly zero. # In this case, an arbitrary shift is used for the # numerical calculation of the derivative. The resulting # derivative value might be quite incorrect, but this does # not matter as long as the uncertainty of var remains 0, # since it is, in this case, a constant. if not shift_var: shift_var = 1e-8 # Shift of all the elements of arr when var changes by shift_var: shift_arr = array_derivative(arr, var)*shift_var # Origin value of array arr when var is shifted by shift_var: shifted_arr_values = arr_nominal_value + shift_arr func_shifted = func(shifted_arr_values, *args, **kwargs) numerical_deriv = (func_shifted-func_nominal_value)/shift_var # Update of the list of variables and associated # derivatives, for each element: for (derivative_dict, derivative_value) in ( zip(derivatives.flat, numerical_deriv.flat)): if derivative_value: derivative_dict[var] = derivative_value # numbers with uncertainties are built from the result: return numpy.vectorize(uncert_core.AffineScalarFunc)( func_nominal_value, numpy.vectorize(uncert_core.LinearCombination)(derivatives)) wrapped_func = uncert_core.set_doc("""\ Version of %s(...) that works even when its first argument is a NumPy array that contains numbers with uncertainties. 
Warning: elements of the first argument array that are not AffineScalarFunc objects must not depend on uncert_core.Variable objects in any way. Otherwise, the dependence of the result in uncert_core.Variable objects will be incorrect. Original documentation: %s""" % (func.__name__, func.__doc__))(wrapped_func) # It is easier to work with wrapped_func, which represents a # wrapped version of 'func', when it bears the same name as # 'func' (the name is used by repr(wrapped_func)). wrapped_func.__name__ = func.__name__ return wrapped_func ############################################################################### # Arrays def uarray(nominal_values, std_devs=None): """ Return a NumPy array of numbers with uncertainties initialized with the given nominal values and standard deviations. nominal_values, std_devs -- valid arguments for numpy.array, with identical shapes (list of numbers, list of lists, numpy.ndarray, etc.). std_devs=None is only used for supporting legacy code, where nominal_values can be the tuple of nominal values and standard deviations. """ if std_devs is None: # Obsolete, single tuple argument call deprecation('uarray() should now be called with two arguments.') (nominal_values, std_devs) = nominal_values return (numpy.vectorize( # ! Looking up uncert_core.Variable beforehand through # '_Variable = uncert_core.Variable' does not result in a # significant speed up: lambda v, s: uncert_core.Variable(v, s), otypes=[object]) (nominal_values, std_devs)) ############################################################################### def array_derivative(array_like, var): """ Return the derivative of the given array with respect to the given variable. The returned derivative is a NumPy ndarray of the same shape as array_like, that contains floats. array_like -- array-like object (list, etc.) that contains scalars or numbers with uncertainties. var -- Variable object. 
""" return numpy.vectorize(lambda u: derivative(u, var), # The type is set because an # integer derivative should not # set the output type of the # array: otypes=[float])(array_like) def func_with_deriv_to_uncert_func(func_with_derivatives): # This function is used for instance for the calculation of the # inverse and pseudo-inverse of a matrix with uncertainties. """ Return a function that can be applied to array-like objects that contain numbers with uncertainties (lists, lists of lists, NumPy arrays, etc.). func_with_derivatives -- defines a function that takes an array-like object containing scalars and returns an array. Both the value and the derivatives of this function with respect to multiple scalar parameters are calculated by this func_with_derivatives() argument. func_with_derivatives(arr, input_type, derivatives, *args, **kwargs) must return an iterator. The first element returned by this iterator is the value of the function at the n-dimensional array-like 'arr' (with the correct type). The following elements are arrays that represent the derivative of the function for each derivative array from the iterator 'derivatives'. func_with_derivatives() takes the following arguments: arr -- NumPy ndarray of scalars where the function must be evaluated. input_type -- data type of the input array-like object. This type is used for determining the type that the function should return. derivatives -- iterator that returns the derivatives of the argument of the function with respect to multiple scalar variables. func_with_derivatives() returns the derivatives of the defined function with respect to these variables. args -- additional arguments that define the result (example: for the pseudo-inverse numpy.linalg.pinv: numerical cutoff). Examples of func_with_derivatives: inv_with_derivatives(). 
""" def wrapped_func(array_like, *args, **kwargs): """ array_like -- n-dimensional array-like object that contains numbers with uncertainties (list, NumPy ndarray or matrix, etc.). args -- additional arguments that are passed directly to func_with_derivatives. """ # The calculation below is not lazy, contrary to the linear # error propagation done in AffineScalarFunc. Making it lazy # in the same way would be quite a specific task: basically # this would amount to generalizing scalar coefficients in # core.LinearCombination to more general matrix # multiplications, and to replace Variable differentials by # full matrices of coefficients. This does not look very # efficient, as matrices are quite big, and since caching the # result of a few matrix functions that are not typically # stringed one after the other (unlike a big sum of numbers) # should not be needed. # So that .flat works even if array_like is a list: array_version = numpy.asanyarray(array_like) # Variables on which the array depends are collected: variables = set() for element in array_version.flat: # floats, etc. might be present if isinstance(element, uncert_core.AffineScalarFunc): # !!! set() is only needed for Python 2 compatibility: variables |= set(element.derivatives.keys()) array_nominal = nominal_values(array_version) # Function value, then derivatives at array_nominal (the # derivatives are with respect to the variables contained in # array_like): func_then_derivs = func_with_derivatives( array_nominal, type(array_like), (array_derivative(array_version, var) for var in variables), *args, **kwargs) func_nominal_value = next(func_then_derivs) if not variables: return func_nominal_value # The result is built progressively, with the contribution of # each variable added in turn: # Calculation of the derivatives of the result with respect to # the variables. 
derivatives = ( numpy.array( [{} for _ in range(func_nominal_value.size)], dtype=object) .reshape(func_nominal_value.shape)) # Memory-efficient approach. A memory-hungry approach would # be to calculate the matrix derivatives will respect to all # variables and then combine them into a matrix of # AffineScalarFunc objects. The approach followed here is to # progressively build the matrix of derivatives, by # progressively adding the derivatives with respect to # successive variables. for (var, deriv_wrt_var) in zip(variables, func_then_derivs): # Update of the list of variables and associated # derivatives, for each element: for (derivative_dict, derivative_value) in zip( derivatives.flat, deriv_wrt_var.flat): if derivative_value: derivative_dict[var] = derivative_value # An array of numbers with uncertainties is built from the # result: result = numpy.vectorize(uncert_core.AffineScalarFunc)( func_nominal_value, numpy.vectorize(uncert_core.LinearCombination)(derivatives)) # NumPy matrices that contain numbers with uncertainties are # better as unumpy matrices: if isinstance(result, numpy.matrix): result = result.view(matrix) return result return wrapped_func ########## Matrix inverse def inv_with_derivatives(arr, input_type, derivatives): """ Defines the matrix inverse and its derivatives. See the definition of func_with_deriv_to_uncert_func() for its detailed semantics. """ inverse = numpy.linalg.inv(arr) # The inverse of a numpy.matrix is a numpy.matrix. 
It is assumed # that numpy.linalg.inv is such that other types yield # numpy.ndarrays: if issubclass(input_type, numpy.matrix): inverse = inverse.view(numpy.matrix) yield inverse # It is mathematically convenient to work with matrices: inverse_mat = numpy.asmatrix(inverse) # Successive derivatives of the inverse: for derivative in derivatives: derivative_mat = numpy.asmatrix(derivative) yield -inverse_mat * derivative_mat * inverse_mat inv = func_with_deriv_to_uncert_func(inv_with_derivatives) inv.__doc__ = """\ Version of numpy.linalg.inv that works with array-like objects that contain numbers with uncertainties. The result is a unumpy.matrix if numpy.linalg.pinv would return a matrix for the array of nominal values. Analytical formulas are used. Original documentation: %s """ % numpy.linalg.inv.__doc__ ########## Matrix pseudo-inverse def pinv_with_derivatives(arr, input_type, derivatives, rcond): """ Defines the matrix pseudo-inverse and its derivatives. Works with real or complex matrices. See the definition of func_with_deriv_to_uncert_func() for its detailed semantics. """ inverse = numpy.linalg.pinv(arr, rcond) # The pseudo-inverse of a numpy.matrix is a numpy.matrix. It is # assumed that numpy.linalg.pinv is such that other types yield # numpy.ndarrays: if issubclass(input_type, numpy.matrix): inverse = inverse.view(numpy.matrix) yield inverse # It is mathematically convenient to work with matrices: inverse_mat = numpy.asmatrix(inverse) # Formula (4.12) from The Differentiation of Pseudo-Inverses and # Nonlinear Least Squares Problems Whose Variables # Separate. Author(s): G. H. Golub and V. Pereyra. Source: SIAM # Journal on Numerical Analysis, Vol. 10, No. 2 (Apr., 1973), # pp. 413-432 # See also # http://mathoverflow.net/questions/25778/analytical-formula-for-numerical-derivative-of-the-matrix-pseudo-inverse # Shortcuts. 
All the following factors should be numpy.matrix objects: PA = arr*inverse_mat AP = inverse_mat*arr factor21 = inverse_mat*inverse_mat.H factor22 = numpy.eye(arr.shape[0])-PA factor31 = numpy.eye(arr.shape[1])-AP factor32 = inverse_mat.H*inverse_mat # Successive derivatives of the inverse: for derivative in derivatives: derivative_mat = numpy.asmatrix(derivative) term1 = -inverse_mat*derivative_mat*inverse_mat derivative_mat_H = derivative_mat.H term2 = factor21*derivative_mat_H*factor22 term3 = factor31*derivative_mat_H*factor32 yield term1+term2+term3 # Default rcond argument for the generalization of numpy.linalg.pinv: # # Most common modern case first: try: pinv_default = ( inspect.signature(numpy.linalg.pinv).parameters["rcond"].default) except AttributeError: # No inspect.signature() before Python 3.3 try: # In numpy 1.17+, pinv is wrapped using a decorator which unfortunately # results in the metadata (argument defaults) being lost. However, we # can still get at the original function using the __wrapped__ # attribute (which is what inspect.signature() does). pinv_default = numpy.linalg.pinv.__wrapped__.__defaults__[0] except AttributeError: # Function not wrapped in NumPy < 1.17 pinv_default = numpy.linalg.pinv.__defaults__[0] # Python 1, 2.6+: pinv_with_uncert = func_with_deriv_to_uncert_func(pinv_with_derivatives) def pinv(array_like, rcond=pinv_default): return pinv_with_uncert(array_like, rcond) pinv = uncert_core.set_doc(""" Version of numpy.linalg.pinv that works with array-like objects that contain numbers with uncertainties. The result is a unumpy.matrix if numpy.linalg.pinv would return a matrix for the array of nominal values. Analytical formulas are used. Original documentation: %s """ % numpy.linalg.pinv.__doc__)(pinv) ########## Matrix class class CallableStdDevs(numpy.matrix): ''' Class for standard deviation results, which used to be callable. Provided for compatibility with old code. Issues an obsolescence warning upon call. 
New objects must be created by passing an existing ''' def __new__(cls, matrix): # The following prevents a copy of the original matrix, which # could be expensive, and is unnecessary (the CallableStdDevs # is just a wrapping around the original matrix, which can be # modified): matrix.__class__ = cls return matrix def __call__ (self): deprecation('the std_devs attribute should not be called' ' anymore: use .std_devs instead of .std_devs().') return self class matrix(numpy.matrix): # The name of this class is the same as NumPy's, which is why it # does not follow PEP 8. """ Class equivalent to numpy.matrix, but that behaves better when the matrix contains numbers with uncertainties. """ def __rmul__(self, other): # ! NumPy's matrix __rmul__ uses an apparently restrictive # dot() function that cannot handle the multiplication of a # scalar and of a matrix containing objects (when the # arguments are given in this order). We go around this # limitation: if numeric.isscalar(other): return numeric.dot(self, other) else: return numeric.dot(other, self) # The order is important def getI(self): """Matrix inverse or pseudo-inverse.""" m, n = self.shape return (inv if m == n else pinv)(self) I = numpy.matrix.I.getter(getI) # !!! The following function is not in the official documentation # of the module. Maybe this is because arrays with uncertainties # do not have any equivalent in this module, and they should be # the first ones to have such methods? @property def nominal_values(self): """ Nominal value of all the elements of the matrix. """ return nominal_values(self) # !!! The following function is not in the official documentation # of the module. Maybe this is because arrays with uncertainties # do not have any equivalent in this module, and they should be # the first ones to have such methods? 
    @property
    def std_devs(self):
        # Wrapped in CallableStdDevs only so that legacy code that
        # still writes .std_devs() keeps working (with a deprecation
        # warning):
        return CallableStdDevs(std_devs(self))

def umatrix(nominal_values, std_devs=None):
    """
    Constructs a matrix that contains numbers with uncertainties.

    The arguments are the same as for uarray(...): nominal values, and
    standard deviations.

    The returned matrix can be inverted, thanks to the fact that it is
    a unumpy.matrix object instead of a numpy.matrix one.
    """
    if std_devs is None:  # Obsolete, single tuple argument call
        deprecation('umatrix() should now be called with two arguments.')
        (nominal_values, std_devs) = nominal_values

    return uarray(nominal_values, std_devs).view(matrix)

###############################################################################

def define_vectorized_funcs():
    """
    Defines vectorized versions of functions from uncertainties.umath_core.

    Some functions have their name translated, so as to follow NumPy's
    convention (example: math.acos -> numpy.arccos).
    """

    this_module = sys.modules[__name__]

    # NumPy does not always use the same function names as the math
    # module:
    func_name_translations = dict([
        (f_name, 'arc'+f_name[1:])
        for f_name in ['acos', 'acosh', 'asin', 'atan', 'atan2', 'atanh']])

    new_func_names = [
        func_name_translations.get(function_name, function_name)
        # The functions from umath_core.non_std_wrapped_funcs
        # (available from umath) are normally not in
        # NumPy, so they are not included here:
        for function_name in umath_core.many_scalars_to_scalar_funcs]

    for (function_name, unumpy_name) in zip(
        umath_core.many_scalars_to_scalar_funcs, new_func_names):

        # ! The newly defined functions (uncertainties.unumpy.cos, etc.)
        # do not behave exactly like their NumPy equivalent (numpy.cos,
        # etc.): cos(0) gives an array() and not a
        # numpy.float... (equality tests succeed, though).

        func = getattr(umath_core, function_name)

        # Data type of the result of the unumpy function:
        otypes = (
            # It is much more convenient to preserve the type of
            # functions that return a number without
            # uncertainty.
Thus, for example, unumpy.isnan() can # return an array with a boolean data type (instead of # object), which allows the result to be used with NumPy's # boolean indexing. {} if function_name in umath_core.locally_cst_funcs # If by any chance a function returns, in a particular # case, an integer instead of a number with uncertainty, # side-effects in vectorize() would fix the resulting # dtype to integer, which is not what is wanted (as # vectorize(), at least in NumPy around 2010 maybe, # decided about the output data type by looking at the # type of first element only). else {'otypes': [object]}) setattr( this_module, unumpy_name, #!!!! For umath_core.locally_cst_funcs, would it make sense # to optimize this by using instead the equivalent (? see # above) vectorized NumPy function on the nominal values? numpy.vectorize(func, doc="""\ Vectorized version of umath.%s. Original documentation: %s""" % (function_name, func.__doc__), **otypes)) __all__.append(unumpy_name) define_vectorized_funcs() uncertainties-3.1.7/uncertainties/unumpy/test_ulinalg.py000066400000000000000000000054551425362552000236720ustar00rootroot00000000000000""" Tests for uncertainties.unumpy.ulinalg. These tests can be run through the Nose testing framework. (c) 2010-2016 by Eric O. LEBIGOT (EOL) . """ # Some tests are already performed in test_unumpy (unumpy contains a # matrix inversion, for instance). They are not repeated here. from __future__ import division try: import numpy except ImportError: import sys sys.exit() # There is no reason to test the interface to NumPy from uncertainties import unumpy, ufloat from uncertainties.unumpy.test_unumpy import arrays_close def test_list_inverse(): "Test of the inversion of a square matrix" mat_list = [[1, 1], [1, 0]] # numpy.linalg.inv(mat_list) does calculate the inverse even # though mat_list is a list of lists (and not a matrix). Can # ulinalg do the same? 
Here is a test: mat_list_inv = unumpy.ulinalg.inv(mat_list) # More type testing: mat_matrix = numpy.asmatrix(mat_list) assert isinstance(unumpy.ulinalg.inv(mat_matrix), type(numpy.linalg.inv(mat_matrix))) # unumpy.ulinalg should behave in the same way as numpy.linalg, # with respect to types: mat_list_inv_numpy = numpy.linalg.inv(mat_list) assert type(mat_list_inv) == type(mat_list_inv_numpy) # The resulting matrix does not have to be a matrix that can # handle uncertainties, because the input matrix does not have # uncertainties: assert not isinstance(mat_list_inv, unumpy.matrix) # Individual element check: assert isinstance(mat_list_inv[1,1], float) assert mat_list_inv[1,1] == -1 x = ufloat(1, 0.1) y = ufloat(2, 0.1) mat = unumpy.matrix([[x, x], [y, 0]]) # Internal consistency: ulinalg.inv() must coincide with the # unumpy.matrix inverse, for square matrices (.I is the # pseudo-inverse, for non-square matrices, but inv() is not). assert arrays_close(unumpy.ulinalg.inv(mat), mat.I) def test_list_pseudo_inverse(): "Test of the pseudo-inverse" x = ufloat(1, 0.1) y = ufloat(2, 0.1) mat = unumpy.matrix([[x, x], [y, 0]]) # Internal consistency: the inverse and the pseudo-inverse yield # the same result on square matrices: assert arrays_close(mat.I, unumpy.ulinalg.pinv(mat), 1e-4) assert arrays_close(unumpy.ulinalg.inv(mat), # Support for the optional pinv argument is # tested: unumpy.ulinalg.pinv(mat, 1e-15), 1e-4) # Non-square matrices: x = ufloat(1, 0.1) y = ufloat(2, 0.1) mat1 = unumpy.matrix([[x, y]]) # "Long" matrix mat2 = unumpy.matrix([[x, y], [1, 3+x], [y, 2*x]]) # "Tall" matrix # Internal consistency: assert arrays_close(mat1.I, unumpy.ulinalg.pinv(mat1, 1e-10)) assert arrays_close(mat2.I, unumpy.ulinalg.pinv(mat2, 1e-8)) uncertainties-3.1.7/uncertainties/unumpy/test_unumpy.py000066400000000000000000000247161425362552000235750ustar00rootroot00000000000000""" Tests of the code in uncertainties/unumpy/__init__.py. 
These tests can be run through the Nose testing framework.

(c) 2010-2016 by Eric O. LEBIGOT (EOL).
"""

from __future__ import division

# 3rd-party modules:
try:
    import numpy
except ImportError:
    import sys
    sys.exit()  # There is no reason to test the interface to NumPy

# Local modules:
import uncertainties
import uncertainties.core as uncert_core
from uncertainties import ufloat, unumpy, test_uncertainties
from uncertainties.unumpy import core
from uncertainties.test_uncertainties import numbers_close, arrays_close

def test_numpy():
    """
    Interaction with NumPy, including matrix inversion,
    correlated_values, and calculation of the mean.
    """

    arr = numpy.arange(3)
    num = ufloat(3.14, 0.01)

    # NumPy arrays can be multiplied by Variable objects,
    # whatever the order of the operands:
    prod1 = arr*num
    prod2 = num*arr

    # Additional check:
    assert (prod1 == prod2).all()

    # Operations with arrays work (they are first handled by NumPy,
    # then by this module):
    prod1*prod2  # This should be calculable
    assert not (prod1-prod2).any()  # All elements must be 0

    # Comparisons work too:

    # Usual behavior:
    assert len(arr[arr > 1.5]) == 1
    # Comparisons with Variable objects:
    assert len(arr[arr > ufloat(1.5, 0.1)]) == 1

    assert len(prod1[prod1 < prod1*prod2]) == 2

    # The following can be calculated (special NumPy abs() function):
    numpy.abs(arr + ufloat(-1, 0.1))

    # The following does not completely work, because NumPy does not
    # implement numpy.exp on an array of general objects, apparently:
    assert numpy.exp(arr).all()  # All elements > 0

    # Equivalent with an array of AffineScalarFunc objects:
    try:
        numpy.exp(arr + ufloat(0, 0))
    except (AttributeError, TypeError):
        # In numpy<1.17, an AttributeError is raised in this situation.
        # This was
        # considered a bug however, and in numpy 1.17 it was changed to a
        # TypeError (see PR #12700 in numpy repository)
        pass
    else:
        raise Exception("numpy.exp unexpectedly worked")

    # Calculation of the mean, global and with a specific axis:
    arr_floats = numpy.random.random((10, 3, 5))
    arr = unumpy.uarray(arr_floats, arr_floats/100)
    assert arr.mean(axis=0).shape == (3, 5)
    assert arr.mean(axis=1).shape == (10, 5)
    arr.mean()  # Global mean

def test_matrix():
    "Matrices of numbers with uncertainties"

    # Matrix inversion:

    # Matrix with a mix of Variable objects and regular
    # Python numbers:
    m = unumpy.matrix([[ufloat(10, 1), -3.1],
                       [0, ufloat(3, 0)]])
    m_nominal_values = unumpy.nominal_values(m)

    # Test of the nominal_value attribute:
    assert numpy.all(m_nominal_values == m.nominal_values)

    assert type(m[0, 0]) == uncert_core.Variable

    # Test of scalar multiplication, both sides (exercises
    # matrix.__mul__ and the custom matrix.__rmul__):
    3*m
    m*3

def derivatives_close(x, y):
    """
    Returns True iff the AffineScalarFunc objects x and y have
    derivatives that are close to each other (they must depend on the
    same variables).
""" # x and y must depend on the same variables: if set(x.derivatives) != set(y.derivatives): return False # Not the same variables return all(numbers_close(x.derivatives[var], y.derivatives[var]) for var in x.derivatives) def test_inverse(): "Tests of the matrix inverse" m = unumpy.matrix([[ufloat(10, 1), -3.1], [0, ufloat(3, 0)]]) m_nominal_values = unumpy.nominal_values(m) # "Regular" inverse matrix, when uncertainties are not taken # into account: m_no_uncert_inv = m_nominal_values.I # The matrix inversion should not yield numbers with uncertainties: assert m_no_uncert_inv.dtype == numpy.dtype(float) # Inverse with uncertainties: m_inv_uncert = m.I # AffineScalarFunc elements # The inverse contains uncertainties: it must support custom # operations on matrices with uncertainties: assert isinstance(m_inv_uncert, unumpy.matrix) assert type(m_inv_uncert[0, 0]) == uncert_core.AffineScalarFunc # Checks of the numerical values: the diagonal elements of the # inverse should be the inverses of the diagonal elements of # m (because we started with a triangular matrix): assert numbers_close(1/m_nominal_values[0, 0], m_inv_uncert[0, 0].nominal_value), "Wrong value" assert numbers_close(1/m_nominal_values[1, 1], m_inv_uncert[1, 1].nominal_value), "Wrong value" #################### # Checks of the covariances between elements: x = ufloat(10, 1) m = unumpy.matrix([[x, x], [0, 3+2*x]]) m_inverse = m.I # Check of the properties of the inverse: m_double_inverse = m_inverse.I # The initial matrix should be recovered, including its # derivatives, which define covariances: assert numbers_close(m_double_inverse[0, 0].nominal_value, m[0, 0].nominal_value) assert numbers_close(m_double_inverse[0, 0].std_dev, m[0, 0].std_dev) assert arrays_close(m_double_inverse, m) # Partial test: assert derivatives_close(m_double_inverse[0, 0], m[0, 0]) assert derivatives_close(m_double_inverse[1, 1], m[1, 1]) #################### # Tests of covariances during the inversion: # There are correlations 
    # if both the next two derivatives are
    # not zero:
    assert m_inverse[0, 0].derivatives[x]
    assert m_inverse[0, 1].derivatives[x]

    # Correlations between m and m_inverse should create a perfect
    # inversion:
    assert arrays_close(m * m_inverse, numpy.eye(m.shape[0]))

def test_wrap_array_func():
    '''
    Test of numpy.wrap_array_func(), with optional arguments and
    keyword arguments.
    '''

    # Function that works with numbers with uncertainties in mat (if
    # mat is an uncertainties.unumpy.matrix):
    def f_unc(mat, *args, **kwargs):
        return mat.I + args[0]*kwargs['factor']

    # Test with optional arguments and keyword arguments:
    def f(mat, *args, **kwargs):
        # This function is wrapped: it should only be called with pure
        # numbers:
        assert not any(isinstance(v, uncert_core.UFloat) for v in mat.flat)
        return f_unc(mat, *args, **kwargs)

    # Wrapped function:
    f_wrapped = core.wrap_array_func(f)

    ##########
    # Full rank rectangular matrix:
    m = unumpy.matrix([[ufloat(10, 1), -3.1],
                       [0, ufloat(3, 0)],
                       [1, -3.1]])

    # Numerical and package (analytical) pseudo-inverses: they must be
    # the same:
    m_f_wrapped = f_wrapped(m, 2, factor=10)
    m_f_unc = f_unc(m, 2, factor=10)
    assert arrays_close(m_f_wrapped, m_f_unc)

def test_pseudo_inverse():
    "Tests of the pseudo-inverse"

    # Numerical version of the pseudo-inverse:
    pinv_num = core.wrap_array_func(numpy.linalg.pinv)

    ##########
    # Full rank rectangular matrix:
    m = unumpy.matrix([[ufloat(10, 1), -3.1],
                       [0, ufloat(3, 0)],
                       [1, -3.1]])

    # Numerical and package (analytical) pseudo-inverses: they must be
    # the same:
    rcond = 1e-8  # Test of the second argument to pinv()
    m_pinv_num = pinv_num(m, rcond)
    m_pinv_package = core.pinv(m, rcond)
    assert arrays_close(m_pinv_num, m_pinv_package)

    ##########
    # Example with a non-full rank rectangular matrix:
    vector = [ufloat(10, 1), -3.1, 11]
    m = unumpy.matrix([vector, vector])
    m_pinv_num = pinv_num(m, rcond)
    m_pinv_package = core.pinv(m, rcond)
    assert arrays_close(m_pinv_num, m_pinv_package)

    ##########
    # Example with a non-full-rank square
    # matrix:
    m = unumpy.matrix([[ufloat(10, 1), 0], [3, 0]])
    m_pinv_num = pinv_num(m, rcond)
    m_pinv_package = core.pinv(m, rcond)
    assert arrays_close(m_pinv_num, m_pinv_package)

def test_broadcast_funcs():
    """
    Test of mathematical functions that work with NumPy arrays of
    numbers with uncertainties.
    """

    x = ufloat(0.2, 0.1)
    arr = numpy.array([x, 2*x])
    assert unumpy.cos(arr)[1] == uncertainties.umath.cos(arr[1])

    # Some functions do not bear the same name in the math module and
    # in NumPy (acos instead of arccos, etc.):
    assert unumpy.arccos(arr)[1] == uncertainties.umath.acos(arr[1])
    # The acos() function should not exist in unumpy because it does
    # not exist in numpy:
    assert not hasattr(numpy, 'acos')
    assert not hasattr(unumpy, 'acos')

    # Test of the __all__ variable:
    assert 'acos' not in unumpy.__all__

def test_array_and_matrix_creation():
    "Test of custom array creation"

    arr = unumpy.uarray([1, 2], [0.1, 0.2])
    assert arr[1].nominal_value == 2
    assert arr[1].std_dev == 0.2

    # Same thing for matrices:
    mat = unumpy.umatrix([1, 2], [0.1, 0.2])
    assert mat[0,1].nominal_value == 2
    assert mat[0,1].std_dev == 0.2

def test_component_extraction():
    "Extracting the nominal values and standard deviations from an array"

    arr = unumpy.uarray([1, 2], [0.1, 0.2])

    assert numpy.all(unumpy.nominal_values(arr) == [1, 2])
    assert numpy.all(unumpy.std_devs(arr) == [0.1, 0.2])

    # unumpy matrices, in addition, should have nominal_values that
    # are simply numpy matrices (not unumpy ones, because they have no
    # uncertainties):
    mat = unumpy.matrix(arr)
    assert numpy.all(unumpy.nominal_values(mat) == [1, 2])
    assert numpy.all(unumpy.std_devs(mat) == [0.1, 0.2])
    assert type(unumpy.nominal_values(mat)) == numpy.matrix

def test_array_comparisons():
    "Test of array and matrix comparisons"

    arr = unumpy.uarray([1, 2], [1, 4])
    assert numpy.all((arr == [arr[0], 4]) == [True, False])

    # For matrices, 1D arrays are converted to 2D arrays:
    mat = unumpy.umatrix([1, 2], [1, 4])
    assert numpy.all((mat == [mat[0,0], 4]) ==
[True, False]) def test_obsolete(): 'Test of obsolete functions' # The new and old calls should give the same results: # The unusual syntax is here to protect against automatic code # update: arr_obs = unumpy.uarray.__call__(([1, 2], [1, 4])) # Obsolete call arr = unumpy.uarray([1, 2], [1, 4]) assert arrays_close(arr_obs, arr) # The new and old calls should give the same results: # The unusual syntax is here to protect against automatic code # update: mat_obs = unumpy.umatrix.__call__(([1, 2], [1, 4])) # Obsolete call mat = unumpy.umatrix([1, 2], [1, 4]) assert arrays_close(mat_obs, mat) uncertainties-3.1.7/uncertainties/unumpy/ulinalg.py000066400000000000000000000005631425362552000226260ustar00rootroot00000000000000""" This module provides uncertainty-aware functions that generalize some of the functions from numpy.linalg. (c) 2010-2016 by Eric O. LEBIGOT (EOL) . """ from uncertainties import __author__ from uncertainties.unumpy.core import inv, pinv # This module cannot import unumpy because unumpy imports this module. __all__ = ['inv', 'pinv']