lmfit-py-1.0.0/

lmfit-py-1.0.0/INSTALL

Installation instructions for LMFIT-py
========================================

To install the lmfit Python module, use::

    python setup.py build
    python setup.py install

For lmfit 1.0, the following versions are required:
    Python: 3.5 or higher
    NumPy: 1.16 or higher
    SciPy: 1.2 or higher
    asteval: 0.9.16 or higher
    uncertainties: 3.0.1 or higher

Matt Newville
Last Update: 2019-December-4

lmfit-py-1.0.0/LICENSE

BSD-3

Copyright 2019 Matthew Newville, The University of Chicago
               Renee Otten, Brandeis University
               Till Stensitzki, Freie Universitat Berlin
               A. R. J. Nelson, Australian Nuclear Science and Technology Organisation
               Antonino Ingargiola, University of California, Los Angeles
               Daniel B. Allen, Johns Hopkins University
               Michal Rawlik, Eidgenossische Technische Hochschule, Zurich

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Some code has been taken from the scipy library whose licence is below.

Copyright (c) 2001, 2002 Enthought, Inc.
All rights reserved.

Copyright (c) 2003-2019 SciPy Developers.
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

  a. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  b. Redistributions in binary form must reproduce the above copyright notice,
     this list of conditions and the following disclaimer in the documentation
     and/or other materials provided with the distribution.

  c. Neither the name of Enthought nor the names of the SciPy Developers may
     be used to endorse or promote products derived from this software without
     specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Some code has been taken from the AMPGO library of Andrea Gavana, which was
released under a MIT license.

lmfit-py-1.0.0/MANIFEST.in

include README.txt INSTALL LICENSE MANIFEST.in PKG-INFO THANKS.txt
include setup.py publish_docs.sh
include requirements.txt
exclude *.pyc core.* *~ *.pdf
recursive-include lmfit *.py
recursive-include tests *.py *.dat
recursive-include NIST_STRD *.dat
recursive-include examples *.py *.csv *.dat
recursive-exclude examples/documentation *
recursive-include doc *
recursive-exclude doc/_build *
recursive-exclude doc/examples *
recursive-exclude doc *.pdf *.csv *.dat *.sav
include versioneer.py
include lmfit/_version.py

lmfit-py-1.0.0/NIST_STRD/

lmfit-py-1.0.0/NIST_STRD/Bennett5.dat

NIST/ITL StRD
Dataset Name:  Bennett5          (Bennett5.dat)

File Format:   ASCII
               Starting Values   (lines 41 to 43)
               Certified Values  (lines 41 to 48)
               Data              (lines 61 to 214)

Procedure:     Nonlinear Least Squares Regression

Description:   These data are the result of a NIST study involving
               superconductivity magnetization modeling.  The response
               variable is magnetism, and the predictor variable is the
               log of time in minutes.

Reference:     Bennett, L., L. Swartzendruber, and H. Brown,
               NIST (1994).
               Superconductivity Magnetization Modeling.
Data: 1 Response Variable (y = magnetism) 1 Predictor Variable (x = log[time]) 154 Observations Higher Level of Difficulty Observed Data Model: Miscellaneous Class 3 Parameters (b1 to b3) y = b1 * (b2+x)**(-1/b3) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = -2000 -1500 -2.5235058043E+03 2.9715175411E+02 b2 = 50 45 4.6736564644E+01 1.2448871856E+00 b3 = 0.8 0.85 9.3218483193E-01 2.0272299378E-02 Residual Sum of Squares: 5.2404744073E-04 Residual Standard Deviation: 1.8629312528E-03 Degrees of Freedom: 151 Number of Observations: 154 Data: y x -34.834702E0 7.447168E0 -34.393200E0 8.102586E0 -34.152901E0 8.452547E0 -33.979099E0 8.711278E0 -33.845901E0 8.916774E0 -33.732899E0 9.087155E0 -33.640301E0 9.232590E0 -33.559200E0 9.359535E0 -33.486801E0 9.472166E0 -33.423100E0 9.573384E0 -33.365101E0 9.665293E0 -33.313000E0 9.749461E0 -33.260899E0 9.827092E0 -33.217400E0 9.899128E0 -33.176899E0 9.966321E0 -33.139198E0 10.029280E0 -33.101601E0 10.088510E0 -33.066799E0 10.144430E0 -33.035000E0 10.197380E0 -33.003101E0 10.247670E0 -32.971298E0 10.295560E0 -32.942299E0 10.341250E0 -32.916302E0 10.384950E0 -32.890202E0 10.426820E0 -32.864101E0 10.467000E0 -32.841000E0 10.505640E0 -32.817799E0 10.542830E0 -32.797501E0 10.578690E0 -32.774300E0 10.613310E0 -32.757000E0 10.646780E0 -32.733799E0 10.679150E0 -32.716400E0 10.710520E0 -32.699100E0 10.740920E0 -32.678799E0 10.770440E0 -32.661400E0 10.799100E0 -32.644001E0 10.826970E0 -32.626701E0 10.854080E0 -32.612202E0 10.880470E0 -32.597698E0 10.906190E0 -32.583199E0 10.931260E0 -32.568699E0 10.955720E0 -32.554298E0 10.979590E0 -32.539799E0 11.002910E0 -32.525299E0 11.025700E0 -32.510799E0 11.047980E0 -32.499199E0 11.069770E0 -32.487598E0 11.091100E0 -32.473202E0 11.111980E0 -32.461601E0 11.132440E0 -32.435501E0 11.152480E0 -32.435501E0 11.172130E0 -32.426800E0 11.191410E0 -32.412300E0 11.210310E0 -32.400799E0 11.228870E0 -32.392101E0 11.247090E0 -32.380501E0 11.264980E0 -32.366001E0 11.282560E0 -32.357300E0 11.299840E0 -32.348598E0 11.316820E0 -32.339901E0 11.333520E0 -32.328400E0 11.349940E0 -32.319698E0 11.366100E0 -32.311001E0 11.382000E0 -32.299400E0 11.397660E0 -32.290699E0 11.413070E0 -32.282001E0 11.428240E0 -32.273300E0 11.443200E0 -32.264599E0 11.457930E0 -32.256001E0 11.472440E0 -32.247299E0 11.486750E0 -32.238602E0 11.500860E0 -32.229900E0 11.514770E0 -32.224098E0 11.528490E0 -32.215401E0 11.542020E0 -32.203800E0 11.555380E0 -32.198002E0 11.568550E0 -32.189400E0 11.581560E0 -32.183601E0 11.594420E0 -32.174900E0 11.607121E0 -32.169102E0 11.619640E0 -32.163300E0 11.632000E0 -32.154598E0 11.644210E0 -32.145901E0 11.656280E0 -32.140099E0 11.668200E0 -32.131401E0 11.679980E0 -32.125599E0 11.691620E0 -32.119801E0 11.703130E0 -32.111198E0 11.714510E0 -32.105400E0 11.725760E0 -32.096699E0 11.736880E0 -32.090900E0 11.747890E0 -32.088001E0 11.758780E0 -32.079300E0 11.769550E0 -32.073502E0 11.780200E0 -32.067699E0 11.790730E0 -32.061901E0 11.801160E0 -32.056099E0 11.811480E0 -32.050301E0 11.821700E0 -32.044498E0 11.831810E0 -32.038799E0 11.841820E0 -32.033001E0 11.851730E0 -32.027199E0 11.861550E0 -32.024300E0 11.871270E0 -32.018501E0 11.880890E0 -32.012699E0 11.890420E0 -32.004002E0 11.899870E0 -32.001099E0 11.909220E0 -31.995300E0 11.918490E0 -31.989500E0 11.927680E0 -31.983700E0 11.936780E0 -31.977900E0 11.945790E0 -31.972099E0 11.954730E0 -31.969299E0 11.963590E0 -31.963501E0 11.972370E0 -31.957701E0 11.981070E0 -31.951900E0 11.989700E0 -31.946100E0 11.998260E0 -31.940300E0 12.006740E0 -31.937401E0 12.015150E0 
-31.931601E0 12.023490E0 -31.925800E0 12.031760E0 -31.922899E0 12.039970E0 -31.917101E0 12.048100E0 -31.911301E0 12.056170E0 -31.908400E0 12.064180E0 -31.902599E0 12.072120E0 -31.896900E0 12.080010E0 -31.893999E0 12.087820E0 -31.888201E0 12.095580E0 -31.885300E0 12.103280E0 -31.882401E0 12.110920E0 -31.876600E0 12.118500E0 -31.873699E0 12.126030E0 -31.867901E0 12.133500E0 -31.862101E0 12.140910E0 -31.859200E0 12.148270E0 -31.856300E0 12.155570E0 -31.850500E0 12.162830E0 -31.844700E0 12.170030E0 -31.841801E0 12.177170E0 -31.838900E0 12.184270E0 -31.833099E0 12.191320E0 -31.830200E0 12.198320E0 -31.827299E0 12.205270E0 -31.821600E0 12.212170E0 -31.818701E0 12.219030E0 -31.812901E0 12.225840E0 -31.809999E0 12.232600E0 -31.807100E0 12.239320E0 -31.801300E0 12.245990E0 -31.798401E0 12.252620E0 -31.795500E0 12.259200E0 -31.789700E0 12.265750E0 -31.786800E0 12.272240E0 lmfit-py-1.0.0/NIST_STRD/BoxBOD.dat000066400000000000000000000032611357751001700164410ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: BoxBOD (BoxBOD.dat) File Format: ASCII Starting Values (lines 41 to 42) Certified Values (lines 41 to 47) Data (lines 61 to 66) Procedure: Nonlinear Least Squares Regression Description: These data are described in detail in Box, Hunter and Hunter (1978). The response variable is biochemical oxygen demand (BOD) in mg/l, and the predictor variable is incubation time in days. Reference: Box, G. P., W. G. Hunter, and J. S. Hunter (1978). Statistics for Experimenters. New York, NY: Wiley, pp. 483-487. Data: 1 Response (y = biochemical oxygen demand) 1 Predictor (x = incubation time) 6 Observations Higher Level of Difficulty Observed Data Model: Exponential Class 2 Parameters (b1 and b2) y = b1*(1-exp[-b2*x]) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 1 100 2.1380940889E+02 1.2354515176E+01 b2 = 1 0.75 5.4723748542E-01 1.0455993237E-01 Residual Sum of Squares: 1.1680088766E+03 Residual Standard Deviation: 1.7088072423E+01 Degrees of Freedom: 4 Number of Observations: 6 Data: y x 109 1 149 2 149 3 191 5 213 7 224 10 lmfit-py-1.0.0/NIST_STRD/Chwirut1.dat000066400000000000000000000166061357751001700171010ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Chwirut1 (Chwirut1.dat) File Format: ASCII Starting Values (lines 41 to 43) Certified Values (lines 41 to 48) Data (lines 61 to 274) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study involving ultrasonic calibration. The response variable is ultrasonic response, and the predictor variable is metal distance. Reference: Chwirut, D., NIST (197?). Ultrasonic Reference Block Study. 
Data: 1 Response Variable (y = ultrasonic response) 1 Predictor Variable (x = metal distance) 214 Observations Lower Level of Difficulty Observed Data Model: Exponential Class 3 Parameters (b1 to b3) y = exp[-b1*x]/(b2+b3*x) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 0.1 0.15 1.9027818370E-01 2.1938557035E-02 b2 = 0.01 0.008 6.1314004477E-03 3.4500025051E-04 b3 = 0.02 0.010 1.0530908399E-02 7.9281847748E-04 Residual Sum of Squares: 2.3844771393E+03 Residual Standard Deviation: 3.3616721320E+00 Degrees of Freedom: 211 Number of Observations: 214 Data: y x 92.9000E0 0.5000E0 78.7000E0 0.6250E0 64.2000E0 0.7500E0 64.9000E0 0.8750E0 57.1000E0 1.0000E0 43.3000E0 1.2500E0 31.1000E0 1.7500E0 23.6000E0 2.2500E0 31.0500E0 1.7500E0 23.7750E0 2.2500E0 17.7375E0 2.7500E0 13.8000E0 3.2500E0 11.5875E0 3.7500E0 9.4125E0 4.2500E0 7.7250E0 4.7500E0 7.3500E0 5.2500E0 8.0250E0 5.7500E0 90.6000E0 0.5000E0 76.9000E0 0.6250E0 71.6000E0 0.7500E0 63.6000E0 0.8750E0 54.0000E0 1.0000E0 39.2000E0 1.2500E0 29.3000E0 1.7500E0 21.4000E0 2.2500E0 29.1750E0 1.7500E0 22.1250E0 2.2500E0 17.5125E0 2.7500E0 14.2500E0 3.2500E0 9.4500E0 3.7500E0 9.1500E0 4.2500E0 7.9125E0 4.7500E0 8.4750E0 5.2500E0 6.1125E0 5.7500E0 80.0000E0 0.5000E0 79.0000E0 0.6250E0 63.8000E0 0.7500E0 57.2000E0 0.8750E0 53.2000E0 1.0000E0 42.5000E0 1.2500E0 26.8000E0 1.7500E0 20.4000E0 2.2500E0 26.8500E0 1.7500E0 21.0000E0 2.2500E0 16.4625E0 2.7500E0 12.5250E0 3.2500E0 10.5375E0 3.7500E0 8.5875E0 4.2500E0 7.1250E0 4.7500E0 6.1125E0 5.2500E0 5.9625E0 5.7500E0 74.1000E0 0.5000E0 67.3000E0 0.6250E0 60.8000E0 0.7500E0 55.5000E0 0.8750E0 50.3000E0 1.0000E0 41.0000E0 1.2500E0 29.4000E0 1.7500E0 20.4000E0 2.2500E0 29.3625E0 1.7500E0 21.1500E0 2.2500E0 16.7625E0 2.7500E0 13.2000E0 3.2500E0 10.8750E0 3.7500E0 8.1750E0 4.2500E0 7.3500E0 4.7500E0 5.9625E0 5.2500E0 5.6250E0 5.7500E0 81.5000E0 .5000E0 62.4000E0 .7500E0 32.5000E0 1.5000E0 12.4100E0 3.0000E0 13.1200E0 3.0000E0 15.5600E0 3.0000E0 5.6300E0 6.0000E0 78.0000E0 .5000E0 59.9000E0 .7500E0 33.2000E0 1.5000E0 13.8400E0 3.0000E0 12.7500E0 3.0000E0 14.6200E0 3.0000E0 3.9400E0 6.0000E0 76.8000E0 .5000E0 61.0000E0 .7500E0 32.9000E0 1.5000E0 13.8700E0 3.0000E0 11.8100E0 3.0000E0 13.3100E0 3.0000E0 5.4400E0 6.0000E0 78.0000E0 .5000E0 63.5000E0 .7500E0 33.8000E0 1.5000E0 12.5600E0 3.0000E0 5.6300E0 6.0000E0 12.7500E0 3.0000E0 13.1200E0 3.0000E0 5.4400E0 6.0000E0 76.8000E0 .5000E0 60.0000E0 .7500E0 47.8000E0 1.0000E0 32.0000E0 1.5000E0 22.2000E0 2.0000E0 22.5700E0 2.0000E0 18.8200E0 2.5000E0 13.9500E0 3.0000E0 11.2500E0 4.0000E0 9.0000E0 5.0000E0 6.6700E0 6.0000E0 75.8000E0 .5000E0 62.0000E0 .7500E0 48.8000E0 1.0000E0 35.2000E0 1.5000E0 20.0000E0 2.0000E0 20.3200E0 2.0000E0 19.3100E0 2.5000E0 12.7500E0 3.0000E0 10.4200E0 4.0000E0 7.3100E0 5.0000E0 7.4200E0 6.0000E0 70.5000E0 .5000E0 59.5000E0 .7500E0 48.5000E0 1.0000E0 35.8000E0 1.5000E0 21.0000E0 2.0000E0 21.6700E0 2.0000E0 21.0000E0 2.5000E0 15.6400E0 3.0000E0 8.1700E0 4.0000E0 8.5500E0 5.0000E0 10.1200E0 6.0000E0 78.0000E0 .5000E0 66.0000E0 .6250E0 62.0000E0 .7500E0 58.0000E0 .8750E0 47.7000E0 1.0000E0 37.8000E0 1.2500E0 20.2000E0 2.2500E0 21.0700E0 2.2500E0 13.8700E0 2.7500E0 9.6700E0 3.2500E0 7.7600E0 3.7500E0 5.4400E0 4.2500E0 4.8700E0 4.7500E0 4.0100E0 5.2500E0 3.7500E0 5.7500E0 24.1900E0 3.0000E0 25.7600E0 3.0000E0 18.0700E0 3.0000E0 11.8100E0 3.0000E0 12.0700E0 3.0000E0 16.1200E0 3.0000E0 70.8000E0 .5000E0 54.7000E0 .7500E0 48.0000E0 1.0000E0 39.8000E0 1.5000E0 29.8000E0 2.0000E0 23.7000E0 2.5000E0 29.6200E0 2.0000E0 
23.8100E0 2.5000E0 17.7000E0 3.0000E0 11.5500E0 4.0000E0 12.0700E0 5.0000E0 8.7400E0 6.0000E0 80.7000E0 .5000E0 61.3000E0 .7500E0 47.5000E0 1.0000E0 29.0000E0 1.5000E0 24.0000E0 2.0000E0 17.7000E0 2.5000E0 24.5600E0 2.0000E0 18.6700E0 2.5000E0 16.2400E0 3.0000E0 8.7400E0 4.0000E0 7.8700E0 5.0000E0 8.5100E0 6.0000E0 66.7000E0 .5000E0 59.2000E0 .7500E0 40.8000E0 1.0000E0 30.7000E0 1.5000E0 25.7000E0 2.0000E0 16.3000E0 2.5000E0 25.9900E0 2.0000E0 16.9500E0 2.5000E0 13.3500E0 3.0000E0 8.6200E0 4.0000E0 7.2000E0 5.0000E0 6.6400E0 6.0000E0 13.6900E0 3.0000E0 81.0000E0 .5000E0 64.5000E0 .7500E0 35.5000E0 1.5000E0 13.3100E0 3.0000E0 4.8700E0 6.0000E0 12.9400E0 3.0000E0 5.0600E0 6.0000E0 15.1900E0 3.0000E0 14.6200E0 3.0000E0 15.6400E0 3.0000E0 25.5000E0 1.7500E0 25.9500E0 1.7500E0 81.7000E0 .5000E0 61.6000E0 .7500E0 29.8000E0 1.7500E0 29.8100E0 1.7500E0 17.1700E0 2.7500E0 10.3900E0 3.7500E0 28.4000E0 1.7500E0 28.6900E0 1.7500E0 81.3000E0 .5000E0 60.9000E0 .7500E0 16.6500E0 2.7500E0 10.0500E0 3.7500E0 28.9000E0 1.7500E0 28.9500E0 1.7500E0 lmfit-py-1.0.0/NIST_STRD/Chwirut2.dat000066400000000000000000000057641357751001700171050ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Chwirut2 (Chwirut2.dat) File Format: ASCII Starting Values (lines 41 to 43) Certified Values (lines 41 to 48) Data (lines 61 to 114) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study involving ultrasonic calibration. The response variable is ultrasonic response, and the predictor variable is metal distance. Reference: Chwirut, D., NIST (197?). Ultrasonic Reference Block Study. Data: 1 Response (y = ultrasonic response) 1 Predictor (x = metal distance) 54 Observations Lower Level of Difficulty Observed Data Model: Exponential Class 3 Parameters (b1 to b3) y = exp(-b1*x)/(b2+b3*x) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 0.1 0.15 1.6657666537E-01 3.8303286810E-02 b2 = 0.01 0.008 5.1653291286E-03 6.6621605126E-04 b3 = 0.02 0.010 1.2150007096E-02 1.5304234767E-03 Residual Sum of Squares: 5.1304802941E+02 Residual Standard Deviation: 3.1717133040E+00 Degrees of Freedom: 51 Number of Observations: 54 Data: y x 92.9000E0 0.500E0 57.1000E0 1.000E0 31.0500E0 1.750E0 11.5875E0 3.750E0 8.0250E0 5.750E0 63.6000E0 0.875E0 21.4000E0 2.250E0 14.2500E0 3.250E0 8.4750E0 5.250E0 63.8000E0 0.750E0 26.8000E0 1.750E0 16.4625E0 2.750E0 7.1250E0 4.750E0 67.3000E0 0.625E0 41.0000E0 1.250E0 21.1500E0 2.250E0 8.1750E0 4.250E0 81.5000E0 .500E0 13.1200E0 3.000E0 59.9000E0 .750E0 14.6200E0 3.000E0 32.9000E0 1.500E0 5.4400E0 6.000E0 12.5600E0 3.000E0 5.4400E0 6.000E0 32.0000E0 1.500E0 13.9500E0 3.000E0 75.8000E0 .500E0 20.0000E0 2.000E0 10.4200E0 4.000E0 59.5000E0 .750E0 21.6700E0 2.000E0 8.5500E0 5.000E0 62.0000E0 .750E0 20.2000E0 2.250E0 7.7600E0 3.750E0 3.7500E0 5.750E0 11.8100E0 3.000E0 54.7000E0 .750E0 23.7000E0 2.500E0 11.5500E0 4.000E0 61.3000E0 .750E0 17.7000E0 2.500E0 8.7400E0 4.000E0 59.2000E0 .750E0 16.3000E0 2.500E0 8.6200E0 4.000E0 81.0000E0 .500E0 4.8700E0 6.000E0 14.6200E0 3.000E0 81.7000E0 .500E0 17.1700E0 2.750E0 81.3000E0 .500E0 28.9000E0 1.750E0 lmfit-py-1.0.0/NIST_STRD/DanWood.dat000066400000000000000000000037061357751001700167230ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: DanWood (DanWood.dat) File Format: ASCII Starting Values (lines 41 to 42) Certified Values (lines 41 to 47) Data (lines 61 to 66) Procedure: Nonlinear Least Squares Regression Description: These data and model are described in Daniel and Wood (1980), and 
originally published in E.S.Keeping, "Introduction to Statistical Inference," Van Nostrand Company, Princeton, NJ, 1962, p. 354. The response variable is energy radieted from a carbon filament lamp per cm**2 per second, and the predictor variable is the absolute temperature of the filament in 1000 degrees Kelvin. Reference: Daniel, C. and F. S. Wood (1980). Fitting Equations to Data, Second Edition. New York, NY: John Wiley and Sons, pp. 428-431. Data: 1 Response Variable (y = energy) 1 Predictor Variable (x = temperature) 6 Observations Lower Level of Difficulty Observed Data Model: Miscellaneous Class 2 Parameters (b1 and b2) y = b1*x**b2 + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 1 0.7 7.6886226176E-01 1.8281973860E-02 b2 = 5 4 3.8604055871E+00 5.1726610913E-02 Residual Sum of Squares: 4.3173084083E-03 Residual Standard Deviation: 3.2853114039E-02 Degrees of Freedom: 4 Number of Observations: 6 Data: y x 2.138E0 1.309E0 3.421E0 1.471E0 3.597E0 1.490E0 4.340E0 1.565E0 4.882E0 1.611E0 5.660E0 1.680E0 lmfit-py-1.0.0/NIST_STRD/ENSO.dat000066400000000000000000000151511357751001700161310ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: ENSO (ENSO.dat) File Format: ASCII Starting Values (lines 41 to 49) Certified Values (lines 41 to 54) Data (lines 61 to 228) Procedure: Nonlinear Least Squares Regression Description: The data are monthly averaged atmospheric pressure differences between Easter Island and Darwin, Australia. This difference drives the trade winds in the southern hemisphere. Fourier analysis of the data reveals 3 significant cycles. The annual cycle is the strongest, but cycles with periods of approximately 44 and 26 months are also present. These cycles correspond to the El Nino and the Southern Oscillation. Arguments to the SIN and COS functions are in radians. Reference: Kahaner, D., C. Moler, and S. Nash, (1989). Numerical Methods and Software. Englewood Cliffs, NJ: Prentice Hall, pp. 441-445. 
Data: 1 Response (y = atmospheric pressure) 1 Predictor (x = time) 168 Observations Average Level of Difficulty Observed Data Model: Miscellaneous Class 9 Parameters (b1 to b9) y = b1 + b2*cos( 2*pi*x/12 ) + b3*sin( 2*pi*x/12 ) + b5*cos( 2*pi*x/b4 ) + b6*sin( 2*pi*x/b4 ) + b8*cos( 2*pi*x/b7 ) + b9*sin( 2*pi*x/b7 ) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 11.0 10.0 1.0510749193E+01 1.7488832467E-01 b2 = 3.0 3.0 3.0762128085E+00 2.4310052139E-01 b3 = 0.5 0.5 5.3280138227E-01 2.4354686618E-01 b4 = 40.0 44.0 4.4311088700E+01 9.4408025976E-01 b5 = -0.7 -1.5 -1.6231428586E+00 2.8078369611E-01 b6 = -1.3 0.5 5.2554493756E-01 4.8073701119E-01 b7 = 25.0 26.0 2.6887614440E+01 4.1612939130E-01 b8 = -0.3 -0.1 2.1232288488E-01 5.1460022911E-01 b9 = 1.4 1.5 1.4966870418E+00 2.5434468893E-01 Residual Sum of Squares: 7.8853978668E+02 Residual Standard Deviation: 2.2269642403E+00 Degrees of Freedom: 159 Number of Observations: 168 Data: y x 12.90000 1.000000 11.30000 2.000000 10.60000 3.000000 11.20000 4.000000 10.90000 5.000000 7.500000 6.000000 7.700000 7.000000 11.70000 8.000000 12.90000 9.000000 14.30000 10.000000 10.90000 11.00000 13.70000 12.00000 17.10000 13.00000 14.00000 14.00000 15.30000 15.00000 8.500000 16.00000 5.700000 17.00000 5.500000 18.00000 7.600000 19.00000 8.600000 20.00000 7.300000 21.00000 7.600000 22.00000 12.70000 23.00000 11.00000 24.00000 12.70000 25.00000 12.90000 26.00000 13.00000 27.00000 10.90000 28.00000 10.400000 29.00000 10.200000 30.00000 8.000000 31.00000 10.90000 32.00000 13.60000 33.00000 10.500000 34.00000 9.200000 35.00000 12.40000 36.00000 12.70000 37.00000 13.30000 38.00000 10.100000 39.00000 7.800000 40.00000 4.800000 41.00000 3.000000 42.00000 2.500000 43.00000 6.300000 44.00000 9.700000 45.00000 11.60000 46.00000 8.600000 47.00000 12.40000 48.00000 10.500000 49.00000 13.30000 50.00000 10.400000 51.00000 8.100000 52.00000 3.700000 53.00000 10.70000 54.00000 5.100000 55.00000 10.400000 56.00000 10.90000 57.00000 11.70000 58.00000 11.40000 59.00000 13.70000 60.00000 14.10000 61.00000 14.00000 62.00000 12.50000 63.00000 6.300000 64.00000 9.600000 65.00000 11.70000 66.00000 5.000000 67.00000 10.80000 68.00000 12.70000 69.00000 10.80000 70.00000 11.80000 71.00000 12.60000 72.00000 15.70000 73.00000 12.60000 74.00000 14.80000 75.00000 7.800000 76.00000 7.100000 77.00000 11.20000 78.00000 8.100000 79.00000 6.400000 80.00000 5.200000 81.00000 12.00000 82.00000 10.200000 83.00000 12.70000 84.00000 10.200000 85.00000 14.70000 86.00000 12.20000 87.00000 7.100000 88.00000 5.700000 89.00000 6.700000 90.00000 3.900000 91.00000 8.500000 92.00000 8.300000 93.00000 10.80000 94.00000 16.70000 95.00000 12.60000 96.00000 12.50000 97.00000 12.50000 98.00000 9.800000 99.00000 7.200000 100.00000 4.100000 101.00000 10.60000 102.00000 10.100000 103.00000 10.100000 104.00000 11.90000 105.00000 13.60000 106.0000 16.30000 107.0000 17.60000 108.0000 15.50000 109.0000 16.00000 110.0000 15.20000 111.0000 11.20000 112.0000 14.30000 113.0000 14.50000 114.0000 8.500000 115.0000 12.00000 116.0000 12.70000 117.0000 11.30000 118.0000 14.50000 119.0000 15.10000 120.0000 10.400000 121.0000 11.50000 122.0000 13.40000 123.0000 7.500000 124.0000 0.6000000 125.0000 0.3000000 126.0000 5.500000 127.0000 5.000000 128.0000 4.600000 129.0000 8.200000 130.0000 9.900000 131.0000 9.200000 132.0000 12.50000 133.0000 10.90000 134.0000 9.900000 135.0000 8.900000 136.0000 7.600000 137.0000 9.500000 138.0000 8.400000 139.0000 10.70000 140.0000 13.60000 141.0000 13.70000 
142.0000 13.70000 143.0000 16.50000 144.0000 16.80000 145.0000 17.10000 146.0000 15.40000 147.0000 9.500000 148.0000 6.100000 149.0000 10.100000 150.0000 9.300000 151.0000 5.300000 152.0000 11.20000 153.0000 16.60000 154.0000 15.60000 155.0000 12.00000 156.0000 11.50000 157.0000 8.600000 158.0000 13.80000 159.0000 8.700000 160.0000 8.600000 161.0000 8.600000 162.0000 8.700000 163.0000 12.80000 164.0000 13.20000 165.0000 14.00000 166.0000 13.40000 167.0000 14.80000 168.0000 lmfit-py-1.0.0/NIST_STRD/Eckerle4.dat000066400000000000000000000053251357751001700170250ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Eckerle4 (Eckerle4.dat) File Format: ASCII Starting Values (lines 41 to 43) Certified Values (lines 41 to 48) Data (lines 61 to 95) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study involving circular interference transmittance. The response variable is transmittance, and the predictor variable is wavelength. Reference: Eckerle, K., NIST (197?). Circular Interference Transmittance Study. Data: 1 Response Variable (y = transmittance) 1 Predictor Variable (x = wavelength) 35 Observations Higher Level of Difficulty Observed Data Model: Exponential Class 3 Parameters (b1 to b3) y = (b1/b2) * exp[-0.5*((x-b3)/b2)**2] + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 1 1.5 1.5543827178E+00 1.5408051163E-02 b2 = 10 5 4.0888321754E+00 4.6803020753E-02 b3 = 500 450 4.5154121844E+02 4.6800518816E-02 Residual Sum of Squares: 1.4635887487E-03 Residual Standard Deviation: 6.7629245447E-03 Degrees of Freedom: 32 Number of Observations: 35 Data: y x 0.0001575E0 400.000000E0 0.0001699E0 405.000000E0 0.0002350E0 410.000000E0 0.0003102E0 415.000000E0 0.0004917E0 420.000000E0 0.0008710E0 425.000000E0 0.0017418E0 430.000000E0 0.0046400E0 435.000000E0 0.0065895E0 436.500000E0 0.0097302E0 438.000000E0 0.0149002E0 439.500000E0 0.0237310E0 441.000000E0 0.0401683E0 442.500000E0 0.0712559E0 444.000000E0 0.1264458E0 445.500000E0 0.2073413E0 447.000000E0 0.2902366E0 448.500000E0 0.3445623E0 450.000000E0 0.3698049E0 451.500000E0 0.3668534E0 453.000000E0 0.3106727E0 454.500000E0 0.2078154E0 456.000000E0 0.1164354E0 457.500000E0 0.0616764E0 459.000000E0 0.0337200E0 460.500000E0 0.0194023E0 462.000000E0 0.0117831E0 463.500000E0 0.0074357E0 465.000000E0 0.0022732E0 470.000000E0 0.0008800E0 475.000000E0 0.0004579E0 480.000000E0 0.0002345E0 485.000000E0 0.0001586E0 490.000000E0 0.0001143E0 495.000000E0 0.0000710E0 500.000000E0 lmfit-py-1.0.0/NIST_STRD/Gauss1.dat000066400000000000000000000176421357751001700165370ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Gauss1 (Gauss1.dat) File Format: ASCII Starting Values (lines 41 to 48) Certified Values (lines 41 to 53) Data (lines 61 to 310) Procedure: Nonlinear Least Squares Regression Description: The data are two well-separated Gaussians on a decaying exponential baseline plus normally distributed zero-mean noise with variance = 6.25. Reference: Rust, B., NIST (1996). 
Data: 1 Response (y) 1 Predictor (x) 250 Observations Lower Level of Difficulty Generated Data Model: Exponential Class 8 Parameters (b1 to b8) y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 ) + b6*exp( -(x-b7)**2 / b8**2 ) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 97.0 94.0 9.8778210871E+01 5.7527312730E-01 b2 = 0.009 0.0105 1.0497276517E-02 1.1406289017E-04 b3 = 100.0 99.0 1.0048990633E+02 5.8831775752E-01 b4 = 65.0 63.0 6.7481111276E+01 1.0460593412E-01 b5 = 20.0 25.0 2.3129773360E+01 1.7439951146E-01 b6 = 70.0 71.0 7.1994503004E+01 6.2622793913E-01 b7 = 178.0 180.0 1.7899805021E+02 1.2436988217E-01 b8 = 16.5 20.0 1.8389389025E+01 2.0134312832E-01 Residual Sum of Squares: 1.3158222432E+03 Residual Standard Deviation: 2.3317980180E+00 Degrees of Freedom: 242 Number of Observations: 250 Data: y x 97.62227 1.000000 97.80724 2.000000 96.62247 3.000000 92.59022 4.000000 91.23869 5.000000 95.32704 6.000000 90.35040 7.000000 89.46235 8.000000 91.72520 9.000000 89.86916 10.000000 86.88076 11.00000 85.94360 12.00000 87.60686 13.00000 86.25839 14.00000 80.74976 15.00000 83.03551 16.00000 88.25837 17.00000 82.01316 18.00000 82.74098 19.00000 83.30034 20.00000 81.27850 21.00000 81.85506 22.00000 80.75195 23.00000 80.09573 24.00000 81.07633 25.00000 78.81542 26.00000 78.38596 27.00000 79.93386 28.00000 79.48474 29.00000 79.95942 30.00000 76.10691 31.00000 78.39830 32.00000 81.43060 33.00000 82.48867 34.00000 81.65462 35.00000 80.84323 36.00000 88.68663 37.00000 84.74438 38.00000 86.83934 39.00000 85.97739 40.00000 91.28509 41.00000 97.22411 42.00000 93.51733 43.00000 94.10159 44.00000 101.91760 45.00000 98.43134 46.00000 110.4214 47.00000 107.6628 48.00000 111.7288 49.00000 116.5115 50.00000 120.7609 51.00000 123.9553 52.00000 124.2437 53.00000 130.7996 54.00000 133.2960 55.00000 130.7788 56.00000 132.0565 57.00000 138.6584 58.00000 142.9252 59.00000 142.7215 60.00000 144.1249 61.00000 147.4377 62.00000 148.2647 63.00000 152.0519 64.00000 147.3863 65.00000 149.2074 66.00000 148.9537 67.00000 144.5876 68.00000 148.1226 69.00000 148.0144 70.00000 143.8893 71.00000 140.9088 72.00000 143.4434 73.00000 139.3938 74.00000 135.9878 75.00000 136.3927 76.00000 126.7262 77.00000 124.4487 78.00000 122.8647 79.00000 113.8557 80.00000 113.7037 81.00000 106.8407 82.00000 107.0034 83.00000 102.46290 84.00000 96.09296 85.00000 94.57555 86.00000 86.98824 87.00000 84.90154 88.00000 81.18023 89.00000 76.40117 90.00000 67.09200 91.00000 72.67155 92.00000 68.10848 93.00000 67.99088 94.00000 63.34094 95.00000 60.55253 96.00000 56.18687 97.00000 53.64482 98.00000 53.70307 99.00000 48.07893 100.00000 42.21258 101.00000 45.65181 102.00000 41.69728 103.00000 41.24946 104.00000 39.21349 105.00000 37.71696 106.0000 36.68395 107.0000 37.30393 108.0000 37.43277 109.0000 37.45012 110.0000 32.64648 111.0000 31.84347 112.0000 31.39951 113.0000 26.68912 114.0000 32.25323 115.0000 27.61008 116.0000 33.58649 117.0000 28.10714 118.0000 30.26428 119.0000 28.01648 120.0000 29.11021 121.0000 23.02099 122.0000 25.65091 123.0000 28.50295 124.0000 25.23701 125.0000 26.13828 126.0000 33.53260 127.0000 29.25195 128.0000 27.09847 129.0000 26.52999 130.0000 25.52401 131.0000 26.69218 132.0000 24.55269 133.0000 27.71763 134.0000 25.20297 135.0000 25.61483 136.0000 25.06893 137.0000 27.63930 138.0000 24.94851 139.0000 25.86806 140.0000 22.48183 141.0000 26.90045 142.0000 25.39919 143.0000 17.90614 144.0000 23.76039 145.0000 25.89689 146.0000 27.64231 147.0000 22.86101 148.0000 26.47003 149.0000 
23.72888 150.0000 27.54334 151.0000 30.52683 152.0000 28.07261 153.0000 34.92815 154.0000 28.29194 155.0000 34.19161 156.0000 35.41207 157.0000 37.09336 158.0000 40.98330 159.0000 39.53923 160.0000 47.80123 161.0000 47.46305 162.0000 51.04166 163.0000 54.58065 164.0000 57.53001 165.0000 61.42089 166.0000 62.79032 167.0000 68.51455 168.0000 70.23053 169.0000 74.42776 170.0000 76.59911 171.0000 81.62053 172.0000 83.42208 173.0000 79.17451 174.0000 88.56985 175.0000 85.66525 176.0000 86.55502 177.0000 90.65907 178.0000 84.27290 179.0000 85.72220 180.0000 83.10702 181.0000 82.16884 182.0000 80.42568 183.0000 78.15692 184.0000 79.79691 185.0000 77.84378 186.0000 74.50327 187.0000 71.57289 188.0000 65.88031 189.0000 65.01385 190.0000 60.19582 191.0000 59.66726 192.0000 52.95478 193.0000 53.87792 194.0000 44.91274 195.0000 41.09909 196.0000 41.68018 197.0000 34.53379 198.0000 34.86419 199.0000 33.14787 200.0000 29.58864 201.0000 27.29462 202.0000 21.91439 203.0000 19.08159 204.0000 24.90290 205.0000 19.82341 206.0000 16.75551 207.0000 18.24558 208.0000 17.23549 209.0000 16.34934 210.0000 13.71285 211.0000 14.75676 212.0000 13.97169 213.0000 12.42867 214.0000 14.35519 215.0000 7.703309 216.0000 10.234410 217.0000 11.78315 218.0000 13.87768 219.0000 4.535700 220.0000 10.059280 221.0000 8.424824 222.0000 10.533120 223.0000 9.602255 224.0000 7.877514 225.0000 6.258121 226.0000 8.899865 227.0000 7.877754 228.0000 12.51191 229.0000 10.66205 230.0000 6.035400 231.0000 6.790655 232.0000 8.783535 233.0000 4.600288 234.0000 8.400915 235.0000 7.216561 236.0000 10.017410 237.0000 7.331278 238.0000 6.527863 239.0000 2.842001 240.0000 10.325070 241.0000 4.790995 242.0000 8.377101 243.0000 6.264445 244.0000 2.706213 245.0000 8.362329 246.0000 8.983658 247.0000 3.362571 248.0000 1.182746 249.0000 4.875359 250.0000 lmfit-py-1.0.0/NIST_STRD/Gauss2.dat000066400000000000000000000176441357751001700165420ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Gauss2 (Gauss2.dat) File Format: ASCII Starting Values (lines 41 to 48) Certified Values (lines 41 to 53) Data (lines 61 to 310) Procedure: Nonlinear Least Squares Regression Description: The data are two slightly-blended Gaussians on a decaying exponential baseline plus normally distributed zero-mean noise with variance = 6.25. Reference: Rust, B., NIST (1996). 
Data: 1 Response (y) 1 Predictor (x) 250 Observations Lower Level of Difficulty Generated Data Model: Exponential Class 8 Parameters (b1 to b8) y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 ) + b6*exp( -(x-b7)**2 / b8**2 ) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 96.0 98.0 9.9018328406E+01 5.3748766879E-01 b2 = 0.009 0.0105 1.0994945399E-02 1.3335306766E-04 b3 = 103.0 103.0 1.0188022528E+02 5.9217315772E-01 b4 = 106.0 105.0 1.0703095519E+02 1.5006798316E-01 b5 = 18.0 20.0 2.3578584029E+01 2.2695595067E-01 b6 = 72.0 73.0 7.2045589471E+01 6.1721965884E-01 b7 = 151.0 150.0 1.5327010194E+02 1.9466674341E-01 b8 = 18.0 20.0 1.9525972636E+01 2.6416549393E-01 Residual Sum of Squares: 1.2475282092E+03 Residual Standard Deviation: 2.2704790782E+00 Degrees of Freedom: 242 Number of Observations: 250 Data: y x 97.58776 1.000000 97.76344 2.000000 96.56705 3.000000 92.52037 4.000000 91.15097 5.000000 95.21728 6.000000 90.21355 7.000000 89.29235 8.000000 91.51479 9.000000 89.60966 10.000000 86.56187 11.00000 85.55316 12.00000 87.13054 13.00000 85.67940 14.00000 80.04851 15.00000 82.18925 16.00000 87.24081 17.00000 80.79407 18.00000 81.28570 19.00000 81.56940 20.00000 79.22715 21.00000 79.43275 22.00000 77.90195 23.00000 76.75468 24.00000 77.17377 25.00000 74.27348 26.00000 73.11900 27.00000 73.84826 28.00000 72.47870 29.00000 71.92292 30.00000 66.92176 31.00000 67.93835 32.00000 69.56207 33.00000 69.07066 34.00000 66.53983 35.00000 63.87883 36.00000 69.71537 37.00000 63.60588 38.00000 63.37154 39.00000 60.01835 40.00000 62.67481 41.00000 65.80666 42.00000 59.14304 43.00000 56.62951 44.00000 61.21785 45.00000 54.38790 46.00000 62.93443 47.00000 56.65144 48.00000 57.13362 49.00000 58.29689 50.00000 58.91744 51.00000 58.50172 52.00000 55.22885 53.00000 58.30375 54.00000 57.43237 55.00000 51.69407 56.00000 49.93132 57.00000 53.70760 58.00000 55.39712 59.00000 52.89709 60.00000 52.31649 61.00000 53.98720 62.00000 53.54158 63.00000 56.45046 64.00000 51.32276 65.00000 53.11676 66.00000 53.28631 67.00000 49.80555 68.00000 54.69564 69.00000 56.41627 70.00000 54.59362 71.00000 54.38520 72.00000 60.15354 73.00000 59.78773 74.00000 60.49995 75.00000 65.43885 76.00000 60.70001 77.00000 63.71865 78.00000 67.77139 79.00000 64.70934 80.00000 70.78193 81.00000 70.38651 82.00000 77.22359 83.00000 79.52665 84.00000 80.13077 85.00000 85.67823 86.00000 85.20647 87.00000 90.24548 88.00000 93.61953 89.00000 95.86509 90.00000 93.46992 91.00000 105.8137 92.00000 107.8269 93.00000 114.0607 94.00000 115.5019 95.00000 118.5110 96.00000 119.6177 97.00000 122.1940 98.00000 126.9903 99.00000 125.7005 100.00000 123.7447 101.00000 130.6543 102.00000 129.7168 103.00000 131.8240 104.00000 131.8759 105.00000 131.9994 106.0000 132.1221 107.0000 133.4414 108.0000 133.8252 109.0000 133.6695 110.0000 128.2851 111.0000 126.5182 112.0000 124.7550 113.0000 118.4016 114.0000 122.0334 115.0000 115.2059 116.0000 118.7856 117.0000 110.7387 118.0000 110.2003 119.0000 105.17290 120.0000 103.44720 121.0000 94.54280 122.0000 94.40526 123.0000 94.57964 124.0000 88.76605 125.0000 87.28747 126.0000 92.50443 127.0000 86.27997 128.0000 82.44307 129.0000 80.47367 130.0000 78.36608 131.0000 78.74307 132.0000 76.12786 133.0000 79.13108 134.0000 76.76062 135.0000 77.60769 136.0000 77.76633 137.0000 81.28220 138.0000 79.74307 139.0000 81.97964 140.0000 80.02952 141.0000 85.95232 142.0000 85.96838 143.0000 79.94789 144.0000 87.17023 145.0000 90.50992 146.0000 93.23373 147.0000 89.14803 148.0000 93.11492 149.0000 
90.34337 150.0000 93.69421 151.0000 95.74256 152.0000 91.85105 153.0000 96.74503 154.0000 87.60996 155.0000 90.47012 156.0000 88.11690 157.0000 85.70673 158.0000 85.01361 159.0000 78.53040 160.0000 81.34148 161.0000 75.19295 162.0000 72.66115 163.0000 69.85504 164.0000 66.29476 165.0000 63.58502 166.0000 58.33847 167.0000 57.50766 168.0000 52.80498 169.0000 50.79319 170.0000 47.03490 171.0000 46.47090 172.0000 43.09016 173.0000 34.11531 174.0000 39.28235 175.0000 32.68386 176.0000 30.44056 177.0000 31.98932 178.0000 23.63330 179.0000 23.69643 180.0000 20.26812 181.0000 19.07074 182.0000 17.59544 183.0000 16.08785 184.0000 18.94267 185.0000 18.61354 186.0000 17.25800 187.0000 16.62285 188.0000 13.48367 189.0000 15.37647 190.0000 13.47208 191.0000 15.96188 192.0000 12.32547 193.0000 16.33880 194.0000 10.438330 195.0000 9.628715 196.0000 13.12268 197.0000 8.772417 198.0000 11.76143 199.0000 12.55020 200.0000 11.33108 201.0000 11.20493 202.0000 7.816916 203.0000 6.800675 204.0000 14.26581 205.0000 10.66285 206.0000 8.911574 207.0000 11.56733 208.0000 11.58207 209.0000 11.59071 210.0000 9.730134 211.0000 11.44237 212.0000 11.22912 213.0000 10.172130 214.0000 12.50905 215.0000 6.201493 216.0000 9.019605 217.0000 10.80607 218.0000 13.09625 219.0000 3.914271 220.0000 9.567886 221.0000 8.038448 222.0000 10.231040 223.0000 9.367410 224.0000 7.695971 225.0000 6.118575 226.0000 8.793207 227.0000 7.796692 228.0000 12.45065 229.0000 10.61601 230.0000 6.001003 231.0000 6.765098 232.0000 8.764653 233.0000 4.586418 234.0000 8.390783 235.0000 7.209202 236.0000 10.012090 237.0000 7.327461 238.0000 6.525136 239.0000 2.840065 240.0000 10.323710 241.0000 4.790035 242.0000 8.376431 243.0000 6.263980 244.0000 2.705892 245.0000 8.362109 246.0000 8.983507 247.0000 3.362469 248.0000 1.182678 249.0000 4.875312 250.0000 lmfit-py-1.0.0/NIST_STRD/Gauss3.dat000066400000000000000000000176461357751001700165450ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Gauss3 (Gauss3.dat) File Format: ASCII Starting Values (lines 41 to 48) Certified Values (lines 41 to 53) Data (lines 61 to 310) Procedure: Nonlinear Least Squares Regression Description: The data are two strongly-blended Gaussians on a decaying exponential baseline plus normally distributed zero-mean noise with variance = 6.25. Reference: Rust, B., NIST (1996). 
Data: 1 Response (y) 1 Predictor (x) 250 Observations Average Level of Difficulty Generated Data Model: Exponential Class 8 Parameters (b1 to b8) y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 ) + b6*exp( -(x-b7)**2 / b8**2 ) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 94.9 96.0 9.8940368970E+01 5.3005192833E-01 b2 = 0.009 0.0096 1.0945879335E-02 1.2554058911E-04 b3 = 90.1 80.0 1.0069553078E+02 8.1256587317E-01 b4 = 113.0 110.0 1.1163619459E+02 3.5317859757E-01 b5 = 20.0 25.0 2.3300500029E+01 3.6584783023E-01 b6 = 73.8 74.0 7.3705031418E+01 1.2091239082E+00 b7 = 140.0 139.0 1.4776164251E+02 4.0488183351E-01 b8 = 20.0 25.0 1.9668221230E+01 3.7806634336E-01 Residual Sum of Squares: 1.2444846360E+03 Residual Standard Deviation: 2.2677077625E+00 Degrees of Freedom: 242 Number of Observations: 250 Data: y x 97.58776 1.000000 97.76344 2.000000 96.56705 3.000000 92.52037 4.000000 91.15097 5.000000 95.21728 6.000000 90.21355 7.000000 89.29235 8.000000 91.51479 9.000000 89.60965 10.000000 86.56187 11.00000 85.55315 12.00000 87.13053 13.00000 85.67938 14.00000 80.04849 15.00000 82.18922 16.00000 87.24078 17.00000 80.79401 18.00000 81.28564 19.00000 81.56932 20.00000 79.22703 21.00000 79.43259 22.00000 77.90174 23.00000 76.75438 24.00000 77.17338 25.00000 74.27296 26.00000 73.11830 27.00000 73.84732 28.00000 72.47746 29.00000 71.92128 30.00000 66.91962 31.00000 67.93554 32.00000 69.55841 33.00000 69.06592 34.00000 66.53371 35.00000 63.87094 36.00000 69.70526 37.00000 63.59295 38.00000 63.35509 39.00000 59.99747 40.00000 62.64843 41.00000 65.77345 42.00000 59.10141 43.00000 56.57750 44.00000 61.15313 45.00000 54.30767 46.00000 62.83535 47.00000 56.52957 48.00000 56.98427 49.00000 58.11459 50.00000 58.69576 51.00000 58.23322 52.00000 54.90490 53.00000 57.91442 54.00000 56.96629 55.00000 51.13831 56.00000 49.27123 57.00000 52.92668 58.00000 54.47693 59.00000 51.81710 60.00000 51.05401 61.00000 52.51731 62.00000 51.83710 63.00000 54.48196 64.00000 49.05859 65.00000 50.52315 66.00000 50.32755 67.00000 46.44419 68.00000 50.89281 69.00000 52.13203 70.00000 49.78741 71.00000 49.01637 72.00000 54.18198 73.00000 53.17456 74.00000 53.20827 75.00000 57.43459 76.00000 51.95282 77.00000 54.20282 78.00000 57.46687 79.00000 53.60268 80.00000 58.86728 81.00000 57.66652 82.00000 63.71034 83.00000 65.24244 84.00000 65.10878 85.00000 69.96313 86.00000 68.85475 87.00000 73.32574 88.00000 76.21241 89.00000 78.06311 90.00000 75.37701 91.00000 87.54449 92.00000 89.50588 93.00000 95.82098 94.00000 97.48390 95.00000 100.86070 96.00000 102.48510 97.00000 105.7311 98.00000 111.3489 99.00000 111.0305 100.00000 110.1920 101.00000 118.3581 102.00000 118.8086 103.00000 122.4249 104.00000 124.0953 105.00000 125.9337 106.0000 127.8533 107.0000 131.0361 108.0000 133.3343 109.0000 135.1278 110.0000 131.7113 111.0000 131.9151 112.0000 132.1107 113.0000 127.6898 114.0000 133.2148 115.0000 128.2296 116.0000 133.5902 117.0000 127.2539 118.0000 128.3482 119.0000 124.8694 120.0000 124.6031 121.0000 117.0648 122.0000 118.1966 123.0000 119.5408 124.0000 114.7946 125.0000 114.2780 126.0000 120.3484 127.0000 114.8647 128.0000 111.6514 129.0000 110.1826 130.0000 108.4461 131.0000 109.0571 132.0000 106.5308 133.0000 109.4691 134.0000 106.8709 135.0000 107.3192 136.0000 106.9000 137.0000 109.6526 138.0000 107.1602 139.0000 108.2509 140.0000 104.96310 141.0000 109.3601 142.0000 107.6696 143.0000 99.77286 144.0000 104.96440 145.0000 106.1376 146.0000 106.5816 147.0000 100.12860 148.0000 101.66910 149.0000 
96.44254 150.0000 97.34169 151.0000 96.97412 152.0000 90.73460 153.0000 93.37949 154.0000 82.12331 155.0000 83.01657 156.0000 78.87360 157.0000 74.86971 158.0000 72.79341 159.0000 65.14744 160.0000 67.02127 161.0000 60.16136 162.0000 57.13996 163.0000 54.05769 164.0000 50.42265 165.0000 47.82430 166.0000 42.85748 167.0000 42.45495 168.0000 38.30808 169.0000 36.95794 170.0000 33.94543 171.0000 34.19017 172.0000 31.66097 173.0000 23.56172 174.0000 29.61143 175.0000 23.88765 176.0000 22.49812 177.0000 24.86901 178.0000 17.29481 179.0000 18.09291 180.0000 15.34813 181.0000 14.77997 182.0000 13.87832 183.0000 12.88891 184.0000 16.20763 185.0000 16.29024 186.0000 15.29712 187.0000 14.97839 188.0000 12.11330 189.0000 14.24168 190.0000 12.53824 191.0000 15.19818 192.0000 11.70478 193.0000 15.83745 194.0000 10.035850 195.0000 9.307574 196.0000 12.86800 197.0000 8.571671 198.0000 11.60415 199.0000 12.42772 200.0000 11.23627 201.0000 11.13198 202.0000 7.761117 203.0000 6.758250 204.0000 14.23375 205.0000 10.63876 206.0000 8.893581 207.0000 11.55398 208.0000 11.57221 209.0000 11.58347 210.0000 9.724857 211.0000 11.43854 212.0000 11.22636 213.0000 10.170150 214.0000 12.50765 215.0000 6.200494 216.0000 9.018902 217.0000 10.80557 218.0000 13.09591 219.0000 3.914033 220.0000 9.567723 221.0000 8.038338 222.0000 10.230960 223.0000 9.367358 224.0000 7.695937 225.0000 6.118552 226.0000 8.793192 227.0000 7.796682 228.0000 12.45064 229.0000 10.61601 230.0000 6.001000 231.0000 6.765096 232.0000 8.764652 233.0000 4.586417 234.0000 8.390782 235.0000 7.209201 236.0000 10.012090 237.0000 7.327461 238.0000 6.525136 239.0000 2.840065 240.0000 10.323710 241.0000 4.790035 242.0000 8.376431 243.0000 6.263980 244.0000 2.705892 245.0000 8.362109 246.0000 8.983507 247.0000 3.362469 248.0000 1.182678 249.0000 4.875312 250.0000 lmfit-py-1.0.0/NIST_STRD/Hahn1.dat000066400000000000000000000220741357751001700163260ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Hahn1 (Hahn1.dat) File Format: ASCII Starting Values (lines 41 to 47) Certified Values (lines 41 to 52) Data (lines 61 to 296) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study involving the thermal expansion of copper. The response variable is the coefficient of thermal expansion, and the predictor variable is temperature in degrees kelvin. Reference: Hahn, T., NIST (197?). Copper Thermal Expansion Study. 
Data: 1 Response (y = coefficient of thermal expansion) 1 Predictor (x = temperature, degrees kelvin) 236 Observations Average Level of Difficulty Observed Data Model: Rational Class (cubic/cubic) 7 Parameters (b1 to b7) y = (b1+b2*x+b3*x**2+b4*x**3) / (1+b5*x+b6*x**2+b7*x**3) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 10 1 1.0776351733E+00 1.7070154742E-01 b2 = -1 -0.1 -1.2269296921E-01 1.2000289189E-02 b3 = 0.05 0.005 4.0863750610E-03 2.2508314937E-04 b4 = -0.00001 -0.000001 -1.4262662514E-06 2.7578037666E-07 b5 = -0.05 -0.005 -5.7609940901E-03 2.4712888219E-04 b6 = 0.001 0.0001 2.4053735503E-04 1.0449373768E-05 b7 = -0.000001 -0.0000001 -1.2314450199E-07 1.3027335327E-08 Residual Sum of Squares: 1.5324382854E+00 Residual Standard Deviation: 8.1803852243E-02 Degrees of Freedom: 229 Number of Observations: 236 Data: y x .591E0 24.41E0 1.547E0 34.82E0 2.902E0 44.09E0 2.894E0 45.07E0 4.703E0 54.98E0 6.307E0 65.51E0 7.03E0 70.53E0 7.898E0 75.70E0 9.470E0 89.57E0 9.484E0 91.14E0 10.072E0 96.40E0 10.163E0 97.19E0 11.615E0 114.26E0 12.005E0 120.25E0 12.478E0 127.08E0 12.982E0 133.55E0 12.970E0 133.61E0 13.926E0 158.67E0 14.452E0 172.74E0 14.404E0 171.31E0 15.190E0 202.14E0 15.550E0 220.55E0 15.528E0 221.05E0 15.499E0 221.39E0 16.131E0 250.99E0 16.438E0 268.99E0 16.387E0 271.80E0 16.549E0 271.97E0 16.872E0 321.31E0 16.830E0 321.69E0 16.926E0 330.14E0 16.907E0 333.03E0 16.966E0 333.47E0 17.060E0 340.77E0 17.122E0 345.65E0 17.311E0 373.11E0 17.355E0 373.79E0 17.668E0 411.82E0 17.767E0 419.51E0 17.803E0 421.59E0 17.765E0 422.02E0 17.768E0 422.47E0 17.736E0 422.61E0 17.858E0 441.75E0 17.877E0 447.41E0 17.912E0 448.7E0 18.046E0 472.89E0 18.085E0 476.69E0 18.291E0 522.47E0 18.357E0 522.62E0 18.426E0 524.43E0 18.584E0 546.75E0 18.610E0 549.53E0 18.870E0 575.29E0 18.795E0 576.00E0 19.111E0 625.55E0 .367E0 20.15E0 .796E0 28.78E0 0.892E0 29.57E0 1.903E0 37.41E0 2.150E0 39.12E0 3.697E0 50.24E0 5.870E0 61.38E0 6.421E0 66.25E0 7.422E0 73.42E0 9.944E0 95.52E0 11.023E0 107.32E0 11.87E0 122.04E0 12.786E0 134.03E0 14.067E0 163.19E0 13.974E0 163.48E0 14.462E0 175.70E0 14.464E0 179.86E0 15.381E0 211.27E0 15.483E0 217.78E0 15.59E0 219.14E0 16.075E0 262.52E0 16.347E0 268.01E0 16.181E0 268.62E0 16.915E0 336.25E0 17.003E0 337.23E0 16.978E0 339.33E0 17.756E0 427.38E0 17.808E0 428.58E0 17.868E0 432.68E0 18.481E0 528.99E0 18.486E0 531.08E0 19.090E0 628.34E0 16.062E0 253.24E0 16.337E0 273.13E0 16.345E0 273.66E0 16.388E0 282.10E0 17.159E0 346.62E0 17.116E0 347.19E0 17.164E0 348.78E0 17.123E0 351.18E0 17.979E0 450.10E0 17.974E0 450.35E0 18.007E0 451.92E0 17.993E0 455.56E0 18.523E0 552.22E0 18.669E0 553.56E0 18.617E0 555.74E0 19.371E0 652.59E0 19.330E0 656.20E0 0.080E0 14.13E0 0.248E0 20.41E0 1.089E0 31.30E0 1.418E0 33.84E0 2.278E0 39.70E0 3.624E0 48.83E0 4.574E0 54.50E0 5.556E0 60.41E0 7.267E0 72.77E0 7.695E0 75.25E0 9.136E0 86.84E0 9.959E0 94.88E0 9.957E0 96.40E0 11.600E0 117.37E0 13.138E0 139.08E0 13.564E0 147.73E0 13.871E0 158.63E0 13.994E0 161.84E0 14.947E0 192.11E0 15.473E0 206.76E0 15.379E0 209.07E0 15.455E0 213.32E0 15.908E0 226.44E0 16.114E0 237.12E0 17.071E0 330.90E0 17.135E0 358.72E0 17.282E0 370.77E0 17.368E0 372.72E0 17.483E0 396.24E0 17.764E0 416.59E0 18.185E0 484.02E0 18.271E0 495.47E0 18.236E0 514.78E0 18.237E0 515.65E0 18.523E0 519.47E0 18.627E0 544.47E0 18.665E0 560.11E0 19.086E0 620.77E0 0.214E0 18.97E0 0.943E0 28.93E0 1.429E0 33.91E0 2.241E0 40.03E0 2.951E0 44.66E0 3.782E0 49.87E0 4.757E0 55.16E0 5.602E0 60.90E0 7.169E0 72.08E0 8.920E0 85.15E0 10.055E0 97.06E0 
12.035E0 119.63E0 12.861E0 133.27E0 13.436E0 143.84E0 14.167E0 161.91E0 14.755E0 180.67E0 15.168E0 198.44E0 15.651E0 226.86E0 15.746E0 229.65E0 16.216E0 258.27E0 16.445E0 273.77E0 16.965E0 339.15E0 17.121E0 350.13E0 17.206E0 362.75E0 17.250E0 371.03E0 17.339E0 393.32E0 17.793E0 448.53E0 18.123E0 473.78E0 18.49E0 511.12E0 18.566E0 524.70E0 18.645E0 548.75E0 18.706E0 551.64E0 18.924E0 574.02E0 19.1E0 623.86E0 0.375E0 21.46E0 0.471E0 24.33E0 1.504E0 33.43E0 2.204E0 39.22E0 2.813E0 44.18E0 4.765E0 55.02E0 9.835E0 94.33E0 10.040E0 96.44E0 11.946E0 118.82E0 12.596E0 128.48E0 13.303E0 141.94E0 13.922E0 156.92E0 14.440E0 171.65E0 14.951E0 190.00E0 15.627E0 223.26E0 15.639E0 223.88E0 15.814E0 231.50E0 16.315E0 265.05E0 16.334E0 269.44E0 16.430E0 271.78E0 16.423E0 273.46E0 17.024E0 334.61E0 17.009E0 339.79E0 17.165E0 349.52E0 17.134E0 358.18E0 17.349E0 377.98E0 17.576E0 394.77E0 17.848E0 429.66E0 18.090E0 468.22E0 18.276E0 487.27E0 18.404E0 519.54E0 18.519E0 523.03E0 19.133E0 612.99E0 19.074E0 638.59E0 19.239E0 641.36E0 19.280E0 622.05E0 19.101E0 631.50E0 19.398E0 663.97E0 19.252E0 646.9E0 19.89E0 748.29E0 20.007E0 749.21E0 19.929E0 750.14E0 19.268E0 647.04E0 19.324E0 646.89E0 20.049E0 746.9E0 20.107E0 748.43E0 20.062E0 747.35E0 20.065E0 749.27E0 19.286E0 647.61E0 19.972E0 747.78E0 20.088E0 750.51E0 20.743E0 851.37E0 20.83E0 845.97E0 20.935E0 847.54E0 21.035E0 849.93E0 20.93E0 851.61E0 21.074E0 849.75E0 21.085E0 850.98E0 20.935E0 848.23E0 lmfit-py-1.0.0/NIST_STRD/Kirby2.dat000066400000000000000000000133421357751001700165270ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Kirby2 (Kirby2.dat) File Format: ASCII Starting Values (lines 41 to 45) Certified Values (lines 41 to 50) Data (lines 61 to 211) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study involving scanning electron microscope line with standards. Reference: Kirby, R., NIST (197?). Scanning electron microscope line width standards. 
Data: 1 Response (y) 1 Predictor (x) 151 Observations Average Level of Difficulty Observed Data Model: Rational Class (quadratic/quadratic) 5 Parameters (b1 to b5) y = (b1 + b2*x + b3*x**2) / (1 + b4*x + b5*x**2) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 2 1.5 1.6745063063E+00 8.7989634338E-02 b2 = -0.1 -0.15 -1.3927397867E-01 4.1182041386E-03 b3 = 0.003 0.0025 2.5961181191E-03 4.1856520458E-05 b4 = -0.001 -0.0015 -1.7241811870E-03 5.8931897355E-05 b5 = 0.00001 0.00002 2.1664802578E-05 2.0129761919E-07 Residual Sum of Squares: 3.9050739624E+00 Residual Standard Deviation: 1.6354535131E-01 Degrees of Freedom: 146 Number of Observations: 151 Data: y x 0.0082E0 9.65E0 0.0112E0 10.74E0 0.0149E0 11.81E0 0.0198E0 12.88E0 0.0248E0 14.06E0 0.0324E0 15.28E0 0.0420E0 16.63E0 0.0549E0 18.19E0 0.0719E0 19.88E0 0.0963E0 21.84E0 0.1291E0 24.00E0 0.1710E0 26.25E0 0.2314E0 28.86E0 0.3227E0 31.85E0 0.4809E0 35.79E0 0.7084E0 40.18E0 1.0220E0 44.74E0 1.4580E0 49.53E0 1.9520E0 53.94E0 2.5410E0 58.29E0 3.2230E0 62.63E0 3.9990E0 67.03E0 4.8520E0 71.25E0 5.7320E0 75.22E0 6.7270E0 79.33E0 7.8350E0 83.56E0 9.0250E0 87.75E0 10.2670E0 91.93E0 11.5780E0 96.10E0 12.9440E0 100.28E0 14.3770E0 104.46E0 15.8560E0 108.66E0 17.3310E0 112.71E0 18.8850E0 116.88E0 20.5750E0 121.33E0 22.3200E0 125.79E0 22.3030E0 125.79E0 23.4600E0 128.74E0 24.0600E0 130.27E0 25.2720E0 133.33E0 25.8530E0 134.79E0 27.1100E0 137.93E0 27.6580E0 139.33E0 28.9240E0 142.46E0 29.5110E0 143.90E0 30.7100E0 146.91E0 31.3500E0 148.51E0 32.5200E0 151.41E0 33.2300E0 153.17E0 34.3300E0 155.97E0 35.0600E0 157.76E0 36.1700E0 160.56E0 36.8400E0 162.30E0 38.0100E0 165.21E0 38.6700E0 166.90E0 39.8700E0 169.92E0 40.0300E0 170.32E0 40.5000E0 171.54E0 41.3700E0 173.79E0 41.6700E0 174.57E0 42.3100E0 176.25E0 42.7300E0 177.34E0 43.4600E0 179.19E0 44.1400E0 181.02E0 44.5500E0 182.08E0 45.2200E0 183.88E0 45.9200E0 185.75E0 46.3000E0 186.80E0 47.0000E0 188.63E0 47.6800E0 190.45E0 48.0600E0 191.48E0 48.7400E0 193.35E0 49.4100E0 195.22E0 49.7600E0 196.23E0 50.4300E0 198.05E0 51.1100E0 199.97E0 51.5000E0 201.06E0 52.1200E0 202.83E0 52.7600E0 204.69E0 53.1800E0 205.86E0 53.7800E0 207.58E0 54.4600E0 209.50E0 54.8300E0 210.65E0 55.4000E0 212.33E0 56.4300E0 215.43E0 57.0300E0 217.16E0 58.0000E0 220.21E0 58.6100E0 221.98E0 59.5800E0 225.06E0 60.1100E0 226.79E0 61.1000E0 229.92E0 61.6500E0 231.69E0 62.5900E0 234.77E0 63.1200E0 236.60E0 64.0300E0 239.63E0 64.6200E0 241.50E0 65.4900E0 244.48E0 66.0300E0 246.40E0 66.8900E0 249.35E0 67.4200E0 251.32E0 68.2300E0 254.22E0 68.7700E0 256.24E0 69.5900E0 259.11E0 70.1100E0 261.18E0 70.8600E0 264.02E0 71.4300E0 266.13E0 72.1600E0 268.94E0 72.7000E0 271.09E0 73.4000E0 273.87E0 73.9300E0 276.08E0 74.6000E0 278.83E0 75.1600E0 281.08E0 75.8200E0 283.81E0 76.3400E0 286.11E0 76.9800E0 288.81E0 77.4800E0 291.08E0 78.0800E0 293.75E0 78.6000E0 295.99E0 79.1700E0 298.64E0 79.6200E0 300.84E0 79.8800E0 302.02E0 80.1900E0 303.48E0 80.6600E0 305.65E0 81.2200E0 308.27E0 81.6600E0 310.41E0 82.1600E0 313.01E0 82.5900E0 315.12E0 83.1400E0 317.71E0 83.5000E0 319.79E0 84.0000E0 322.36E0 84.4000E0 324.42E0 84.8900E0 326.98E0 85.2600E0 329.01E0 85.7400E0 331.56E0 86.0700E0 333.56E0 86.5400E0 336.10E0 86.8900E0 338.08E0 87.3200E0 340.60E0 87.6500E0 342.57E0 88.1000E0 345.08E0 88.4300E0 347.02E0 88.8300E0 349.52E0 89.1200E0 351.44E0 89.5400E0 353.93E0 89.8500E0 355.83E0 90.2500E0 358.32E0 90.5500E0 360.20E0 90.9300E0 362.67E0 91.2000E0 364.53E0 91.5500E0 367.00E0 92.2000E0 371.30E0 
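As an illustration (not part of the original NIST file), here is a minimal sketch of how the Kirby2 model above might be fit with lmfit's minimize interface, assuming Kirby2.dat has been saved locally with the layout its header describes (data block on lines 61 to 211, response y in the first column, predictor x in the second) and using the "Start 1" values as initial guesses::

    import numpy as np
    from lmfit import Parameters, minimize, fit_report

    # The Kirby2.dat header says the data block spans lines 61 to 211,
    # with the response (y) in the first column and the predictor (x) second.
    data = np.loadtxt('Kirby2.dat', skiprows=60)
    y, x = data[:, 0], data[:, 1]

    def residual(params, x, y):
        # Model from the header: y = (b1 + b2*x + b3*x**2) / (1 + b4*x + b5*x**2) + e
        p = params.valuesdict()
        model = (p['b1'] + p['b2']*x + p['b3']*x**2) / (1.0 + p['b4']*x + p['b5']*x**2)
        return model - y

    params = Parameters()
    params.add('b1', value=2.0)        # "Start 1" values from the header above
    params.add('b2', value=-0.1)
    params.add('b3', value=0.003)
    params.add('b4', value=-0.001)
    params.add('b5', value=0.00001)

    result = minimize(residual, params, args=(x, y))
    print(fit_report(result))

The resulting fit report can then be compared against the certified values and standard deviations listed in the header.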
lmfit-py-1.0.0/NIST_STRD/Lanczos1.dat000066400000000000000000000056011357751001700170560ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Lanczos1 (Lanczos1.dat) File Format: ASCII Starting Values (lines 41 to 46) Certified Values (lines 41 to 51) Data (lines 61 to 84) Procedure: Nonlinear Least Squares Regression Description: These data are taken from an example discussed in Lanczos (1956). The data were generated to 14-digits of accuracy using f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x) + 1.5576*exp(-5*x). Reference: Lanczos, C. (1956). Applied Analysis. Englewood Cliffs, NJ: Prentice Hall, pp. 272-280. Data: 1 Response (y) 1 Predictor (x) 24 Observations Average Level of Difficulty Generated Data Model: Exponential Class 6 Parameters (b1 to b6) y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 1.2 0.5 9.5100000027E-02 5.3347304234E-11 b2 = 0.3 0.7 1.0000000001E+00 2.7473038179E-10 b3 = 5.6 3.6 8.6070000013E-01 1.3576062225E-10 b4 = 5.5 4.2 3.0000000002E+00 3.3308253069E-10 b5 = 6.5 4 1.5575999998E+00 1.8815731448E-10 b6 = 7.6 6.3 5.0000000001E+00 1.1057500538E-10 Residual Sum of Squares: 1.4307867721E-25 Residual Standard Deviation: 8.9156129349E-14 Degrees of Freedom: 18 Number of Observations: 24 Data: y x 2.513400000000E+00 0.000000000000E+00 2.044333373291E+00 5.000000000000E-02 1.668404436564E+00 1.000000000000E-01 1.366418021208E+00 1.500000000000E-01 1.123232487372E+00 2.000000000000E-01 9.268897180037E-01 2.500000000000E-01 7.679338563728E-01 3.000000000000E-01 6.388775523106E-01 3.500000000000E-01 5.337835317402E-01 4.000000000000E-01 4.479363617347E-01 4.500000000000E-01 3.775847884350E-01 5.000000000000E-01 3.197393199326E-01 5.500000000000E-01 2.720130773746E-01 6.000000000000E-01 2.324965529032E-01 6.500000000000E-01 1.996589546065E-01 7.000000000000E-01 1.722704126914E-01 7.500000000000E-01 1.493405660168E-01 8.000000000000E-01 1.300700206922E-01 8.500000000000E-01 1.138119324644E-01 9.000000000000E-01 1.000415587559E-01 9.500000000000E-01 8.833209084540E-02 1.000000000000E+00 7.833544019350E-02 1.050000000000E+00 6.976693743449E-02 1.100000000000E+00 6.239312536719E-02 1.150000000000E+00 lmfit-py-1.0.0/NIST_STRD/Lanczos2.dat000066400000000000000000000050511357751001700170560ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Lanczos2 (Lanczos2.dat) File Format: ASCII Starting Values (lines 41 to 46) Certified Values (lines 41 to 51) Data (lines 61 to 84) Procedure: Nonlinear Least Squares Regression Description: These data are taken from an example discussed in Lanczos (1956). The data were generated to 6-digits of accuracy using f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x) + 1.5576*exp(-5*x). Reference: Lanczos, C. (1956). Applied Analysis. Englewood Cliffs, NJ: Prentice Hall, pp. 272-280. 
Data: 1 Response (y) 1 Predictor (x) 24 Observations Average Level of Difficulty Generated Data Model: Exponential Class 6 Parameters (b1 to b6) y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 1.2 0.5 9.6251029939E-02 6.6770575477E-04 b2 = 0.3 0.7 1.0057332849E+00 3.3989646176E-03 b3 = 5.6 3.6 8.6424689056E-01 1.7185846685E-03 b4 = 5.5 4.2 3.0078283915E+00 4.1707005856E-03 b5 = 6.5 4 1.5529016879E+00 2.3744381417E-03 b6 = 7.6 6.3 5.0028798100E+00 1.3958787284E-03 Residual Sum of Squares: 2.2299428125E-11 Residual Standard Deviation: 1.1130395851E-06 Degrees of Freedom: 18 Number of Observations: 24 Data: y x 2.51340E+00 0.00000E+00 2.04433E+00 5.00000E-02 1.66840E+00 1.00000E-01 1.36642E+00 1.50000E-01 1.12323E+00 2.00000E-01 9.26890E-01 2.50000E-01 7.67934E-01 3.00000E-01 6.38878E-01 3.50000E-01 5.33784E-01 4.00000E-01 4.47936E-01 4.50000E-01 3.77585E-01 5.00000E-01 3.19739E-01 5.50000E-01 2.72013E-01 6.00000E-01 2.32497E-01 6.50000E-01 1.99659E-01 7.00000E-01 1.72270E-01 7.50000E-01 1.49341E-01 8.00000E-01 1.30070E-01 8.50000E-01 1.13812E-01 9.00000E-01 1.00042E-01 9.50000E-01 8.83321E-02 1.00000E+00 7.83354E-02 1.05000E+00 6.97669E-02 1.10000E+00 6.23931E-02 1.15000E+00 lmfit-py-1.0.0/NIST_STRD/Lanczos3.dat000066400000000000000000000050161357751001700170600ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Lanczos3 (Lanczos3.dat) File Format: ASCII Starting Values (lines 41 to 46) Certified Values (lines 41 to 51) Data (lines 61 to 84) Procedure: Nonlinear Least Squares Regression Description: These data are taken from an example discussed in Lanczos (1956). The data were generated to 5-digits of accuracy using f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x) + 1.5576*exp(-5*x). Reference: Lanczos, C. (1956). Applied Analysis. Englewood Cliffs, NJ: Prentice Hall, pp. 272-280. Data: 1 Response (y) 1 Predictor (x) 24 Observations Lower Level of Difficulty Generated Data Model: Exponential Class 6 Parameters (b1 to b6) y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 1.2 0.5 8.6816414977E-02 1.7197908859E-02 b2 = 0.3 0.7 9.5498101505E-01 9.7041624475E-02 b3 = 5.6 3.6 8.4400777463E-01 4.1488663282E-02 b4 = 5.5 4.2 2.9515951832E+00 1.0766312506E-01 b5 = 6.5 4 1.5825685901E+00 5.8371576281E-02 b6 = 7.6 6.3 4.9863565084E+00 3.4436403035E-02 Residual Sum of Squares: 1.6117193594E-08 Residual Standard Deviation: 2.9923229172E-05 Degrees of Freedom: 18 Number of Observations: 24 Data: y x 2.5134E+00 0.00000E+00 2.0443E+00 5.00000E-02 1.6684E+00 1.00000E-01 1.3664E+00 1.50000E-01 1.1232E+00 2.00000E-01 0.9269E+00 2.50000E-01 0.7679E+00 3.00000E-01 0.6389E+00 3.50000E-01 0.5338E+00 4.00000E-01 0.4479E+00 4.50000E-01 0.3776E+00 5.00000E-01 0.3197E+00 5.50000E-01 0.2720E+00 6.00000E-01 0.2325E+00 6.50000E-01 0.1997E+00 7.00000E-01 0.1723E+00 7.50000E-01 0.1493E+00 8.00000E-01 0.1301E+00 8.50000E-01 0.1138E+00 9.00000E-01 0.1000E+00 9.50000E-01 0.0883E+00 1.00000E+00 0.0783E+00 1.05000E+00 0.0698E+00 1.10000E+00 0.0624E+00 1.15000E+00 lmfit-py-1.0.0/NIST_STRD/MGH09.dat000066400000000000000000000044011357751001700161450ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: MGH09 (MGH09.dat) File Format: ASCII Starting Values (lines 41 to 44) Certified Values (lines 41 to 49) Data (lines 61 to 71) Procedure: Nonlinear Least Squares Regression Description: This problem was found to be difficult for some very good algorithms. 
There is a local minimum at (+inf, -14.07..., -inf, -inf) with final sum of squares 0.00102734.... See More, J. J., Garbow, B. S., and Hillstrom, K. E. (1981). Testing unconstrained optimization software. ACM Transactions on Mathematical Software. 7(1): pp. 17-41. Reference: Kowalik, J.S., and M. R. Osborne, (1978). Methods for Unconstrained Optimization Problems. New York, NY: Elsevier North-Holland. Data: 1 Response (y) 1 Predictor (x) 11 Observations Higher Level of Difficulty Generated Data Model: Rational Class (linear/quadratic) 4 Parameters (b1 to b4) y = b1*(x**2+x*b2) / (x**2+x*b3+b4) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 25 0.25 1.9280693458E-01 1.1435312227E-02 b2 = 39 0.39 1.9128232873E-01 1.9633220911E-01 b3 = 41.5 0.415 1.2305650693E-01 8.0842031232E-02 b4 = 39 0.39 1.3606233068E-01 9.0025542308E-02 Residual Sum of Squares: 3.0750560385E-04 Residual Standard Deviation: 6.6279236551E-03 Degrees of Freedom: 7 Number of Observations: 11 Data: y x 1.957000E-01 4.000000E+00 1.947000E-01 2.000000E+00 1.735000E-01 1.000000E+00 1.600000E-01 5.000000E-01 8.440000E-02 2.500000E-01 6.270000E-02 1.670000E-01 4.560000E-02 1.250000E-01 3.420000E-02 1.000000E-01 3.230000E-02 8.330000E-02 2.350000E-02 7.140000E-02 2.460000E-02 6.250000E-02 lmfit-py-1.0.0/NIST_STRD/MGH10.dat000066400000000000000000000044371357751001700161460ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: MGH10 (MGH10.dat) File Format: ASCII Starting Values (lines 41 to 43) Certified Values (lines 41 to 48) Data (lines 61 to 76) Procedure: Nonlinear Least Squares Regression Description: This problem was found to be difficult for some very good algorithms. See More, J. J., Garbow, B. S., and Hillstrom, K. E. (1981). Testing unconstrained optimization software. ACM Transactions on Mathematical Software. 7(1): pp. 17-41. Reference: Meyer, R. R. (1970). Theoretical and computational aspects of nonlinear regression. In Nonlinear Programming, Rosen, Mangasarian and Ritter (Eds). New York, NY: Academic Press, pp. 465-486. Data: 1 Response (y) 1 Predictor (x) 16 Observations Higher Level of Difficulty Generated Data Model: Exponential Class 3 Parameters (b1 to b3) y = b1 * exp[b2/(x+b3)] + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 2 0.02 5.6096364710E-03 1.5687892471E-04 b2 = 400000 4000 6.1813463463E+03 2.3309021107E+01 b3 = 25000 250 3.4522363462E+02 7.8486103508E-01 Residual Sum of Squares: 8.7945855171E+01 Residual Standard Deviation: 2.6009740065E+00 Degrees of Freedom: 13 Number of Observations: 16 Data: y x 3.478000E+04 5.000000E+01 2.861000E+04 5.500000E+01 2.365000E+04 6.000000E+01 1.963000E+04 6.500000E+01 1.637000E+04 7.000000E+01 1.372000E+04 7.500000E+01 1.154000E+04 8.000000E+01 9.744000E+03 8.500000E+01 8.261000E+03 9.000000E+01 7.030000E+03 9.500000E+01 6.005000E+03 1.000000E+02 5.147000E+03 1.050000E+02 4.427000E+03 1.100000E+02 3.820000E+03 1.150000E+02 3.307000E+03 1.200000E+02 2.872000E+03 1.250000E+02 lmfit-py-1.0.0/NIST_STRD/MGH17.dat000066400000000000000000000060061357751001700161470ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: MGH17 (MGH17.dat) File Format: ASCII Starting Values (lines 41 to 45) Certified Values (lines 41 to 50) Data (lines 61 to 93) Procedure: Nonlinear Least Squares Regression Description: This problem was found to be difficult for some very good algorithms. See More, J. J., Garbow, B. S., and Hillstrom, K. E. (1981). Testing unconstrained optimization software. 
ACM Transactions on Mathematical Software. 7(1): pp. 17-41. Reference: Osborne, M. R. (1972). Some aspects of nonlinear least squares calculations. In Numerical Methods for Nonlinear Optimization, Lootsma (Ed). New York, NY: Academic Press, pp. 171-189. Data: 1 Response (y) 1 Predictor (x) 33 Observations Average Level of Difficulty Generated Data Model: Exponential Class 5 Parameters (b1 to b5) y = b1 + b2*exp[-x*b4] + b3*exp[-x*b5] + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 50 0.5 3.7541005211E-01 2.0723153551E-03 b2 = 150 1.5 1.9358469127E+00 2.2031669222E-01 b3 = -100 -1 -1.4646871366E+00 2.2175707739E-01 b4 = 1 0.01 1.2867534640E-02 4.4861358114E-04 b5 = 2 0.02 2.2122699662E-02 8.9471996575E-04 Residual Sum of Squares: 5.4648946975E-05 Residual Standard Deviation: 1.3970497866E-03 Degrees of Freedom: 28 Number of Observations: 33 Data: y x 8.440000E-01 0.000000E+00 9.080000E-01 1.000000E+01 9.320000E-01 2.000000E+01 9.360000E-01 3.000000E+01 9.250000E-01 4.000000E+01 9.080000E-01 5.000000E+01 8.810000E-01 6.000000E+01 8.500000E-01 7.000000E+01 8.180000E-01 8.000000E+01 7.840000E-01 9.000000E+01 7.510000E-01 1.000000E+02 7.180000E-01 1.100000E+02 6.850000E-01 1.200000E+02 6.580000E-01 1.300000E+02 6.280000E-01 1.400000E+02 6.030000E-01 1.500000E+02 5.800000E-01 1.600000E+02 5.580000E-01 1.700000E+02 5.380000E-01 1.800000E+02 5.220000E-01 1.900000E+02 5.060000E-01 2.000000E+02 4.900000E-01 2.100000E+02 4.780000E-01 2.200000E+02 4.670000E-01 2.300000E+02 4.570000E-01 2.400000E+02 4.480000E-01 2.500000E+02 4.380000E-01 2.600000E+02 4.310000E-01 2.700000E+02 4.240000E-01 2.800000E+02 4.200000E-01 2.900000E+02 4.140000E-01 3.000000E+02 4.110000E-01 3.100000E+02 4.060000E-01 3.200000E+02 lmfit-py-1.0.0/NIST_STRD/Misra1a.dat000066400000000000000000000034751357751001700166700ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Misra1a (Misra1a.dat) File Format: ASCII Starting Values (lines 41 to 42) Certified Values (lines 41 to 47) Data (lines 61 to 74) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study regarding dental research in monomolecular adsorption. The response variable is volume, and the predictor variable is pressure. Reference: Misra, D., NIST (1978). Dental Research Monomolecular Adsorption Study. Data: 1 Response Variable (y = volume) 1 Predictor Variable (x = pressure) 14 Observations Lower Level of Difficulty Observed Data Model: Exponential Class 2 Parameters (b1 and b2) y = b1*(1-exp[-b2*x]) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 500 250 2.3894212918E+02 2.7070075241E+00 b2 = 0.0001 0.0005 5.5015643181E-04 7.2668688436E-06 Residual Sum of Squares: 1.2455138894E-01 Residual Standard Deviation: 1.0187876330E-01 Degrees of Freedom: 12 Number of Observations: 14 Data: y x 10.07E0 77.6E0 14.73E0 114.9E0 17.94E0 141.1E0 23.93E0 190.8E0 29.61E0 239.9E0 35.18E0 289.0E0 40.02E0 332.8E0 44.82E0 378.4E0 50.76E0 434.8E0 55.05E0 477.3E0 61.01E0 536.8E0 66.40E0 593.1E0 75.47E0 689.1E0 81.78E0 760.0E0 lmfit-py-1.0.0/NIST_STRD/Misra1b.dat000066400000000000000000000034651357751001700166700ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Misra1b (Misra1b.dat) File Format: ASCII Starting Values (lines 41 to 42) Certified Values (lines 41 to 47) Data (lines 61 to 74) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study regarding dental research in monomolecular adsorption. 
The response variable is volume, and the predictor variable is pressure. Reference: Misra, D., NIST (1978). Dental Research Monomolecular Adsorption Study. Data: 1 Response (y = volume) 1 Predictor (x = pressure) 14 Observations Lower Level of Difficulty Observed Data Model: Miscellaneous Class 2 Parameters (b1 and b2) y = b1 * (1-(1+b2*x/2)**(-2)) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 500 300 3.3799746163E+02 3.1643950207E+00 b2 = 0.0001 0.0002 3.9039091287E-04 4.2547321834E-06 Residual Sum of Squares: 7.5464681533E-02 Residual Standard Deviation: 7.9301471998E-02 Degrees of Freedom: 12 Number of Observations: 14 Data: y x 10.07E0 77.6E0 14.73E0 114.9E0 17.94E0 141.1E0 23.93E0 190.8E0 29.61E0 239.9E0 35.18E0 289.0E0 40.02E0 332.8E0 44.82E0 378.4E0 50.76E0 434.8E0 55.05E0 477.3E0 61.01E0 536.8E0 66.40E0 593.1E0 75.47E0 689.1E0 81.78E0 760.0E0 lmfit-py-1.0.0/NIST_STRD/Misra1c.dat000066400000000000000000000034571357751001700166720ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Misra1c (Misra1c.dat) File Format: ASCII Starting Values (lines 41 to 42) Certified Values (lines 41 to 47) Data (lines 61 to 74) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study regarding dental research in monomolecular adsorption. The response variable is volume, and the predictor variable is pressure. Reference: Misra, D., NIST (1978). Dental Research Monomolecular Adsorption. Data: 1 Response (y = volume) 1 Predictor (x = pressure) 14 Observations Average Level of Difficulty Observed Data Model: Miscellaneous Class 2 Parameters (b1 and b2) y = b1 * (1-(1+2*b2*x)**(-.5)) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 500 600 6.3642725809E+02 4.6638326572E+00 b2 = 0.0001 0.0002 2.0813627256E-04 1.7728423155E-06 Residual Sum of Squares: 4.0966836971E-02 Residual Standard Deviation: 5.8428615257E-02 Degrees of Freedom: 12 Number of Observations: 14 Data: y x 10.07E0 77.6E0 14.73E0 114.9E0 17.94E0 141.1E0 23.93E0 190.8E0 29.61E0 239.9E0 35.18E0 289.0E0 40.02E0 332.8E0 44.82E0 378.4E0 50.76E0 434.8E0 55.05E0 477.3E0 61.01E0 536.8E0 66.40E0 593.1E0 75.47E0 689.1E0 81.78E0 760.0E0 lmfit-py-1.0.0/NIST_STRD/Misra1d.dat000066400000000000000000000034631357751001700166700ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Misra1d (Misra1d.dat) File Format: ASCII Starting Values (lines 41 to 42) Certified Values (lines 41 to 47) Data (lines 61 to 74) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study regarding dental research in monomolecular adsorption. The response variable is volume, and the predictor variable is pressure. Reference: Misra, D., NIST (1978). Dental Research Monomolecular Adsorption Study. 
Data: 1 Response (y = volume) 1 Predictor (x = pressure) 14 Observations Average Level of Difficulty Observed Data Model: Miscellaneous Class 2 Parameters (b1 and b2) y = b1*b2*x*((1+b2*x)**(-1)) + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 500 450 4.3736970754E+02 3.6489174345E+00 b2 = 0.0001 0.0003 3.0227324449E-04 2.9334354479E-06 Residual Sum of Squares: 5.6419295283E-02 Residual Standard Deviation: 6.8568272111E-02 Degrees of Freedom: 12 Number of Observations: 14 Data: y x 10.07E0 77.6E0 14.73E0 114.9E0 17.94E0 141.1E0 23.93E0 190.8E0 29.61E0 239.9E0 35.18E0 289.0E0 40.02E0 332.8E0 44.82E0 378.4E0 50.76E0 434.8E0 55.05E0 477.3E0 61.01E0 536.8E0 66.40E0 593.1E0 75.47E0 689.1E0 81.78E0 760.0E0 lmfit-py-1.0.0/NIST_STRD/Nelson.dat000066400000000000000000000155311357751001700166250ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Nelson (Nelson.dat) File Format: ASCII Starting Values (lines 41 to 43) Certified Values (lines 41 to 48) Data (lines 61 to 188) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a study involving the analysis of performance degradation data from accelerated tests, published in IEEE Transactions on Reliability. The response variable is dialectric breakdown strength in kilo-volts, and the predictor variables are time in weeks and temperature in degrees Celcius. Reference: Nelson, W. (1981). Analysis of Performance-Degradation Data. IEEE Transactions on Reliability. Vol. 2, R-30, No. 2, pp. 149-155. Data: 1 Response ( y = dialectric breakdown strength) 2 Predictors (x1 = time; x2 = temperature) 128 Observations Average Level of Difficulty Observed Data Model: Exponential Class 3 Parameters (b1 to b3) log[y] = b1 - b2*x1 * exp[-b3*x2] + e Starting values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 2 2.5 2.5906836021E+00 1.9149996413E-02 b2 = 0.0001 0.000000005 5.6177717026E-09 6.1124096540E-09 b3 = -0.01 -0.05 -5.7701013174E-02 3.9572366543E-03 Residual Sum of Squares: 3.7976833176E+00 Residual Standard Deviation: 1.7430280130E-01 Degrees of Freedom: 125 Number of Observations: 128 Data: y x1 x2 15.00E0 1E0 180E0 17.00E0 1E0 180E0 15.50E0 1E0 180E0 16.50E0 1E0 180E0 15.50E0 1E0 225E0 15.00E0 1E0 225E0 16.00E0 1E0 225E0 14.50E0 1E0 225E0 15.00E0 1E0 250E0 14.50E0 1E0 250E0 12.50E0 1E0 250E0 11.00E0 1E0 250E0 14.00E0 1E0 275E0 13.00E0 1E0 275E0 14.00E0 1E0 275E0 11.50E0 1E0 275E0 14.00E0 2E0 180E0 16.00E0 2E0 180E0 13.00E0 2E0 180E0 13.50E0 2E0 180E0 13.00E0 2E0 225E0 13.50E0 2E0 225E0 12.50E0 2E0 225E0 12.50E0 2E0 225E0 12.50E0 2E0 250E0 12.00E0 2E0 250E0 11.50E0 2E0 250E0 12.00E0 2E0 250E0 13.00E0 2E0 275E0 11.50E0 2E0 275E0 13.00E0 2E0 275E0 12.50E0 2E0 275E0 13.50E0 4E0 180E0 17.50E0 4E0 180E0 17.50E0 4E0 180E0 13.50E0 4E0 180E0 12.50E0 4E0 225E0 12.50E0 4E0 225E0 15.00E0 4E0 225E0 13.00E0 4E0 225E0 12.00E0 4E0 250E0 13.00E0 4E0 250E0 12.00E0 4E0 250E0 13.50E0 4E0 250E0 10.00E0 4E0 275E0 11.50E0 4E0 275E0 11.00E0 4E0 275E0 9.50E0 4E0 275E0 15.00E0 8E0 180E0 15.00E0 8E0 180E0 15.50E0 8E0 180E0 16.00E0 8E0 180E0 13.00E0 8E0 225E0 10.50E0 8E0 225E0 13.50E0 8E0 225E0 14.00E0 8E0 225E0 12.50E0 8E0 250E0 12.00E0 8E0 250E0 11.50E0 8E0 250E0 11.50E0 8E0 250E0 6.50E0 8E0 275E0 5.50E0 8E0 275E0 6.00E0 8E0 275E0 6.00E0 8E0 275E0 18.50E0 16E0 180E0 17.00E0 16E0 180E0 15.30E0 16E0 180E0 16.00E0 16E0 180E0 13.00E0 16E0 225E0 14.00E0 16E0 225E0 12.50E0 16E0 225E0 11.00E0 16E0 225E0 12.00E0 16E0 250E0 12.00E0 16E0 250E0 11.50E0 16E0 250E0 12.00E0 16E0 250E0 6.00E0 16E0 
275E0 6.00E0 16E0 275E0 5.00E0 16E0 275E0 5.50E0 16E0 275E0 12.50E0 32E0 180E0 13.00E0 32E0 180E0 16.00E0 32E0 180E0 12.00E0 32E0 180E0 11.00E0 32E0 225E0 9.50E0 32E0 225E0 11.00E0 32E0 225E0 11.00E0 32E0 225E0 11.00E0 32E0 250E0 10.00E0 32E0 250E0 10.50E0 32E0 250E0 10.50E0 32E0 250E0 2.70E0 32E0 275E0 2.70E0 32E0 275E0 2.50E0 32E0 275E0 2.40E0 32E0 275E0 13.00E0 48E0 180E0 13.50E0 48E0 180E0 16.50E0 48E0 180E0 13.60E0 48E0 180E0 11.50E0 48E0 225E0 10.50E0 48E0 225E0 13.50E0 48E0 225E0 12.00E0 48E0 225E0 7.00E0 48E0 250E0 6.90E0 48E0 250E0 8.80E0 48E0 250E0 7.90E0 48E0 250E0 1.20E0 48E0 275E0 1.50E0 48E0 275E0 1.00E0 48E0 275E0 1.50E0 48E0 275E0 13.00E0 64E0 180E0 12.50E0 64E0 180E0 16.50E0 64E0 180E0 16.00E0 64E0 180E0 11.00E0 64E0 225E0 11.50E0 64E0 225E0 10.50E0 64E0 225E0 10.00E0 64E0 225E0 7.27E0 64E0 250E0 7.50E0 64E0 250E0 6.70E0 64E0 250E0 7.60E0 64E0 250E0 1.50E0 64E0 275E0 1.00E0 64E0 275E0 1.20E0 64E0 275E0 1.20E0 64E0 275E0 lmfit-py-1.0.0/NIST_STRD/Rat42.dat000066400000000000000000000035211357751001700162570ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Rat42 (Rat42.dat) File Format: ASCII Starting Values (lines 41 to 43) Certified Values (lines 41 to 48) Data (lines 61 to 69) Procedure: Nonlinear Least Squares Regression Description: This model and data are an example of fitting sigmoidal growth curves taken from Ratkowsky (1983). The response variable is pasture yield, and the predictor variable is growing time. Reference: Ratkowsky, D.A. (1983). Nonlinear Regression Modeling. New York, NY: Marcel Dekker, pp. 61 and 88. Data: 1 Response (y = pasture yield) 1 Predictor (x = growing time) 9 Observations Higher Level of Difficulty Observed Data Model: Exponential Class 3 Parameters (b1 to b3) y = b1 / (1+exp[b2-b3*x]) + e Starting Values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 100 75 7.2462237576E+01 1.7340283401E+00 b2 = 1 2.5 2.6180768402E+00 8.8295217536E-02 b3 = 0.1 0.07 6.7359200066E-02 3.4465663377E-03 Residual Sum of Squares: 8.0565229338E+00 Residual Standard Deviation: 1.1587725499E+00 Degrees of Freedom: 6 Number of Observations: 9 Data: y x 8.930E0 9.000E0 10.800E0 14.000E0 18.590E0 21.000E0 22.330E0 28.000E0 39.350E0 42.000E0 56.110E0 57.000E0 61.730E0 63.000E0 64.620E0 70.000E0 67.080E0 79.000E0 lmfit-py-1.0.0/NIST_STRD/Rat43.dat000066400000000000000000000040301357751001700162540ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Rat43 (Rat43.dat) File Format: ASCII Starting Values (lines 41 to 44) Certified Values (lines 41 to 49) Data (lines 61 to 75) Procedure: Nonlinear Least Squares Regression Description: This model and data are an example of fitting sigmoidal growth curves taken from Ratkowsky (1983). The response variable is the dry weight of onion bulbs and tops, and the predictor variable is growing time. Reference: Ratkowsky, D.A. (1983). Nonlinear Regression Modeling. New York, NY: Marcel Dekker, pp. 62 and 88. 
Data: 1 Response (y = onion bulb dry weight) 1 Predictor (x = growing time) 15 Observations Higher Level of Difficulty Observed Data Model: Exponential Class 4 Parameters (b1 to b4) y = b1 / ((1+exp[b2-b3*x])**(1/b4)) + e Starting Values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 100 700 6.9964151270E+02 1.6302297817E+01 b2 = 10 5 5.2771253025E+00 2.0828735829E+00 b3 = 1 0.75 7.5962938329E-01 1.9566123451E-01 b4 = 1 1.3 1.2792483859E+00 6.8761936385E-01 Residual Sum of Squares: 8.7864049080E+03 Residual Standard Deviation: 2.8262414662E+01 Degrees of Freedom: 9 Number of Observations: 15 Data: y x 16.08E0 1.0E0 33.83E0 2.0E0 65.80E0 3.0E0 97.20E0 4.0E0 191.55E0 5.0E0 326.20E0 6.0E0 386.87E0 7.0E0 520.53E0 8.0E0 590.03E0 9.0E0 651.92E0 10.0E0 724.93E0 11.0E0 699.56E0 12.0E0 689.96E0 13.0E0 637.56E0 14.0E0 717.41E0 15.0E0 lmfit-py-1.0.0/NIST_STRD/Roszman1.dat000066400000000000000000000046661357751001700171100ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Roszman1 (Roszman1.dat) File Format: ASCII Starting Values (lines 41 to 44) Certified Values (lines 41 to 49) Data (lines 61 to 85) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study involving quantum defects in iodine atoms. The response variable is the number of quantum defects, and the predictor variable is the excited energy state. The argument to the ARCTAN function is in radians. Reference: Roszman, L., NIST (19??). Quantum Defects for Sulfur I Atom. Data: 1 Response (y = quantum defect) 1 Predictor (x = excited state energy) 25 Observations Average Level of Difficulty Observed Data Model: Miscellaneous Class 4 Parameters (b1 to b4) pi = 3.141592653589793238462643383279E0 y = b1 - b2*x - arctan[b3/(x-b4)]/pi + e Starting Values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 0.1 0.2 2.0196866396E-01 1.9172666023E-02 b2 = -0.00001 -0.000005 -6.1953516256E-06 3.2058931691E-06 b3 = 1000 1200 1.2044556708E+03 7.4050983057E+01 b4 = -100 -150 -1.8134269537E+02 4.9573513849E+01 Residual Sum of Squares: 4.9484847331E-04 Residual Standard Deviation: 4.8542984060E-03 Degrees of Freedom: 21 Number of Observations: 25 Data: y x 0.252429 -4868.68 0.252141 -4868.09 0.251809 -4867.41 0.297989 -3375.19 0.296257 -3373.14 0.295319 -3372.03 0.339603 -2473.74 0.337731 -2472.35 0.333820 -2469.45 0.389510 -1894.65 0.386998 -1893.40 0.438864 -1497.24 0.434887 -1495.85 0.427893 -1493.41 0.471568 -1208.68 0.461699 -1206.18 0.461144 -1206.04 0.513532 -997.92 0.506641 -996.61 0.505062 -996.31 0.535648 -834.94 0.533726 -834.66 0.568064 -710.03 0.612886 -530.16 0.624169 -464.17 lmfit-py-1.0.0/NIST_STRD/Thurber.dat000066400000000000000000000057221357751001700170030ustar00rootroot00000000000000NIST/ITL StRD Dataset Name: Thurber (Thurber.dat) File Format: ASCII Starting Values (lines 41 to 47) Certified Values (lines 41 to 52) Data (lines 61 to 97) Procedure: Nonlinear Least Squares Regression Description: These data are the result of a NIST study involving semiconductor electron mobility. The response variable is a measure of electron mobility, and the predictor variable is the natural log of the density. Reference: Thurber, R., NIST (197?). Semiconductor electron mobility modeling. 
Data: 1 Response Variable (y = electron mobility) 1 Predictor Variable (x = log[density]) 37 Observations Higher Level of Difficulty Observed Data Model: Rational Class (cubic/cubic) 7 Parameters (b1 to b7) y = (b1 + b2*x + b3*x**2 + b4*x**3) / (1 + b5*x + b6*x**2 + b7*x**3) + e Starting Values Certified Values Start 1 Start 2 Parameter Standard Deviation b1 = 1000 1300 1.2881396800E+03 4.6647963344E+00 b2 = 1000 1500 1.4910792535E+03 3.9571156086E+01 b3 = 400 500 5.8323836877E+02 2.8698696102E+01 b4 = 40 75 7.5416644291E+01 5.5675370270E+00 b5 = 0.7 1 9.6629502864E-01 3.1333340687E-02 b6 = 0.3 0.4 3.9797285797E-01 1.4984928198E-02 b7 = 0.03 0.05 4.9727297349E-02 6.5842344623E-03 Residual Sum of Squares: 5.6427082397E+03 Residual Standard Deviation: 1.3714600784E+01 Degrees of Freedom: 30 Number of Observations: 37 Data: y x 80.574E0 -3.067E0 84.248E0 -2.981E0 87.264E0 -2.921E0 87.195E0 -2.912E0 89.076E0 -2.840E0 89.608E0 -2.797E0 89.868E0 -2.702E0 90.101E0 -2.699E0 92.405E0 -2.633E0 95.854E0 -2.481E0 100.696E0 -2.363E0 101.060E0 -2.322E0 401.672E0 -1.501E0 390.724E0 -1.460E0 567.534E0 -1.274E0 635.316E0 -1.212E0 733.054E0 -1.100E0 759.087E0 -1.046E0 894.206E0 -0.915E0 990.785E0 -0.714E0 1090.109E0 -0.566E0 1080.914E0 -0.545E0 1122.643E0 -0.400E0 1178.351E0 -0.309E0 1260.531E0 -0.109E0 1273.514E0 -0.103E0 1288.339E0 0.010E0 1327.543E0 0.119E0 1353.863E0 0.377E0 1414.509E0 0.790E0 1425.208E0 0.963E0 1421.384E0 1.006E0 1442.962E0 1.115E0 1464.350E0 1.572E0 1468.705E0 1.841E0 1447.894E0 2.047E0 1457.628E0 2.200E0 lmfit-py-1.0.0/PKG-INFO000066400000000000000000000040171357751001700143560ustar00rootroot00000000000000Metadata-Version: 1.2 Name: lmfit Version: 1.0.0 Summary: Least-Squares Minimization with Bounds and Constraints Home-page: https://lmfit.github.io/lmfit-py/ Author: LMFit Development Team Author-email: matt.newville@gmail.com License: BSD-3 Download-URL: https://lmfit.github.io//lmfit-py/ Description: A library for least-squares minimization and data fitting in Python. Built on top of scipy.optimize, lmfit provides a Parameter object which can be set as fixed or free, can have upper and/or lower bounds, or can be written in terms of algebraic constraints of other Parameters. The user writes a function to be minimized as a function of these Parameters, and the scipy.optimize methods are used to find the optimal values for the Parameters. The Levenberg-Marquardt (leastsq) is the default minimization algorithm, and provides estimated standard errors and correlations between varied Parameters. Other minimization methods, including Nelder-Mead's downhill simplex, Powell's method, BFGS, Sequential Least Squares, and others are also supported. Bounds and contraints can be placed on Parameters for all of these methods. In addition, methods for explicitly calculating confidence intervals are provided for exploring minmization problems where the approximation of estimating Parameter uncertainties from the covariance matrix is questionable. 
Keywords: curve-fitting,least-squares minimization Platform: Windows Platform: Linux Platform: Mac OS X Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Science/Research Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Topic :: Scientific/Engineering Requires-Python: >=3.5 lmfit-py-1.0.0/README.rst000066400000000000000000000076501357751001700147560ustar00rootroot00000000000000LMfit-py ======== .. image:: https://travis-ci.org/lmfit/lmfit-py.svg :target: https://travis-ci.org/lmfit/lmfit-py .. image:: https://codecov.io/gh/lmfit/lmfit-py/branch/master/graph/badge.svg :target: https://codecov.io/gh/lmfit/lmfit-py .. image:: https://img.shields.io/pypi/v/lmfit.svg :target: https://pypi.org/project/lmfit .. image:: https://img.shields.io/pypi/dm/lmfit.svg :target: https://pypi.org/project/lmfit .. image:: https://zenodo.org/badge/4185/lmfit/lmfit-py.svg :target: https://zenodo.org/badge/latestdoi/4185/lmfit/lmfit-py .. _LMfit mailing list: https://groups.google.com/group/lmfit-py Overview --------- LMfit-py provides a Least-Squares Minimization routine and class with a simple, flexible approach to parameterizing a model for fitting to data. LMfit is a pure Python package, and so easy to install from source or with ``pip install lmfit``. For questions, comments, and suggestions, please use the `LMfit mailing list`_. Using the bug tracking software in GitHub Issues is encouraged for known problems and bug reports. Please read `Contributing.md <.github/CONTRIBUTING.md>`_ before creating an Issue. Parameters and Fitting ------------------------- LMfit-py provides a Least-Squares Minimization routine and class with a simple, flexible approach to parameterizing a model for fitting to data. Named Parameters can be held fixed or freely adjusted in the fit, or held between lower and upper bounds. In addition, parameters can be constrained as a simple mathematical expression of other Parameters. To do this, the programmer defines a Parameters object, an enhanced dictionary, containing named parameters:: fit_params = Parameters() fit_params['amp'] = Parameter(value=1.2, min=0.1, max=1000) fit_params['cen'] = Parameter(value=40.0, vary=False) fit_params['wid'] = Parameter(value=4, min=0) or using the equivalent:: fit_params = Parameters() fit_params.add('amp', value=1.2, min=0.1, max=1000) fit_params.add('cen', value=40.0, vary=False) fit_params.add('wid', value=4, min=0) The programmer will also write a function to be minimized (in the least-squares sense) with its first argument being this Parameters object, and additional positional and keyword arguments as desired:: def myfunc(params, x, data, someflag=True): amp = params['amp'].value cen = params['cen'].value wid = params['wid'].value ... return residual_array For each call of this function, the values for the params may have changed, subject to the bounds and constraint settings for each Parameter. The function should return the residual (i.e., data-model) array to be minimized. The advantage here is that the function to be minimized does not have to be changed if different bounds or constraints are placed on the fitting Parameters. 
The fitting model (as described in myfunc) is instead written in terms of physical parameters of the system, and remains independent of what is actually varied in the fit. In addition, which parameters are adjusted and which are fixed happens at run-time, so that changing what is varied and what constraints are placed on the parameters can easily be modified by the user in real-time data analysis. To perform the fit, the user calls:: result = minimize(myfunc, fit_params, args=(x, data), kws={'someflag':True}, ....) After the fit, a ``MinimizerResult`` class is returned that holds the results of the fit (e.g., fitting statistics and optimized parameters). The dictionary ``result.params`` contains the best-fit values, estimated standard deviations, and correlations with other variables in the fit. By default, the underlying fit algorithm is the Levenberg-Marquardt algorithm with numerically-calculated derivatives from MINPACK's lmdif function, as used by ``scipy.optimize.leastsq``. Most other solvers that are present in ``scipy`` (e.g., Nelder-Mead, differential_evolution, basinhopping, etcetera) are also supported. lmfit-py-1.0.0/THANKS.txt000066400000000000000000000053121357751001700150110ustar00rootroot00000000000000Many people have contributed to lmfit. The attribution of credit in a project such as this is difficult to get perfect, and there are no doubt important contributions that are missing or under-represented here. Please consider this file as part of the code and documentation that may have bugs that need fixing. Some of the largest and most important contributions (in approximate order of size of the contribution to the existing code) are from: Matthew Newville wrote the original version and maintains the project. Renee Otten wrote the brute force method, implemented the basin-hopping and AMPGO global solvers, implemented uncertainty calculations for scalar minimizers and has greatly improved the code, testing, and documentation and overall project. Till Stensitzki wrote the improved estimates of confidence intervals, and contributed many tests, bug fixes, and documentation. A. R. J. Nelson added differential_evolution, emcee, and greatly improved the code, docstrings, and overall project. Antonino Ingargiola wrote much of the high level Model code and has provided many bug fixes and improvements. Daniel B. Allan wrote much of the original version of the high level Model code, and many improvements to the testing and documentation. Austen Fox fixed many of the built-in model functions and improved the testing and documentation of these. Michal Rawlik added plotting capabilities for Models. The method used for placing bounds on parameters was derived from the clear description in the MINUIT documentation, and adapted from J. J. Helmus's python implementation in leastsqbounds.py. E. O. Le Bigot wrote the uncertainties package, a version of which was used by lmfit for many years, and is now an external dependency. The original AMPGO code came from Andrea Gavana and was adopted for lmfit. The propagation of parameter uncertainties to uncertainties in a Model was adapted from the excellent description at https://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html#confidence-and-prediction-intervals, which references the original work of: J. Wolberg, Data Analysis Using the Method of Least Squares, 2006, Springer.
Additional patches, bug fixes, and suggestions have come from Faustin Carter, Christoph Deil, Francois Boulogne, Thomas Caswell, Colin Brosseau, nmearl, Gustavo Pasquevich, Clemens Prescher, LiCode, Ben Gamari, Yoav Roam, Alexander Stark, Alexandre Beelen, Andrey Aristov, Nicholas Zobrist, Ethan Welty, Julius Zimmermann, and many others. The lmfit code obviously depends on, and owes a very large debt to the code in scipy.optimize. Several discussions on the SciPy-user and lmfit mailing lists have also led to improvements in this code. lmfit-py-1.0.0/doc/000077500000000000000000000000001357751001700140245ustar00rootroot00000000000000lmfit-py-1.0.0/doc/Makefile000066400000000000000000000103261357751001700154660ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXBUILD = sphinx-build SPHINX_OPTS = -W SPHINX_DEBUGOPTS = --keep-going -n BUILDDIR = _build # Internal variables. PAPER = PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter SPHINX_OUTPUT = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) ALLSPHINXOPTS = $(SPHINX_OUTPUT) $(SPHINXOPTS) . .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest latexpdf htmlzip .PHONY: all pdf gallery debug html: gallery cp sphinx/ext_mathjax.py extensions.py $(SPHINXBUILD) -b html $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/html @echo @echo "html build finished: $(BUILDDIR)/html." debug: gallery cp sphinx/ext_mathjax.py extensions.py $(SPHINXBUILD) -b html $(SPHINX_OUTPUT) $(SPHINX_DEBUGOPTS) . $(BUILDDIR)/html @echo @echo "html build finished: $(BUILDDIR)/html." gallery: examples/index.rst examples/index.rst: ./doc_examples_to_gallery.py htmlzip: html cp sphinx/ext_mathjax.py extensions.py $(SPHINXBUILD) -b html $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/lmfit_doc cd $(BUILDDIR) && zip -pur html/lmfit_doc.zip lmfit_doc epub: html cp sphinx/ext_mathjax.py extensions.py $(SPHINXBUILD) -b epub $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/epub cp -pr $(BUILDDIR)/epub/*.epub $(BUILDDIR)/html/. pdf: latex cp sphinx/ext_imgmath.py extensions.py cd $(BUILDDIR)/latex && make all-pdf cp -pr $(BUILDDIR)/latex/lmfit.pdf $(BUILDDIR)/html/. all: html htmlzip epub pdf help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* -rm -f extensions.py -rm -f *.dat *.sav *.csv -rm -rf examples/* -rm -rf ../examples/documentation dirhtml: gallery $(SPHINXBUILD) -b dirhtml $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: gallery $(SPHINXBUILD) -b pickle $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: gallery $(SPHINXBUILD) -b json $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." 
htmlhelp: gallery $(SPHINXBUILD) -b htmlhelp $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." latex: gallery cp sphinx/ext_imgmath.py extensions.py $(SPHINXBUILD) -b latex $(SPHINX_OUTPUT) $(SPHINX_OPTS) . _build/latex @echo @echo "Build finished; the LaTeX files are in _build/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." latexpdf: cp sphinx/ext_imgmath.py extensions.py $(SPHINXBUILD) -b latex $(SPHINX_OUTPUT) $(SPHINX_OPTS) . _build/latex @echo "Running LaTeX files through pdflatex..." make -C _build/latex all-pdf @echo "pdflatex finished; the PDF files are in _build/latex." changes: $(SPHINXBUILD) -b changes $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." lmfit-py-1.0.0/doc/__pycache__/000077500000000000000000000000001357751001700162345ustar00rootroot00000000000000lmfit-py-1.0.0/doc/__pycache__/extensions.cpython-37.pyc000066400000000000000000000006251357751001700230650ustar00rootroot00000000000000B ”&ý]ã @sdddddddddg Zd S) zsphinx.ext.autodoczsphinx.ext.extlinkszsphinx.ext.intersphinxzsphinx.ext.mathjaxzsphinx.ext.napoleonzsphinx.ext.todoz.IPython.sphinxext.ipython_console_highlightingzjupyter_sphinx.executezsphinx_gallery.gen_galleryN)Ú extensions©rrú0/Users/Newville/Codes/lmfit-py/doc/extensions.pyÚslmfit-py-1.0.0/doc/_static/000077500000000000000000000000001357751001700154525ustar00rootroot00000000000000lmfit-py-1.0.0/doc/_static/empty000066400000000000000000000000001357751001700165210ustar00rootroot00000000000000lmfit-py-1.0.0/doc/_templates/000077500000000000000000000000001357751001700161615ustar00rootroot00000000000000lmfit-py-1.0.0/doc/_templates/indexsidebar.html000066400000000000000000000014051357751001700215100ustar00rootroot00000000000000

Getting LMFIT

Current version: {{ release }}

Install:   pip install lmfit

Download:   PyPI

Develop:   GitHub

Questions?

  Frequently Asked Questions
  Mailing List
  Getting Help

Static, off-line docs

[PDF | EPUB | HTML (zip)]

lmfit-py-1.0.0/doc/bounds.rst000066400000000000000000000067171357751001700160600ustar00rootroot00000000000000.. _bounds_chapter: ================================= Bounds Implementation ================================= .. _MINPACK-1: https://en.wikipedia.org/wiki/MINPACK .. _MINUIT: https://en.wikipedia.org/wiki/MINUIT .. _leastsqbound: https://github.com/jjhelmus/leastsqbound-scipy This section describes the implementation of :class:`Parameter` bounds. The `MINPACK-1`_ implementation used in :scipydoc:`optimize.leastsq` for the Levenberg-Marquardt algorithm does not explicitly support bounds on parameters, and expects to be able to fully explore the available range of values for any Parameter. Simply placing hard constraints (that is, resetting the value when it exceeds the desired bounds) prevents the algorithm from determining the partial derivatives, and leads to unstable results. Instead of placing such hard constraints, bounded parameters are mathematically transformed using the formulation devised (and documented) for `MINUIT`_. This is implemented following (and borrowing heavily from) the `leastsqbound`_ package from J. J. Helmus. Parameter values are mapped from internally used, freely variable values :math:`P_{\rm internal}` to bounded parameters :math:`P_{\rm bounded}`. When both ``min`` and ``max`` bounds are specified, the mapping is: .. math:: :nowrap: \begin{eqnarray*} P_{\rm internal} &=& \arcsin\big(\frac{2 (P_{\rm bounded} - {\rm min})}{({\rm max} - {\rm min})} - 1\big) \\ P_{\rm bounded} &=& {\rm min} + \big(\sin(P_{\rm internal}) + 1\big) \frac{({\rm max} - {\rm min})}{2} \end{eqnarray*} With only an upper limit ``max`` supplied, but ``min`` left unbounded, the mapping is: .. math:: :nowrap: \begin{eqnarray*} P_{\rm internal} &=& \sqrt{({\rm max} - P_{\rm bounded} + 1)^2 - 1} \\ P_{\rm bounded} &=& {\rm max} + 1 - \sqrt{P_{\rm internal}^2 + 1} \end{eqnarray*} With only a lower limit ``min`` supplied, but ``max`` left unbounded, the mapping is: .. math:: :nowrap: \begin{eqnarray*} P_{\rm internal} &=& \sqrt{(P_{\rm bounded} - {\rm min} + 1)^2 - 1} \\ P_{\rm bounded} &=& {\rm min} - 1 + \sqrt{P_{\rm internal}^2 + 1} \end{eqnarray*} With these mappings, the value for the bounded Parameter cannot exceed the specified bounds, though the internally used value can be freely varied. It bears repeating that code from `leastsqbound`_ was adopted to implement the transformation described above. The challenging part (thanks again to Jonathan J. Helmus!) here is to re-transform the covariance matrix so that the uncertainties can be estimated for bounded Parameters. This is done by using the derivative :math:`dP_{\rm internal}/dP_{\rm bounded}` from the equations above to re-scale the Jacobian matrix before constructing the covariance matrix from it. Tests show that this re-scaling of the covariance matrix works quite well, and that uncertainties estimated for bounded parameters are quite reasonable. Of course, if the best-fit value is very close to a boundary, the uncertainty and correlations estimated in this way for that parameter may not be reliable. The `MINUIT`_ documentation recommends caution in using bounds. Setting bounds can certainly increase the number of function evaluations (and so computation time), and in some cases may cause some instabilities, as the range of acceptable parameter values is not fully explored. On the other hand, preliminary tests suggest that using ``max`` and ``min`` to set clearly outlandish bounds does not greatly affect performance or results.
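As a purely illustrative sketch (this simply mirrors the equations above and is not the actual internal lmfit code), the two-sided mapping can be checked numerically with NumPy, confirming that any freely varying internal value is mapped back into the ``[min, max]`` interval::

    import numpy as np

    def to_internal(p_bounded, pmin, pmax):
        """Map a bounded value onto the freely varying internal value."""
        return np.arcsin(2.0*(p_bounded - pmin)/(pmax - pmin) - 1.0)

    def to_bounded(p_internal, pmin, pmax):
        """Map an internal value back into the interval [pmin, pmax]."""
        return pmin + (np.sin(p_internal) + 1.0)*(pmax - pmin)/2.0

    # the round trip recovers the original bounded value:
    print(to_bounded(to_internal(0.5, 0.0, 1.0), 0.0, 1.0))   # 0.5
    # even an absurdly large internal value stays within the bounds:
    print(0.0 <= to_bounded(1.0e6, 0.0, 1.0) <= 1.0)          # True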
lmfit-py-1.0.0/doc/builtin_models.rst000066400000000000000000000511311357751001700175700ustar00rootroot00000000000000.. _builtin_models_chapter: ===================================================== Built-in Fitting Models in the :mod:`models` module ===================================================== .. module:: lmfit.models Lmfit provides several built-in fitting models in the :mod:`models` module. These pre-defined models each subclass from the :class:`~lmfit.model.Model` class of the previous chapter and wrap relatively well-known functional forms, such as Gaussians, Lorentzian, and Exponentials that are used in a wide range of scientific domains. In fact, all the models are based on simple, plain Python functions defined in the :mod:`~lmfit.lineshapes` module. In addition to wrapping a function into a :class:`~lmfit.model.Model`, these models also provide a :meth:`~lmfit.model.Model.guess` method that is intended to give a reasonable set of starting values from a data array that closely approximates the data to be fit. As shown in the previous chapter, a key feature of the :class:`~lmfit.model.Model` class is that models can easily be combined to give a composite :class:`~lmfit.model.CompositeModel`. Thus, while some of the models listed here may seem pretty trivial (notably, :class:`ConstantModel` and :class:`LinearModel`), the main point of having these is to be able to use them in composite models. For example, a Lorentzian plus a linear background might be represented as:: from lmfit.models import LinearModel, LorentzianModel peak = LorentzianModel() background = LinearModel() model = peak + background All the models listed below are one-dimensional, with an independent variable named ``x``. Many of these models represent a function with a distinct peak, and so share common features. To maintain uniformity, common parameter names are used whenever possible. Thus, most models have a parameter called ``amplitude`` that represents the overall intensity (or area of) a peak or function and a ``sigma`` parameter that gives a characteristic width. After a list of built-in models, a few examples of their use are given. Peak-like models ------------------- There are many peak-like models available. These include :class:`GaussianModel`, :class:`LorentzianModel`, :class:`VoigtModel`, :class:`PseudoVoigtModel`, and some less commonly used variations. Most of these models are *unit-normalized* and share the same parameter names so that you can easily switch between models and interpret the results. The ``amplitude`` parameter is the multiplicative factor for the unit-normalized peak lineshape, and so will represent the strength of that peak or the area under that curve. The ``center`` parameter will be the centroid ``x`` value. The ``sigma`` parameter is the characteristic width of the peak, with many functions using :math:`(x-\mu)/\sigma` where :math:`\mu` is the centroid value. Most of these peak functions will have two additional parameters derived from and constrained by the other parameters. The first of these is ``fwhm`` which will hold the estimated "Full Width at Half Max" for the peak, which is often easier to compare between different models than ``sigma``. The second of these is ``height`` which will contain the maximum value of the peak, typically the value at :math:`x = \mu`. 
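To see how this uniform naming works in practice, here is a brief sketch (the parameter values are arbitrary and chosen only for illustration) in which the same few lines evaluate two different peak models simply by swapping the model class::

    import numpy as np
    from lmfit.models import GaussianModel, LorentzianModel

    x = np.linspace(-10, 10, 201)
    for model in (GaussianModel(), LorentzianModel()):
        # both models accept the same parameter names
        pars = model.make_params(amplitude=10, center=0, sigma=1.5)
        y = model.eval(pars, x=x)
        print(model.__class__.__name__, y.max())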
Finally, each of these models has a :meth:`guess` method that uses data to make a fairly crude but usually sufficient guess for the value of ``amplitude``, ``center``, and ``sigma``, and sets a lower bound of 0 on the value of ``sigma``. :class:`GaussianModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: GaussianModel :class:`LorentzianModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: LorentzianModel :class:`SplitLorentzianModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: SplitLorentzianModel :class:`VoigtModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: VoigtModel :class:`PseudoVoigtModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: PseudoVoigtModel :class:`MoffatModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: MoffatModel :class:`Pearson7Model` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: Pearson7Model :class:`StudentsTModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: StudentsTModel :class:`BreitWignerModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: BreitWignerModel :class:`LognormalModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: LognormalModel :class:`DampedOcsillatorModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: DampedOscillatorModel :class:`DampedHarmonicOcsillatorModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: DampedHarmonicOscillatorModel :class:`ExponentialGaussianModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: ExponentialGaussianModel :class:`SkewedGaussianModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: SkewedGaussianModel :class:`SkewedVoigtModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: SkewedVoigtModel :class:`DonaichModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: DonaichModel Linear and Polynomial Models ------------------------------------ These models correspond to polynomials of some degree. Of course, lmfit is a very inefficient way to do linear regression (see :numpydoc:`polyfit` or :scipydoc:`stats.linregress`), but these models may be useful as one of many components of a composite model. :class:`ConstantModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: ConstantModel :class:`LinearModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: LinearModel :class:`QuadraticModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: QuadraticModel :class:`PolynomialModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: PolynomialModel Step-like models ----------------------------------------------- Two models represent step-like functions, and share many characteristics. :class:`StepModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: StepModel :class:`RectangleModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: RectangleModel Exponential and Power law models ----------------------------------------------- :class:`ExponentialModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: ExponentialModel :class:`PowerLawModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: PowerLawModel User-defined Models ---------------------------- .. _asteval: https://newville.github.io/asteval/ As shown in the previous chapter (:ref:`model_chapter`), it is fairly straightforward to build fitting models from parametrized Python functions. The number of model classes listed so far in the present chapter should make it clear that this process is not too difficult. Still, it is sometimes desirable to build models from a user-supplied function. This may be especially true if model-building is built-in to some larger library or application for fitting in which the user may not be able to easily build and use a new model from Python code. 
The :class:`ExpressionModel` allows a model to be built from a user-supplied expression. This uses the `asteval`_ module also used for mathematical constraints as discussed in :ref:`constraints_chapter`. :class:`ExpressionModel` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: ExpressionModel Since the point of this model is that an arbitrary expression will be supplied, the determination of what are the parameter names for the model happens when the model is created. To do this, the expression is parsed, and all symbol names are found. Names that are already known (there are over 500 function and value names in the asteval namespace, including most Python built-ins, more than 200 functions inherited from NumPy, and more than 20 common lineshapes defined in the :mod:`lineshapes` module) are not converted to parameters. Unrecognized names are expected to be names of either parameters or independent variables. If ``independent_vars`` is the default value of None, and if the expression contains a variable named ``x``, that will be used as the independent variable. Otherwise, ``independent_vars`` must be given. For example, if one creates an :class:`ExpressionModel` as:: mod = ExpressionModel('off + amp * exp(-x/x0) * sin(x*phase)') The name ``exp`` will be recognized as the exponent function, so the model will be interpreted to have parameters named ``off``, ``amp``, ``x0`` and ``phase``. In addition, ``x`` will be assumed to be the sole independent variable. In general, there is no obvious way to set default parameter values or parameter hints for bounds, so this will have to be handled explicitly. To evaluate this model, you might do the following:: x = numpy.linspace(0, 10, 501) params = mod.make_params(off=0.25, amp=1.0, x0=2.0, phase=0.04) y = mod.eval(params, x=x) While many custom models can be built with a single line expression (especially since the names of the lineshapes like ``gaussian``, ``lorentzian`` and so on, as well as many NumPy functions, are available), more complex models will inevitably require multiple line functions. You can include such Python code with the ``init_script`` argument. The text of this script is evaluated when the model is initialized (and before the actual expression is parsed), so that you can define functions to be used in your expression. As a probably unphysical example, to make a model that is the derivative of a Gaussian function times the logarithm of a Lorentzian function you may could to define this in a script:: script = """ def mycurve(x, amp, cen, sig): loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig) gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig) return log(loren) * gradient(gauss) / gradient(x) """ and then use this with :class:`ExpressionModel` as:: mod = ExpressionModel('mycurve(x, height, mid, wid)', init_script=script, independent_vars=['x']) As above, this will interpret the parameter names to be ``height``, ``mid``, and ``wid``, and build a model that can be used to fit data. Example 1: Fit Peak data to Gaussian, Lorentzian, and Voigt profiles ------------------------------------------------------------------------ Here, we will fit data to three similar line shapes, in order to decide which might be the better model. We will start with a Gaussian profile, as in the previous chapter, but use the built-in :class:`GaussianModel` instead of writing one ourselves. This is a slightly different version from the one in previous example in that the parameter names are different, and have built-in default values. 
We will simply use: .. jupyter-execute:: :hide-output: from numpy import loadtxt from lmfit.models import GaussianModel data = loadtxt('test_peak.dat') x = data[:, 0] y = data[:, 1] mod = GaussianModel() pars = mod.guess(y, x=x) out = mod.fit(y, pars, x=x) print(out.fit_report(min_correl=0.25)) which prints out the results: .. jupyter-execute:: :hide-code: print(out.fit_report(min_correl=0.25)) We see a few interesting differences from the results of the previous chapter. First, the parameter names are longer. Second, there are ``fwhm`` and ``height`` parameters, to give the full-width-at-half-maximum and maximum peak height, respectively. And third, the automated initial guesses are pretty good. A plot of the fit: .. jupyter-execute:: :hide-code: import matplotlib as mpl mpl.rcParams['figure.dpi'] = 150 %matplotlib inline %config InlineBackend.figure_format = 'svg' import matplotlib.pyplot as plt plt.plot(x, y, 'b-') plt.plot(x, out.best_fit, 'r-', label='Gaussian Model') plt.legend(loc='best') plt.show() shows a decent match to the data -- the fit worked with no explicit setting of initial parameter values. Looking more closely, the fit is not perfect, especially in the tails of the peak, suggesting that a different peak shape, with longer tails, should be used. Perhaps a Lorentzian would be better? To do this, we simply replace ``GaussianModel`` with ``LorentzianModel`` to get a :class:`LorentzianModel`: .. jupyter-execute:: from lmfit.models import LorentzianModel mod = LorentzianModel() with the rest of the script as above. Perhaps predictably, the first thing we try gives results that are worse by comaparing the fit statistics: .. jupyter-execute:: :hide-code: pars = mod.guess(y, x=x) out = mod.fit(y, pars, x=x) print(out.fit_report(min_correl=0.25)) and also by visual inspection of the fit to the data (figure below). .. jupyter-execute:: :hide-code: plt.plot(x, y, 'b-') plt.plot(x, out.best_fit, 'r-', label='Lorentzian Model') plt.legend(loc='best') plt.show() The tails are now too big, and the value for :math:`\chi^2` almost doubled. A Voigt model does a better job. Using :class:`VoigtModel`, this is as simple as using: .. jupyter-execute:: from lmfit.models import VoigtModel mod = VoigtModel() with all the rest of the script as above. This gives: .. jupyter-execute:: :hide-code: pars = mod.guess(y, x=x) out = mod.fit(y, pars, x=x) print(out.fit_report(min_correl=0.25)) which has a much better value for :math:`\chi^2` and the other goodness-of-fit measures, and an obviously better match to the data as seen in the figure below (left). .. jupyter-execute:: :hide-code: fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8)) axes[0].plot(x, y, 'b-') axes[0].plot(x, out.best_fit, 'r-', label='Voigt Model\ngamma constrained') axes[0].legend(loc='best') # free gamma parameter pars['gamma'].set(value=0.7, vary=True, expr='') out_gamma = mod.fit(y, pars, x=x) axes[1].plot(x, y, 'b-') axes[1].plot(x, out_gamma.best_fit, 'r-', label='Voigt Model\ngamma unconstrained') axes[1].legend(loc='best') plt.show() Fit to peak with Voigt model (left) and Voigt model with ``gamma`` varying independently of ``sigma`` (right). Can we do better? The Voigt function has a :math:`\gamma` parameter (``gamma``) that can be distinct from ``sigma``. The default behavior used above constrains ``gamma`` to have exactly the same value as ``sigma``. If we allow these to vary separately, does the fit improve? 
To do this, we have to change the ``gamma`` parameter from a constrained expression and give it a starting value using something like:: mod = VoigtModel() pars = mod.guess(y, x=x) pars['gamma'].set(value=0.7, vary=True, expr='') which gives: .. jupyter-execute:: :hide-code: print(out_gamma.fit_report(min_correl=0.25)) and the fit shown on the right above. Comparing the two fits with the Voigt function, we see that :math:`\chi^2` is definitely improved with a separately varying ``gamma`` parameter. In addition, the two values for ``gamma`` and ``sigma`` differ significantly -- well outside the estimated uncertainties. More compelling, reduced :math:`\chi^2` is improved even though a fourth variable has been added to the fit. In the simplest statistical sense, this suggests that ``gamma`` is a significant variable in the model. In addition, we can use both the Akaike or Bayesian Information Criteria (see :ref:`information_criteria_label`) to assess how likely the model with variable ``gamma`` is to explain the data than the model with ``gamma`` fixed to the value of ``sigma``. According to theory, :math:`\exp(-(\rm{AIC1}-\rm{AIC0})/2)` gives the probability that a model with AIC1 is more likely than a model with AIC0. For the two models here, with AIC values of -1436 and -1324 (Note: if we had more carefully set the value for ``weights`` based on the noise in the data, these values might be positive, but there difference would be roughly the same), this says that the model with ``gamma`` fixed to ``sigma`` has a probability less than 5.e-25 of being the better model. Example 2: Fit data to a Composite Model with pre-defined models ------------------------------------------------------------------ Here, we repeat the point made at the end of the last chapter that instances of :class:`~lmfit.model.Model` class can be added together to make a *composite model*. By using the large number of built-in models available, it is therefore very simple to build models that contain multiple peaks and various backgrounds. An example of a simple fit to a noisy step function plus a constant: .. jupyter-execute:: ../examples/doc_builtinmodels_stepmodel.py :hide-output: After constructing step-like data, we first create a :class:`StepModel` telling it to use the ``erf`` form (see details above), and a :class:`ConstantModel`. We set initial values, in one case using the data and :meth:`guess` method for the initial step function paramaters, and :meth:`make_params` arguments for the linear component. After making a composite model, we run :meth:`fit` and report the results, which gives: .. jupyter-execute:: :hide-code: print(out.fit_report()) with a plot of .. jupyter-execute:: :hide-code: plt.plot(x, y, 'b') plt.plot(x, out.init_fit, 'k--', label='initial fit') plt.plot(x, out.best_fit, 'r-', label='best fit') plt.legend(loc='best') plt.show() Example 3: Fitting Multiple Peaks -- and using Prefixes ------------------------------------------------------------------ .. _NIST StRD: https://itl.nist.gov/div898/strd/nls/nls_main.shtml As shown above, many of the models have similar parameter names. For composite models, this could lead to a problem of having parameters for different parts of the model having the same name. To overcome this, each :class:`~lmfit.model.Model` can have a ``prefix`` attribute (normally set to a blank string) that will be put at the beginning of each parameter name. 
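As a minimal sketch of the effect of ``prefix`` (a hypothetical two-Gaussian composite, separate from the NIST example that follows), the prefixes simply decorate the parameter names so that the two components do not collide::

    from lmfit.models import GaussianModel

    g1 = GaussianModel(prefix='g1_')
    g2 = GaussianModel(prefix='g2_')
    mod = g1 + g2
    # parameter names now carry the prefixes:
    # 'g1_amplitude', 'g1_center', 'g1_sigma', 'g2_amplitude', ...
    print(mod.param_names)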
To illustrate, we fit one of the classic datasets from the `NIST StRD`_ suite
involving a decaying exponential and two Gaussians.

.. jupyter-execute:: ../examples/doc_builtinmodels_nistgauss.py
    :hide-output:

where we give a separate prefix to each model (they all have an ``amplitude``
parameter). The ``prefix`` values are attached transparently to the models.
Note that the calls to :meth:`make_params` used the bare name, without the
prefix. We could have used the prefixes, but because we used the individual
models ``gauss1`` and ``gauss2``, there was no need.

Note also in the example here that we explicitly set bounds on many of the
parameter values. The fit results printed out are:

.. jupyter-execute::
    :hide-code:

    print(out.fit_report())

We get a very good fit to this problem (described at the NIST site as of
average difficulty, but the tests there are generally deliberately
challenging) by applying reasonable initial guesses and putting modest but
explicit bounds on the parameter values. The overall fit is shown on the left,
with its individual components displayed on the right:

.. jupyter-execute::
    :hide-code:

    fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))

    axes[0].plot(x, y, 'b')
    axes[0].plot(x, init, 'k--', label='initial fit')
    axes[0].plot(x, out.best_fit, 'r-', label='best fit')
    axes[0].legend(loc='best')

    comps = out.eval_components(x=x)
    axes[1].plot(x, y, 'b')
    axes[1].plot(x, comps['g1_'], 'g--', label='Gaussian component 1')
    axes[1].plot(x, comps['g2_'], 'm--', label='Gaussian component 2')
    axes[1].plot(x, comps['exp_'], 'k--', label='Exponential component')
    axes[1].legend(loc='best')
    plt.show()

One final point on setting initial values. From looking at the data itself, we
can see the two Gaussian peaks are reasonably well separated but do overlap.
Furthermore, we can tell that the initial guess for the decaying exponential
component was poorly estimated because we used the full data range. We can
simplify the setting of initial parameter values by using these observations,
and by defining an :func:`index_of` function to limit the data range. That is,
with::

    def index_of(arrval, value):
        """Return index of array *at or below* value."""
        if value < min(arrval):
            return 0
        return max(np.where(arrval <= value)[0])

    ix1 = index_of(x, 75)
    ix2 = index_of(x, 135)
    ix3 = index_of(x, 175)

    exp_mod.guess(y[:ix1], x=x[:ix1])
    gauss1.guess(y[ix1:ix2], x=x[ix1:ix2])
    gauss2.guess(y[ix2:ix3], x=x[ix2:ix3])

.. jupyter-execute:: ../examples/doc_builtinmodels_nistgauss2.py
    :hide-code:
    :hide-output:

we can get a better initial estimate (see below).

.. jupyter-execute::
    :hide-code:

    plt.plot(x, y, 'b')
    plt.plot(x, out.init_fit, 'k--', label='initial fit')
    plt.plot(x, out.best_fit, 'r-', label='best fit')
    plt.legend(loc='best')
    plt.show()

The fit converges to the same answer, giving identical values (to the
precision printed out in the report), but in fewer steps, and without any
bounds on parameters at all:

.. jupyter-execute::
    :hide-code:

    print(out.fit_report())

This script is in the file ``doc_builtinmodels_nistgauss2.py`` in the examples
folder, and the figure above shows an improved initial estimate of the data.
lmfit-py-1.0.0/doc/conf.py000066400000000000000000000136161357751001700153300ustar00rootroot00000000000000# lmfit documentation build configuration file
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import date import os import sys import lmfit # -------------------------- General configuration -------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath(os.path.join('.'))) # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # we want to swap sphinx.ext.mathjax and sphinx.ext.pngmath depending on the # type of documentation we build from extensions import extensions # shpinx.ext.napoleon settings napoleon_google_docstring = False # shpinx.ext.autodoc settings autoclass_content = 'both' # shpinx.ext.intersphinx settings intersphinx_mapping = {'py': ('https://docs.python.org/3', None), 'numpy': ('https://docs.scipy.org/doc/numpy/', None), 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None), } # shpinx.ext.extlinks settings extlinks = { 'scipydoc': ('https://docs.scipy.org/doc/scipy/reference/generated/scipy.%s.html', 'scipy.'), 'numpydoc': ('https://docs.scipy.org/doc/numpy/reference/generated/numpy.%s.html', 'numpy.'), } # sphinx.ext.imgmath settings imgmath_image_format = 'svg' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = {'.rst': 'restructuredtext'} # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'lmfit' copyright = u'{}, Matthew Newville, Till Stensitzki, and others'.format(date.today().year) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the version = release = lmfit.__version__.split('+', 1)[0] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['sphinx/theme'] html_theme = 'lmfitdoc' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python' # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = 'Minimization and Curve-Fitting for Python' # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # Custom sidebar templates, maps document names to template names. html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']} html_domain_indices = False html_use_index = True #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'lmfitdoc' # -- Options for LaTeX output -------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'lmfit.tex', 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python', 'Matthew Newville, Till Stensitzki, and others', 'manual'), ] # configuration for jupyter_sphinx package_path = os.path.abspath('../..') os.environ['PYTHONPATH'] = ':'.join((package_path, os.environ.get('PYTHONPATH', ''))) image_converter_args=["-density", "300"] # Sphinx-gallery configuration sphinx_gallery_conf = { 'examples_dirs': '../examples', 'gallery_dirs': 'examples', 'filename_pattern': '/documentation|/example_', 'ignore_pattern': '/doc_', 'expected_failing_examples': ['../examples/documentation/model_loadmodel.py'] } lmfit-py-1.0.0/doc/confidence.rst000066400000000000000000000215001357751001700166510ustar00rootroot00000000000000.. _confidence_chapter: Calculation of confidence intervals ==================================== .. module:: lmfit.confidence The lmfit :mod:`confidence` module allows you to explicitly calculate confidence intervals for variable parameters. For most models, it is not necessary since the estimation of the standard error from the estimated covariance matrix is normally quite good. But for some models, the sum of two exponentials for example, the approximation begins to fail. For this case, lmfit has the function :func:`conf_interval` to calculate confidence intervals directly. This is substantially slower than using the errors estimated from the covariance matrix, but the results are more robust. Method used for calculating confidence intervals ------------------------------------------------- The F-test is used to compare our null model, which is the best fit we have found, with an alternate model, where one of the parameters is fixed to a specific value. The value is changed until the difference between :math:`\chi^2_0` and :math:`\chi^2_{f}` can't be explained by the loss of a degree of freedom within a certain confidence. .. math:: F(P_{fix},N-P) = \left(\frac{\chi^2_f}{\chi^2_{0}}-1\right)\frac{N-P}{P_{fix}} ``N`` is the number of data points and ``P`` the number of parameters of the null model. 
:math:`P_{fix}` is the number of fixed parameters (or, to be more clear, the
difference in the number of parameters between our null model and the
alternate model).

Adding a log-likelihood method is under consideration.

A basic example
---------------

First we create an example problem:

.. jupyter-execute::

    import numpy as np

    import lmfit

    x = np.linspace(0.3, 10, 100)
    np.random.seed(0)
    y = 1/(0.1*x) + 2 + 0.1*np.random.randn(x.size)

    pars = lmfit.Parameters()
    pars.add_many(('a', 0.1), ('b', 1))

    def residual(p):
        return 1/(p['a']*x) + p['b'] - y

before we can generate the confidence intervals, we have to run a fit, so that
the automated estimate of the standard errors can be used as a starting point:

.. jupyter-execute::

    mini = lmfit.Minimizer(residual, pars)
    result = mini.minimize()

    print(lmfit.fit_report(result.params))

Now it is just a simple function call to calculate the confidence intervals:

.. jupyter-execute::

    ci = lmfit.conf_interval(mini, result)
    lmfit.printfuncs.report_ci(ci)

This shows the best-fit values for the parameters in the ``_BEST_`` column,
and parameter values that are at the varying confidence levels given by steps
in :math:`\sigma`. As we can see, the estimated error is almost the same, and
the uncertainties are well behaved: Going from 1-:math:`\sigma` (68%
confidence) to 3-:math:`\sigma` (99.7% confidence) uncertainties is fairly
linear. It can also be seen that the errors are fairly symmetric around the
best fit value. For this problem, it is not necessary to calculate confidence
intervals, and the estimates of the uncertainties from the covariance matrix
are sufficient.

Working without standard error estimates
----------------------------------------

Sometimes the estimation of the standard errors from the covariance matrix
fails, especially if values are near given bounds. Hence, to find the
confidence intervals in these cases, it is necessary to set the errors by
hand. Note that the standard error is only used to find an upper limit for
each value, hence the exact value is not important.

To set the step-size to 10% of the initial value we loop through all
parameters and set it manually:

.. jupyter-execute::

    for p in result.params:
        result.params[p].stderr = abs(result.params[p].value * 0.1)

.. _label-confidence-advanced:

An advanced example for evaluating confidence intervals
---------------------------------------------------------

Now we look at a problem where calculating the error from approximated
covariance can lead to a misleading result -- the same double exponential
problem shown in :ref:`label-emcee`. In fact, such a problem is particularly
hard for the Levenberg-Marquardt method, so we first estimate the results
using the slower but robust Nelder-Mead method. We can then compare the
uncertainties computed (if the ``numdifftools`` package is installed) with
those estimated using Levenberg-Marquardt around the previously found
solution. We can also compare to the results of using ``emcee``.

.. jupyter-execute::
    :hide-code:

    import warnings
    warnings.filterwarnings(action="ignore")
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    mpl.rcParams['figure.dpi'] = 150
    %matplotlib inline
    %config InlineBackend.figure_format = 'svg'

.. jupyter-execute:: ../examples/doc_confidence_advanced.py
    :hide-output:

which will report:

.. jupyter-execute::
    :hide-code:

    lmfit.report_fit(out2.params, min_correl=0.5)
    print('')
    lmfit.printfuncs.report_ci(ci)

Again we called :func:`conf_interval`, this time with tracing and only for 1-
and 2-:math:`\sigma`.
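A call of the following form produces such output (a sketch that assumes the
``mini`` Minimizer and the ``out2`` fit result created in the example script
above)::

    ci, trace = lmfit.conf_interval(mini, out2, sigmas=[1, 2], trace=True)
    lmfit.printfuncs.report_ci(ci)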
Comparing these two different estimates, we see that the estimate for ``a1``
is reasonably well approximated from the covariance matrix, but the estimates
for ``a2`` and especially for ``t1`` and ``t2`` are very asymmetric, and that
going from 1 :math:`\sigma` (68% confidence) to 2 :math:`\sigma` (95%
confidence) is not very predictable.

Plots of the confidence region are shown in the figures below for ``a1`` and
``t2`` (left), and ``a2`` and ``t2`` (right):

.. jupyter-execute::
    :hide-code:

    fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))

    cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a1', 't2', 30, 30)
    ctp = axes[0].contourf(cx, cy, grid, np.linspace(0, 1, 11))
    fig.colorbar(ctp, ax=axes[0])
    axes[0].set_xlabel('a1')
    axes[0].set_ylabel('t2')

    cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a2', 't2', 30, 30)
    ctp = axes[1].contourf(cx, cy, grid, np.linspace(0, 1, 11))
    fig.colorbar(ctp, ax=axes[1])
    axes[1].set_xlabel('a2')
    axes[1].set_ylabel('t2')
    plt.show()

Neither of these plots is very much like an ellipse, which is implicitly
assumed by the approach using the covariance matrix. The plots actually look
quite a bit like those found with MCMC and shown in the "corner plot" in
:ref:`label-emcee`. In fact, comparing the confidence interval results here
with the results for the 1- and 2-:math:`\sigma` error estimated with
``emcee``, we can see that the agreement is pretty good and that the asymmetry
in the parameter distributions is reflected well in the asymmetry of the
uncertainties.

The trace returned as the optional second argument from :func:`conf_interval`
contains a dictionary for each variable parameter. The values are dictionaries
with arrays of values for each variable, and an array of corresponding
probabilities for the cumulative variables. This can be used to show the
dependence between two parameters:

.. jupyter-execute::
    :hide-output:

    fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))

    cx1, cy1, prob = trace['a1']['a1'], trace['a1']['t2'], trace['a1']['prob']
    cx2, cy2, prob2 = trace['t2']['t2'], trace['t2']['a1'], trace['t2']['prob']

    axes[0].scatter(cx1, cy1, c=prob, s=30)
    axes[0].set_xlabel('a1')
    axes[0].set_ylabel('t2')

    axes[1].scatter(cx2, cy2, c=prob2, s=30)
    axes[1].set_xlabel('t2')
    axes[1].set_ylabel('a1')
    plt.show()

which shows the trace of values:

.. jupyter-execute::
    :hide-code:

    fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))

    cx1, cy1, prob = trace['a1']['a1'], trace['a1']['t2'], trace['a1']['prob']
    cx2, cy2, prob2 = trace['t2']['t2'], trace['t2']['a1'], trace['t2']['prob']

    axes[0].scatter(cx1, cy1, c=prob, s=30)
    axes[0].set_xlabel('a1')
    axes[0].set_ylabel('t2')

    axes[1].scatter(cx2, cy2, c=prob2, s=30)
    axes[1].set_xlabel('t2')
    axes[1].set_ylabel('a1')
    plt.show()

As an alternative/complement to the confidence intervals, the
:meth:`Minimizer.emcee` method uses Markov Chain Monte Carlo to sample the
posterior probability distribution. These distributions demonstrate the range
of solutions that the data supports, and we refer to :ref:`label-emcee` where
this methodology was used on the same problem.

Credible intervals (the Bayesian equivalent of the frequentist confidence
interval) can be obtained with this method. MCMC can be used for model
selection, to determine outliers, to marginalise over nuisance parameters,
etcetera. For example, you may have fractionally underestimated the
uncertainties on a dataset. MCMC can be used to estimate the true level of
uncertainty on each datapoint.
A tutorial on the possibilities offered by MCMC can be found at [1]_. .. [1] https://jakevdp.github.io/blog/2014/03/11/frequentism-and-bayesianism-a-practical-intro/ Confidence Interval Functions ---------------------------------- .. autofunction:: lmfit.conf_interval .. autofunction:: lmfit.conf_interval2d .. autofunction:: lmfit.ci_report lmfit-py-1.0.0/doc/constraints.rst000066400000000000000000000156361357751001700171400ustar00rootroot00000000000000.. _constraints_chapter: ================================= Using Mathematical Constraints ================================= .. _asteval: https://newville.github.io/asteval/ Being able to fix variables to a constant value or place upper and lower bounds on their values can greatly simplify modeling real data. These capabilities are key to lmfit's Parameters. In addition, it is sometimes highly desirable to place mathematical constraints on parameter values. For example, one might want to require that two Gaussian peaks have the same width, or have amplitudes that are constrained to add to some value. Of course, one could rewrite the objective or model function to place such requirements, but this is somewhat error prone, and limits the flexibility so that exploring constraints becomes laborious. To simplify the setting of constraints, Parameters can be assigned a mathematical expression of other Parameters, builtin constants, and builtin mathematical functions that will be used to determine its value. The expressions used for constraints are evaluated using the `asteval`_ module, which uses Python syntax, and evaluates the constraint expressions in a safe and isolated namespace. This approach to mathematical constraints allows one to not have to write a separate model function for two Gaussians where the two ``sigma`` values are forced to be equal, or where amplitudes are related. Instead, one can write a more general two Gaussian model (perhaps using :class:`GaussianModel`) and impose such constraints on the Parameters for a particular fit. Overview =============== Just as one can place bounds on a Parameter, or keep it fixed during the fit, so too can one place mathematical constraints on parameters. The way this is done with lmfit is to write a Parameter as a mathematical expression of the other parameters and a set of pre-defined operators and functions. The constraint expressions are simple Python statements, allowing one to place constraints like:: pars = Parameters() pars.add('frac_curve1', value=0.5, min=0, max=1) pars.add('frac_curve2', expr='1-frac_curve1') as the value of the ``frac_curve1`` parameter is updated at each step in the fit, the value of ``frac_curve2`` will be updated so that the two values are constrained to add to 1.0. Of course, such a constraint could be placed in the fitting function, but the use of such constraints allows the end-user to modify the model of a more general-purpose fitting function. Nearly any valid mathematical expression can be used, and a variety of built-in functions are available for flexible modeling. Supported Operators, Functions, and Constants ================================================= The mathematical expressions used to define constrained Parameters need to be valid python expressions. As you'd expect, the operators '+', '-', '*', '/', '**', are supported. In fact, a much more complete set can be used, including Python's bit- and logical operators:: +, -, *, /, **, &, |, ^, <<, >>, %, and, or, ==, >, >=, <, <=, !=, ~, not, is, is not, in, not in The values for ``e`` (2.7182818...) 
and ``pi`` (3.1415926...) are available, as are several supported
mathematical and trigonometric functions::

    abs, acos, acosh, asin, asinh, atan, atan2, atanh, ceil,
    copysign, cos, cosh, degrees, exp, fabs, factorial,
    floor, fmod, frexp, fsum, hypot, isinf, isnan, ldexp,
    log, log10, log1p, max, min, modf, pow, radians, sin,
    sinh, sqrt, tan, tanh, trunc

In addition, all Parameter names will be available in the mathematical
expressions. Thus, with parameters for a few peak-like functions::

    pars = Parameters()
    pars.add('amp_1', value=0.5, min=0, max=1)
    pars.add('cen_1', value=2.2)
    pars.add('wid_1', value=0.2)

The following expressions are all valid::

    pars.add('amp_2', expr='(2.0 - amp_1**2)')
    pars.add('cen_2', expr='cen_1 * wid_2 / max(wid_1, 0.001)')
    pars.add('wid_2', expr='sqrt(pi)*wid_1')

In fact, almost any valid Python expression is allowed. A notable example is
that Python's 1-line *if expression* is supported::

    pars.add('bounded', expr='param_a if test_val/2. > 100 else param_b')

which is equivalent to the more familiar::

    if test_val/2. > 100:
        bounded = param_a
    else:
        bounded = param_b

Using Inequality Constraints
==============================

A rather common question is how to set up constraints that use an inequality,
say, :math:`x + y \le 10`. This can be done with algebraic constraints by
recasting the problem as :math:`x + y = \delta` and :math:`\delta \le 10`.
That is, first, allow :math:`x` to be held by the freely varying parameter
``x``. Next, define a parameter ``delta`` to be variable with a maximum value
of 10, and define parameter ``y`` as ``delta - x``::

    pars = Parameters()
    pars.add('x', value=5, vary=True)
    pars.add('delta', value=5, max=10, vary=True)
    pars.add('y', expr='delta-x')

The essential point is that an inequality still implies that a variable (here,
``delta``) is needed to describe the constraint. The secondary point is that
upper and lower bounds can be used as part of the inequality to make the
definitions more convenient.

Advanced usage of Expressions in lmfit
=============================================

The expression used in a constraint is converted to a Python `Abstract Syntax
Tree <https://docs.python.org/3/library/ast.html>`_, which is an intermediate
version of the expression -- a syntax-checked, partially compiled expression.
Among other things, this means that Python's own parser is used to parse and
convert the expression into something that can easily be evaluated within
Python. It also means that the symbols in the expressions can point to any
Python object.

In fact, the use of Python's AST allows a nearly full version of Python to be
supported, without using Python's built-in :meth:`eval` function. The
`asteval`_ module actually supports most Python syntax, including for- and
while-loops, conditional expressions, and user-defined functions. There are
several unsupported Python constructs, most notably the class statement, so
that new classes cannot be created, and the import statement, which helps make
the `asteval`_ module safe from malicious use.

One important feature of the `asteval`_ module is that you can add
domain-specific functions into it for later use in constraint expressions. To
do this, you would use the :attr:`asteval` attribute of the :class:`Minimizer`
class, which contains a complete AST interpreter. The `asteval`_ interpreter
uses a flat namespace, implemented as a single dictionary.
That means you can preload any Python symbol into the namespace for the constraints:: def mylorentzian(x, amp, cen, wid): "lorentzian function: wid = half-width at half-max" return (amp / (1 + ((x-cen) / wid)**2)) fitter = Minimizer() fitter.asteval.symtable['lorentzian'] = mylorentzian and this :meth:`lorentzian` function can now be used in constraint expressions. lmfit-py-1.0.0/doc/contents.rst000066400000000000000000000003501357751001700164110ustar00rootroot00000000000000:orphan: Contents ================= .. toctree:: :maxdepth: 3 intro installation whatsnew support faq parameters fitting model builtin_models confidence bounds constraints examples/index lmfit-py-1.0.0/doc/doc_examples_to_gallery.py000077500000000000000000000045161357751001700212730ustar00rootroot00000000000000#! /usr/bin/env python """ Process the examples in the documentation for inclusion in the Gallery: - create a "documentation" directory within "examples" - add a README.txt file - copy the examples from the documentation, bu remove the "doc_" from the filename - add the required docstring to the files for proper rendering - copy the data files """ import os import time basedir = os.getcwd() examples_dir = os.path.abspath(os.path.join(basedir, '..', 'examples')) files = [fn for fn in os.listdir(examples_dir) if fn.startswith('doc_')] examples_documentation_dir = os.path.join(examples_dir, 'documentation') os.makedirs(examples_documentation_dir, exist_ok=True) scripts_to_run = [] with open(os.path.join(examples_documentation_dir, 'README.txt'), 'w') as out: out.write("Examples from the documentation\n") out.write("===============================\n\n") out.write("Below are all the examples that are part of the lmfit documentation.") for fn in files: inp_path = os.path.join(examples_dir, fn) with open(inp_path, 'r') as inp: script_text = inp.read() gallery_file = os.path.join(examples_documentation_dir, fn[4:]) with open(gallery_file, 'w') as out: msg = "" if 'model_loadmodel.py' in fn: msg = ('.. note:: This example *does* actually work, but running from within ' ' sphinx-gallery fails to find symbols saved in the save file.') out.write('"""\n{}\n{}\n\n{}\n"""\n'.format(fn, "="*len(fn), msg)) out.write('##\nimport warnings\nwarnings.filterwarnings("ignore")\n##\n') out.write(script_text) # make sure the saved Models and ModelResult are available if 'save' in fn: scripts_to_run.append(fn[4:]) time.sleep(1.0) os.system('cp {}/*.dat {}'.format(examples_dir, examples_documentation_dir)) os.system('cp {}/*.csv {}'.format(examples_dir, examples_documentation_dir)) os.system('cp {}/*.sav {}'.format(examples_dir, examples_documentation_dir)) # os.chdir(examples_documentation_dir) for script in scripts_to_run: os.system('python {}'.format(script)) os.chdir(basedir) time.sleep(1.0) # data files for the other Gallery examples os.system('cp {}/*.dat .'.format(examples_documentation_dir)) os.system('cp {}/*.csv .'.format(examples_documentation_dir)) os.system('cp {}/*.sav .'.format(examples_documentation_dir)) lmfit-py-1.0.0/doc/extensions.py000066400000000000000000000006201357751001700165730ustar00rootroot00000000000000# sphinx extensions for mathjax extensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'IPython.sphinxext.ipython_console_highlighting', 'jupyter_sphinx.execute', 'sphinx_gallery.gen_gallery'] lmfit-py-1.0.0/doc/faq.rst000066400000000000000000000144241357751001700153320ustar00rootroot00000000000000.. 
_faq_chapter: ==================================== Frequently Asked Questions ==================================== A list of common questions. What's the best way to ask for help or submit a bug report? ================================================================ See :ref:`support_chapter`. Why did my script break when upgrading from lmfit 0.8.3 to 0.9.0? ==================================================================== See :ref:`whatsnew_090_label` I get import errors from IPython ============================================================== If you see something like:: from IPython.html.widgets import Dropdown ImportError: No module named 'widgets' then you need to install the ``ipywidgets`` package, try: ``pip install ipywidgets``. How can I fit multi-dimensional data? ======================================== The fitting routines accept data arrays that are one-dimensional and double precision. So you need to convert the data and model (or the value returned by the objective function) to be one-dimensional. A simple way to do this is to use :numpydoc:`ndarray.flatten`, for example:: def residual(params, x, data=None): .... resid = calculate_multidim_residual() return resid.flatten() How can I fit multiple data sets? ======================================== As above, the fitting routines accept data arrays that are one-dimensional and double precision. So you need to convert the sets of data and models (or the value returned by the objective function) to be one-dimensional. A simple way to do this is to use :numpydoc:`concatenate`. As an example, here is a residual function to simultaneously fit two lines to two different arrays. As a bonus, the two lines share the 'offset' parameter:: import numpy as np def fit_function(params, x=None, dat1=None, dat2=None): model1 = params['offset'] + x * params['slope1'] model2 = params['offset'] + x * params['slope2'] resid1 = dat1 - model1 resid2 = dat2 - model2 return np.concatenate((resid1, resid2)) How can I fit complex data? =================================== As with working with multi-dimensional data, you need to convert your data and model (or the value returned by the objective function) to be double precision, floating point numbers. The simplest approach is to use :numpydoc:`ndarray.view`, perhaps like:: import numpy as np def residual(params, x, data=None): .... resid = calculate_complex_residual() return resid.view(np.float) Alternately, you can use the :class:`lmfit.Model` class to wrap a fit function that returns a complex vector. It will automatically apply the above prescription when calculating the residual. The benefit to this method is that you also get access to the plot routines from the ModelResult class, which are also complex-aware. Can I constrain values to have integer values? =============================================== Basically, no. None of the minimizers in lmfit support integer programming. They all (I think) assume that they can make a very small change to a floating point value for a parameters value and see a change in the value to be minimized. How should I cite LMFIT? ================================== See https://dx.doi.org/10.5281/zenodo.11813 I get errors from NaN in my fit. What can I do? ====================================================== The solvers used by lmfit use NaN (see https://en.wikipedia.org/wiki/NaN) values as signals that the calculation cannot continue. 
If any value in the residual array (typically `(data-model)*weight`) is NaN,
then calculations of chi-square or comparisons with other residual arrays to
try to find a better fit will also give NaN and fail. There is no sensible way
for lmfit or any of the optimization routines to know how to handle such NaN
values. They indicate that numerical calculations are not sensible and must
stop. This means that if your objective function (if using ``minimize``) or
model function (if using ``Model``) generates a NaN, the fit will stop
immediately. If your objective or model function generates a NaN, you really
must handle that.

`nan_policy`
~~~~~~~~~~~~~~~~~~

If you are using :class:`lmfit.Model` and the NaN values come from your data
array and are meant to indicate missing values, or if you are using
:func:`lmfit.minimize` with the same basic intention, then it might be
possible to get a successful fit in spite of the NaN values. To do this, you
can add a ``nan_policy='omit'`` argument to :func:`lmfit.minimize`, or when
creating a :class:`lmfit.Model`, or when running :meth:`lmfit.Model.fit`.

In order for this to be effective, the number of NaN values cannot ever change
during the fit. If the NaN values come from the data and not the calculated
model, that should be the case.

Common sources of NaN
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If you are seeing errors due to NaN values, you will need to figure out where
they are coming from and eliminate them. It is sometimes difficult to tell
what causes NaN values. Keep in mind that all values should be assumed to be
either scalar values or numpy arrays of double precision real numbers when
fitting. Some of the most likely causes of NaNs are:

* taking ``sqrt(x)`` or ``log(x)`` where ``x`` is negative.

* doing ``x**y`` where ``x`` is negative. Since ``y`` is real, there will be
  a fractional component, and a negative number to a fractional exponent is
  not a real number.

* doing ``x/y`` where both ``x`` and ``y`` are 0.

If you use these very common constructs in your objective or model function,
you should take some care about what values you are passing to these functions
and operators. Many special functions have similar limitations and should also
be viewed with some suspicion if NaNs are being generated.

A related problem is the generation of Inf (Infinity in floating point), which
generally comes from ``exp(x)`` where ``x`` has values greater than 700 or so,
so that the resulting value is greater than 1.e308. Inf is only slightly
better than NaN. It will completely ruin the ability to do the fit. However,
unlike NaN, it is also usually clear how to handle Inf, as you probably won't
ever have values greater than 1.e308 and can therefore (usually) safely clip
the argument passed to ``exp()`` to be smaller than about 700.
lmfit-py-1.0.0/doc/fitting.rst000066400000000000000000001035241357751001700162270ustar00rootroot00000000000000.. _minimize_chapter:

.. module:: lmfit.minimizer

=======================================
Performing Fits and Analyzing Outputs
=======================================

As shown in the previous chapter, a simple fit can be performed with the
:func:`minimize` function. For more sophisticated modeling, the
:class:`Minimizer` class can be used to gain a bit more control, especially
when using complicated constraints or comparing results from related fits.

The :func:`minimize` function
=============================

The :func:`minimize` function is a wrapper around :class:`Minimizer` for
running an optimization problem.
It takes an objective function (the function that calculates the array to be minimized), a :class:`Parameters` object, and several optional arguments. See :ref:`fit-func-label` for details on writing the objective function. .. autofunction:: minimize .. _fit-func-label: Writing a Fitting Function =============================== An important component of a fit is writing a function to be minimized -- the *objective function*. Since this function will be called by other routines, there are fairly stringent requirements for its call signature and return value. In principle, your function can be any Python callable, but it must look like this: .. function:: func(params, *args, **kws): Calculate objective residual to be minimized from parameters. :param params: Parameters. :type params: :class:`~lmfit.parameter.Parameters` :param args: Positional arguments. Must match ``args`` argument to :func:`minimize`. :param kws: Keyword arguments. Must match ``kws`` argument to :func:`minimize`. :return: Residual array (generally data-model) to be minimized in the least-squares sense. :rtype: numpy.ndarray. The length of this array cannot change between calls. A common use for the positional and keyword arguments would be to pass in other data needed to calculate the residual, including things as the data array, dependent variable, uncertainties in the data, and other data structures for the model calculation. The objective function should return the value to be minimized. For the Levenberg-Marquardt algorithm from :meth:`leastsq`, this returned value **must** be an array, with a length greater than or equal to the number of fitting variables in the model. For the other methods, the return value can either be a scalar or an array. If an array is returned, the sum of squares of the array will be sent to the underlying fitting method, effectively doing a least-squares optimization of the return values. Since the function will be passed in a dictionary of :class:`Parameters`, it is advisable to unpack these to get numerical values at the top of the function. A simple way to do this is with :meth:`Parameters.valuesdict`, as shown below:: from numpy import exp, sign, sin, pi def residual(pars, x, data=None, eps=None): # unpack parameters: extract .value attribute for each parameter parvals = pars.valuesdict() period = parvals['period'] shift = parvals['shift'] decay = parvals['decay'] if abs(shift) > pi/2: shift = shift - sign(shift)*pi if abs(period) < 1.e-10: period = sign(period)*1.e-10 model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay) if data is None: return model if eps is None: return model - data return (model-data) / eps In this example, ``x`` is a positional (required) argument, while the ``data`` array is actually optional (so that the function returns the model calculation if the data is neglected). Also note that the model calculation will divide ``x`` by the value of the ``period`` Parameter. It might be wise to ensure this parameter cannot be 0. It would be possible to use bounds on the :class:`Parameter` to do this:: params['period'] = Parameter(value=2, min=1.e-10) but putting this directly in the function with:: if abs(period) < 1.e-10: period = sign(period)*1.e-10 is also a reasonable approach. Similarly, one could place bounds on the ``decay`` parameter to take values only between ``-pi/2`` and ``pi/2``. .. 
_fit-methods-label: Choosing Different Fitting Methods =========================================== By default, the `Levenberg-Marquardt `_ algorithm is used for fitting. While often criticized, including the fact it finds a *local* minima, this approach has some distinct advantages. These include being fast, and well-behaved for most curve-fitting needs, and making it easy to estimate uncertainties for and correlations between pairs of fit variables, as discussed in :ref:`fit-results-label`. Alternative algorithms can also be used by providing the ``method`` keyword to the :func:`minimize` function or :meth:`Minimizer.minimize` class as listed in the :ref:`Table of Supported Fitting Methods `. If you have the ``numdifftools`` package installed, lmfit will try to estimate the covariance matrix and determine parameter uncertainties and correlations if ``calc_covar`` is True (default). .. _fit-methods-table: Table of Supported Fitting Methods: +--------------------------+------------------------------------------------------------------+ | Fitting Method | ``method`` arg to :func:`minimize` or :meth:`Minimizer.minimize` | +==========================+==================================================================+ | Levenberg-Marquardt | ``leastsq`` or ``least_squares`` | +--------------------------+------------------------------------------------------------------+ | Nelder-Mead | ``nelder`` | +--------------------------+------------------------------------------------------------------+ | L-BFGS-B | ``lbfgsb`` | +--------------------------+------------------------------------------------------------------+ | Powell | ``powell`` | +--------------------------+------------------------------------------------------------------+ | Conjugate Gradient | ``cg`` | +--------------------------+------------------------------------------------------------------+ | Newton-CG | ``newton`` | +--------------------------+------------------------------------------------------------------+ | COBYLA | ``cobyla`` | +--------------------------+------------------------------------------------------------------+ | BFGS | ``bfgsb`` | +--------------------------+------------------------------------------------------------------+ | Truncated Newton | ``tnc`` | +--------------------------+------------------------------------------------------------------+ | Newton CG trust-region | ``trust-ncg`` | +--------------------------+------------------------------------------------------------------+ | Exact trust-region | ``trust-exact`` | +--------------------------+------------------------------------------------------------------+ | Newton GLTR trust-region | ``trust-krylov`` | +--------------------------+------------------------------------------------------------------+ | Constrained trust-region | ``trust-constr`` | +--------------------------+------------------------------------------------------------------+ | Dogleg | ``dogleg`` | +--------------------------+------------------------------------------------------------------+ | Sequential Linear | ``slsqp`` | | Squares Programming | | +--------------------------+------------------------------------------------------------------+ | Differential | ``differential_evolution`` | | Evolution | | +--------------------------+------------------------------------------------------------------+ | Brute force method | ``brute`` | +--------------------------+------------------------------------------------------------------+ | Basinhopping | ``basinhopping`` | 
+--------------------------+------------------------------------------------------------------+ | Adaptive Memory | ``ampgo`` | | Programming for Global | | | Optimization | | +--------------------------+------------------------------------------------------------------+ | Simplicial Homology | ``shgo`` | | Global Ooptimization | | +--------------------------+------------------------------------------------------------------+ | Dual Annealing | ``dual_annealing`` | +--------------------------+------------------------------------------------------------------+ | Maximum likelihood via | ``emcee`` | | Monte-Carlo Markov Chain | | +--------------------------+------------------------------------------------------------------+ .. note:: The objective function for the Levenberg-Marquardt method **must** return an array, with more elements than variables. All other methods can return either a scalar value or an array. The Monte-Carlo Markov Chain or ``emcee`` method has two different operating methods when the objective function returns a scalar value. See the documentation for ``emcee``. .. warning:: Much of this documentation assumes that the Levenberg-Marquardt (``leastsq``) method is used. Many of the fit statistics and estimates for uncertainties in parameters discussed in :ref:`fit-results-label` are done only unconditionally for this (and the ``least_squares``) method. Lmfit versions newer than 0.9.11 provide the capability to use ``numdifftools`` to estimate the covariance matrix and calculate parameter uncertainties and correlations for other methods as well. .. _fit-results-label: :class:`MinimizerResult` -- the optimization result ======================================================== .. versionadded:: 0.9.0 An optimization with :func:`minimize` or :meth:`Minimizer.minimize` will return a :class:`MinimizerResult` object. This is an otherwise plain container object (that is, with no methods of its own) that simply holds the results of the minimization. These results will include several pieces of informational data such as status and error messages, fit statistics, and the updated parameters themselves. Importantly, the parameters passed in to :meth:`Minimizer.minimize` will be not be changed. To find the best-fit values, uncertainties and so on for each parameter, one must use the :attr:`MinimizerResult.params` attribute. For example, to print the fitted values, bounds and other parameter attributes in a well-formatted text tables you can execute:: result.params.pretty_print() with ``results`` being a ``MinimizerResult`` object. Note that the method :meth:`~lmfit.parameter.Parameters.pretty_print` accepts several arguments for customizing the output (e.g., column width, numeric format, etcetera). .. autoclass:: MinimizerResult Goodness-of-Fit Statistics ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. _goodfit-table: Table of Fit Results: These values, including the standard Goodness-of-Fit statistics, are all attributes of the :class:`MinimizerResult` object returned by :func:`minimize` or :meth:`Minimizer.minimize`. 
+----------------------+----------------------------------------------------------------------------+ | Attribute Name | Description / Formula | +======================+============================================================================+ | nfev | number of function evaluations | +----------------------+----------------------------------------------------------------------------+ | nvarys | number of variables in fit :math:`N_{\rm varys}` | +----------------------+----------------------------------------------------------------------------+ | ndata | number of data points: :math:`N` | +----------------------+----------------------------------------------------------------------------+ | nfree | degrees of freedom in fit: :math:`N - N_{\rm varys}` | +----------------------+----------------------------------------------------------------------------+ | residual | residual array, returned by the objective function: :math:`\{\rm Resid_i\}`| +----------------------+----------------------------------------------------------------------------+ | chisqr | chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2` | +----------------------+----------------------------------------------------------------------------+ | redchi | reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm varys})}` | +----------------------+----------------------------------------------------------------------------+ | aic | Akaike Information Criterion statistic (see below) | +----------------------+----------------------------------------------------------------------------+ | bic | Bayesian Information Criterion statistic (see below) | +----------------------+----------------------------------------------------------------------------+ | var_names | ordered list of variable parameter names used for init_vals and covar | +----------------------+----------------------------------------------------------------------------+ | covar | covariance matrix (with rows/columns using var_names) | +----------------------+----------------------------------------------------------------------------+ | init_vals | list of initial values for variable parameters | +----------------------+----------------------------------------------------------------------------+ Note that the calculation of chi-square and reduced chi-square assume that the returned residual function is scaled properly to the uncertainties in the data. For these statistics to be meaningful, the person writing the function to be minimized **must** scale them properly. After a fit using the :meth:`leastsq` or :meth:`least_squares` method has completed successfully, standard errors for the fitted variables and correlations between pairs of fitted variables are automatically calculated from the covariance matrix. For other methods, the ``calc_covar`` parameter (default is True) in the :class:`Minimizer` class determines whether or not to use the ``numdifftools`` package to estimate the covariance matrix. The standard error (estimated :math:`1\sigma` error-bar) goes into the :attr:`stderr` attribute of the Parameter. The correlations with all other variables will be put into the :attr:`correl` attribute of the Parameter -- a dictionary with keys for all other Parameters and values of the corresponding correlation. In some cases, it may not be possible to estimate the errors and correlations. 
For example, if a variable actually has no practical effect on the fit, it
will likely cause the covariance matrix to be singular, making standard errors
impossible to estimate. Placing bounds on varied Parameters makes it more
likely that errors cannot be estimated, as being near the maximum or minimum
value makes the covariance matrix singular. In these cases, the
:attr:`errorbars` attribute of the fit result (:class:`Minimizer` object) will
be ``False``.

.. _information_criteria_label:

Akaike and Bayesian Information Criteria
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The :class:`MinimizerResult` includes the traditional chi-square and reduced
chi-square statistics:

.. math::
    :nowrap:

    \begin{eqnarray*}
        \chi^2 &=& \sum_i^N r_i^2 \\
        \chi^2_\nu &=& \chi^2 / (N-N_{\rm varys})
    \end{eqnarray*}

where :math:`r` is the residual array returned by the objective function
(likely to be ``(data-model)/uncertainty`` for data modeling usages),
:math:`N` is the number of data points (``ndata``), and :math:`N_{\rm varys}`
is the number of variable parameters.

Also included are the `Akaike Information Criterion
<https://en.wikipedia.org/wiki/Akaike_information_criterion>`_ and `Bayesian
Information Criterion
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>`_ statistics,
held in the ``aic`` and ``bic`` attributes, respectively. These give slightly
different measures of the relative quality of a fit, trying to balance quality
of fit with the number of variable parameters used in the fit. These are
calculated as:

.. math::
    :nowrap:

    \begin{eqnarray*}
        {\rm aic} &=& N \ln(\chi^2/N) + 2 N_{\rm varys} \\
        {\rm bic} &=& N \ln(\chi^2/N) + \ln(N) N_{\rm varys} \\
    \end{eqnarray*}

When comparing fits with different numbers of varying parameters, one
typically selects the model with lowest reduced chi-square, Akaike information
criterion, and/or Bayesian information criterion. Generally, the Bayesian
information criterion is considered the most conservative of these statistics.

Uncertainties in Variable Parameters, and their Correlations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

As mentioned above, when a fit is complete the uncertainties for fitted
Parameters as well as the correlations between pairs of Parameters are usually
calculated. This happens automatically either when using the default
:meth:`leastsq` method, the :meth:`least_squares` method, or for most other
fitting methods if the highly-recommended ``numdifftools`` package is
available. The estimated standard error (the :math:`1\sigma` uncertainty) for
each variable Parameter will be contained in the :attr:`stderr` attribute,
while the :attr:`correl` attribute for each Parameter will contain a
dictionary of the correlation with each other variable Parameter.

These estimates of the uncertainties are done by inverting the Hessian matrix
which represents the second derivative of fit quality for each variable
parameter. There are situations for which the uncertainties cannot be
estimated, which generally indicates that this matrix cannot be inverted
because the fit is not actually sensitive to one of the variables. This can
happen if a Parameter is stuck at an upper or lower bound, if the variable is
simply not used by the fit, or if the value for the variable is such that it
has no real influence on the fit.

In principle, the scale of the uncertainties in the Parameters is closely tied
to the goodness-of-fit statistics chi-square and reduced chi-square
(``chisqr`` and ``redchi``). The standard errors or :math:`1 \sigma`
uncertainties are those that increase chi-square by 1.
Since a "good fit" should have ``redchi`` of around 1, this requires that the
data uncertainties (and to some extent the sampling of the N data points) are
correct. Unfortunately, it is often not the case that one has high-quality
estimates of the data uncertainties (getting the data is hard enough!).
Because of this common situation, the uncertainties reported and held in
:attr:`stderr` are not those that increase chi-square by 1, but those that
increase chi-square by reduced chi-square. This is equivalent to rescaling the
uncertainty in the data such that reduced chi-square would be 1. To be clear,
this rescaling is done by default because if reduced chi-square is far from 1,
this rescaling often makes the reported uncertainties sensible, and if reduced
chi-square is near 1 it does little harm. If you have good scaling of the data
uncertainty and believe the scale of the residual array is correct, this
automatic rescaling can be turned off using ``scale_covar=False``.

Note that the simple (and fast!) approach to estimating uncertainties and
correlations by inverting the second derivative matrix assumes that the
components of the residual array (if, indeed, an array is used) are
distributed around 0 with a normal (Gaussian) distribution, and that a map of
probability distributions for pairs would be elliptical -- the size of the
ellipse gives the uncertainty itself and the eccentricity of the ellipse gives
the correlation. This simple approach to assessing uncertainties ignores
outliers, highly asymmetric uncertainties, or complex correlations between
Parameters. In fact, it is not too hard to come up with problems where such
effects are important. Our experience is that the automated results are
usually the right scale and quite reasonable as initial estimates, but a more
thorough exploration of the Parameter space using the tools described in
:ref:`label-emcee` and :ref:`label-confidence-advanced` can give a more
complete understanding of the distributions and relations between Parameters.

.. _fit-reports-label:

Getting and Printing Fit Reports
===========================================

.. currentmodule:: lmfit.printfuncs

.. autofunction:: fit_report

An example using this to write out a fit report would be:

.. jupyter-execute:: ../examples/doc_fitting_withreport.py
    :hide-output:

which would give as output:

.. jupyter-execute::
    :hide-code:

    print(fit_report(out))

To be clear, you can get at all of these values from the fit result ``out``
and ``out.params``. For example, a crude printout of the best fit variables
and standard errors could be done as:

.. jupyter-execute::

    print('-------------------------------')
    print('Parameter    Value       Stderr')
    for name, param in out.params.items():
        print('{:7s} {:11.5f} {:11.5f}'.format(name, param.value, param.stderr))

.. _fit-itercb-label:

Using an Iteration Callback Function
=====================================

.. currentmodule:: lmfit.minimizer

An iteration callback function is a function to be called at each iteration,
just after the objective function is called. The iteration callback allows
user-supplied code to be run at each iteration, and can be used to abort a
fit.

.. function:: iter_cb(params, iter, resid, *args, **kws):

    User-supplied function to be run at each iteration.

    :param params: Parameters.
    :type params: :class:`~lmfit.parameter.Parameters`
    :param iter: Iteration number.
    :type iter: int
    :param resid: Residual array.
    :type resid: numpy.ndarray
    :param args: Positional arguments.
        Must match ``args`` argument to :func:`minimize`
    :param kws: Keyword arguments. Must match ``kws`` argument to :func:`minimize`
    :return: Residual array (generally data-model) to be minimized in the least-squares sense.
    :rtype: None for normal behavior, any value like True to abort the fit.

Normally, the iteration callback would have no return value or return
``None``. To abort a fit, have this function return a value that is ``True``
(including any non-zero integer). The fit will also abort if any exception is
raised in the iteration callback. When a fit is aborted this way, the
parameters will have the values from the last iteration. The fit statistics
are not likely to be meaningful, and uncertainties will not be computed.

.. _fit-minimizer-label:

Using the :class:`Minimizer` class
=======================================

.. currentmodule:: lmfit.minimizer

For full control of the fitting process, you will want to create a
:class:`Minimizer` object.

.. autoclass:: Minimizer

The Minimizer object has a few public methods:

.. automethod:: Minimizer.minimize

.. automethod:: Minimizer.leastsq

.. automethod:: Minimizer.least_squares

.. automethod:: Minimizer.scalar_minimize

.. automethod:: Minimizer.prepare_fit

.. automethod:: Minimizer.brute

For more information, check the examples in
``examples/lmfit_brute_example.ipynb``.

.. automethod:: Minimizer.basinhopping

.. automethod:: Minimizer.ampgo

.. automethod:: Minimizer.shgo

.. automethod:: Minimizer.dual_annealing

.. automethod:: Minimizer.emcee

.. _label-emcee:

:meth:`Minimizer.emcee` - calculating the posterior probability distribution of parameters
==============================================================================================

:meth:`Minimizer.emcee` can be used to obtain the posterior probability
distribution of parameters, given a set of experimental data. Note that this
method does *not* actually perform a fit at all. Instead, it explores
parameter space to determine the probability distributions for the parameters,
but without an explicit goal of attempting to refine the solution. It should
not be used for fitting, but it is a useful method to more thoroughly explore
the parameter space around the solution after a fit has been done and thereby
get an improved understanding of the probability distribution for the
parameters. It may be able to refine your estimate of the most likely values
for a set of parameters, but it will not iteratively find a good solution to
the minimization problem. To use this method effectively, you should first use
another minimization method and then use this method to explore the parameter
space around those best-fit values.

To illustrate this, we'll use an example problem of fitting data to a function
of a double exponential decay, with a modest amount of Gaussian noise added to
the data. Note that this example is the same problem used in
:ref:`label-confidence-advanced` for evaluating confidence intervals in the
parameters, which is a similar goal to the one here.

.. jupyter-execute::
    :hide-code:

    import warnings
    warnings.filterwarnings(action="ignore")
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    mpl.rcParams['figure.dpi'] = 150
    %matplotlib inline
    %config InlineBackend.figure_format = 'svg'

.. jupyter-execute::

    import matplotlib.pyplot as plt
    import numpy as np

    import lmfit

    x = np.linspace(1, 10, 250)
    np.random.seed(0)
    y = 3.0 * np.exp(-x / 2) - 5.0 * np.exp(-(x - 0.1) / 10.)
+ 0.1 * np.random.randn(x.size) plt.plot(x, y, 'b') plt.show() Create a Parameter set for the initial guesses: .. jupyter-execute:: p = lmfit.Parameters() p.add_many(('a1', 4.), ('a2', 4.), ('t1', 3.), ('t2', 3., True)) def residual(p): v = p.valuesdict() return v['a1'] * np.exp(-x / v['t1']) + v['a2'] * np.exp(-(x - 0.1) / v['t2']) - y Solving with :func:`minimize` gives the Maximum Likelihood solution. Note that we use the robust Nelder-Mead method here. The default Levenberg-Marquardt method seems to have difficulty with exponential decays, though it can refine the solution if starting near the solution: .. jupyter-execute:: mi = lmfit.minimize(residual, p, method='nelder', nan_policy='omit') lmfit.printfuncs.report_fit(mi.params, min_correl=0.5) and plotting the fit using the Maximum Likelihood solution gives the graph below: .. jupyter-execute:: plt.plot(x, y, 'b') plt.plot(x, residual(mi.params) + y, 'r', label='best fit') plt.legend(loc='best') plt.show() Note that the fit here (for which the ``numdifftools`` package is installed) does estimate and report uncertainties in the parameters and correlations for the parameters, and reports the correlation of parameters ``a2`` and ``t2`` to be very high. As we'll see, these estimates are pretty good, but when faced with such high correlation, it can be helpful to get the full probability distribution for the parameters. MCMC methods are very good for this. Furthermore, we wish to deal with the data uncertainty. This is called marginalisation of a nuisance parameter. ``emcee`` requires a function that returns the log-posterior probability. The log-posterior probability is a sum of the log-prior probability and log-likelihood functions. The log-prior probability is assumed to be zero if all the parameters are within their bounds and ``-np.inf`` if any of the parameters are outside their bounds. If the objective function returns an array of unweighted residuals (i.e., ``data-model``) as is the case here, you can use ``is_weighted=False`` as an argument for ``emcee``. In that case, ``emcee`` will automatically add/use the ``__lnsigma`` parameter to estimate the true uncertainty in the data. To place boundaries on this parameter one can do: .. jupyter-execute:: mi.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2)) Now we have to set up the minimizer and do the sampling (again, just to be clear, this is *not* doing a fit): .. jupyter-execute:: :hide-output: res = lmfit.minimize(residual, method='emcee', nan_policy='omit', burn=300, steps=1000, thin=20, params=mi.params, is_weighted=False, progress=False) As mentioned in the Notes for :meth:`Minimizer.emcee`, the ``is_weighted`` argument will be ignored if your objective function returns a float instead of an array. For the documentation we set ``progress=False``; the default is to print a progress bar to the Terminal if the ``tqdm`` package is installed. The success of the method (i.e., whether or not the sampling went well) can be assessed by checking the integrated autocorrelation time and/or the acceptance fraction of the walkers. For this specific example the autocorrelation time could not be estimated because the "chain is too short". Instead, we plot the acceptance fraction per walker and its mean value suggests that the sampling worked as intended (as a rule of thumb the value should be between 0.2 and 0.5). .. 
jupyter-execute:: plt.plot(res.acceptance_fraction) plt.xlabel('walker') plt.ylabel('acceptance fraction') plt.show() With the results from ``emcee``, we can visualize the posterior distributions for the parameters using the ``corner`` package: .. jupyter-execute:: import corner emcee_plot = corner.corner(res.flatchain, labels=res.var_names, truths=list(res.params.valuesdict().values())) The values reported in the :class:`MinimizerResult` are the medians of the probability distributions and a 1 :math:`\sigma` quantile, estimated as half the difference between the 15.8 and 84.2 percentiles. Printing these values: .. jupyter-execute:: print('median of posterior probability distribution') print('--------------------------------------------') lmfit.report_fit(res.params) You can see that this recovered the right uncertainty level on the data. Note that these values agree pretty well with the results, uncertainties and correlations found by the fit and using ``numdifftools`` to estimate the covariance matrix. That is, even though the parameters ``a2``, ``t1``, and ``t2`` are all highly correlated and do not display perfectly Gaussian probability distributions, the probability distributions found by explicitly sampling the parameter space are not so far from elliptical as to make the simple (and much faster) estimates from inverting the covariance matrix completely invalid. As mentioned above, the result from ``emcee`` reports the median values, which are not necessarily the same as the Maximum Likelihood Estimate. To obtain the values for the Maximum Likelihood Estimation (MLE) we find the location in the chain with the highest probability: .. jupyter-execute:: highest_prob = np.argmax(res.lnprob) hp_loc = np.unravel_index(highest_prob, res.lnprob.shape) mle_soln = res.chain[hp_loc] for i, par in enumerate(p): p[par].value = mle_soln[i] print('\nMaximum Likelihood Estimation from emcee ') print('-------------------------------------------------') print('Parameter MLE Value Median Value Uncertainty') fmt = ' {:5s} {:11.5f} {:11.5f} {:11.5f}'.format for name, param in p.items(): print(fmt(name, param.value, res.params[name].value, res.params[name].stderr)) Here the difference between the MLE and median values is seen to be below 0.5%, and well within the estimated 1-:math:`\sigma` uncertainty. Finally, we can use the samples from ``emcee`` to work out the 1- and 2-:math:`\sigma` error estimates. .. jupyter-execute:: print('\nError Estimates from emcee ') print('------------------------------------------------------') print('Parameter -2sigma -1sigma median +1sigma +2sigma ') for name in p.keys(): quantiles = np.percentile(res.flatchain[name], [2.275, 15.865, 50, 84.135, 97.725]) median = quantiles[2] err_m2 = quantiles[0] - median err_m1 = quantiles[1] - median err_p1 = quantiles[3] - median err_p2 = quantiles[4] - median fmt = ' {:5s} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f}'.format print(fmt(name, err_m2, err_m1, median, err_p1, err_p2)) And we see that the initial estimates for the 1-:math:`\sigma` standard error using ``numdifftools`` were not too bad. We'll return to this example problem in :ref:`label-confidence-advanced` and use a different method to calculate the 1- and 2-:math:`\sigma` error bars. lmfit-py-1.0.0/doc/index.rst000066400000000000000000000055671357751001700157000ustar00rootroot00000000000000.. lmfit documentation master file, Non-Linear Least-Squares Minimization and Curve-Fitting for Python =========================================================================== ..
_Levenberg-Marquardt: https://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm .. _scipy.optimize: https://docs.scipy.org/doc/scipy/reference/optimize.html .. _lmfit GitHub repository: https://github.com/lmfit/lmfit-py Lmfit provides a high-level interface to non-linear optimization and curve fitting problems for Python. It builds on and extends many of the optimization methods of `scipy.optimize`_. Initially inspired by (and named for) extending the `Levenberg-Marquardt`_ method from :scipydoc:`optimize.leastsq`, lmfit now provides a number of useful enhancements to optimization and data fitting problems, including: * Using :class:`~lmfit.parameter.Parameter` objects instead of plain floats as variables. A :class:`~lmfit.parameter.Parameter` has a value that can be varied during the fit or kept at a fixed value. It can have upper and/or lower bounds. A Parameter can even have a value that is constrained by an algebraic expression of other Parameter values. As a Python object, a Parameter can also have attributes such as a standard error, after a fit that can estimate uncertainties. * Ease of changing fitting algorithms. Once a fitting model is set up, one can change the fitting algorithm used to find the optimal solution without changing the objective function. * Improved estimation of confidence intervals. While :scipydoc:`optimize.leastsq` will automatically calculate uncertainties and correlations from the covariance matrix, the accuracy of these estimates is sometimes questionable. To help address this, lmfit has functions to explicitly explore parameter space and determine confidence levels even for the most difficult cases. Additionally, lmfit will use the ``numdifftools`` package (if installed) to estimate parameter uncertainties and correlations for algorithms that do not natively support this in SciPy. * Improved curve-fitting with the :class:`~lmfit.model.Model` class. This extends the capabilities of :scipydoc:`optimize.curve_fit`, allowing you to turn a function that models your data into a Python class that helps you parametrize and fit data with that model. * Many :ref:`built-in models ` for common lineshapes are included and ready to use. The lmfit package is Free software, using an Open Source license. The software and this document are works in progress. If you are interested in participating in this effort please use the `lmfit GitHub repository`_. .. toctree:: :maxdepth: 2 intro installation support faq parameters fitting model builtin_models confidence bounds constraints whatsnew examples/index lmfit-py-1.0.0/doc/installation.rst000066400000000000000000000076141357751001700172670ustar00rootroot00000000000000==================================== Downloading and Installation ==================================== .. _lmfit github repository: https://github.com/lmfit/lmfit-py .. _python: https://python.org .. _scipy: https://scipy.org/scipylib/index.html .. _numpy: http://numpy.org/ .. _pytest: https://pytest.org/ .. _emcee: http://dan.iel.fm/emcee/ .. _pandas: https://pandas.pydata.org/ .. _jupyter: https://jupyter.org/ .. _matplotlib: https://matplotlib.org/ .. _dill: https://github.com/uqfoundation/dill .. _asteval: https://github.com/newville/asteval .. _uncertainties: https://github.com/lebigot/uncertainties .. _numdifftools: https://github.com/pbrod/numdifftools .. _contributing.md: https://github.com/lmfit/lmfit-py/blob/master/.github/CONTRIBUTING.md .. _corner: https://github.com/dfm/corner.py .. _sphinx: https://www.sphinx-doc.org .. 
_jupyter_sphinx: https://jupyter-sphinx.readthedocs.io .. _ImageMagick: https://www.imagemagick.org/ .. _release_notes: https://lmfit.github.io/lmfit-py/whatsnew.html Prerequisites ~~~~~~~~~~~~~~~ Lmfit works with `Python`_ versions 3.5 and higher. Version 0.9.15 is the final version to support Python 2.7. Lmfit requires the following Python packages, with versions given: * `NumPy`_ version 1.16 or higher. * `SciPy`_ version 1.2 or higher. * `asteval`_ version 0.9.16 or higher. * `uncertainties`_ version 3.0.1 or higher. All of these are readily available on PyPI, and should be installed automatically if installing with ``pip install lmfit``. In order to run the test suite, the `pytest`_ package is required. Some functionality requires the `emcee`_ (version 3+), `corner`_, `pandas`_, `Jupyter`_, `matplotlib`_, `dill`_, or `numdifftools`_ packages. These are not installed automatically, but we highly recommend each of these packages. For building the documentation, `matplotlib`_, `emcee`_ (version 3+), `corner`_, `Sphinx`_, `jupyter_sphinx`_, and `ImageMagick`_ are required (the latter one only when generating the PDF document). Please refer to ``requirements-dev.txt`` for a list of all dependencies that are needed if you want to participate in the development of lmfit. Downloads ~~~~~~~~~~~~~ The latest stable version of lmfit is |release| and is available from `PyPI `_. Check the `release_notes`_ for a list of changes compared to earlier releases. Installation ~~~~~~~~~~~~~~~~~ The easiest way to install lmfit is with:: pip install lmfit For Anaconda Python, lmfit is not an official package, but several Anaconda channels provide it, allowing installation with (for example):: conda install -c GSECARS lmfit or:: conda install -c conda-forge lmfit Development Version ~~~~~~~~~~~~~~~~~~~~~~~~ To get the latest development version from the `lmfit GitHub repository`_, use:: git clone https://github.com/lmfit/lmfit-py.git and install using:: python setup.py install We welcome all contributions to lmfit! If you cloned the repository for this purpose, please read `CONTRIBUTING.md`_ for more detailed instructions. Testing ~~~~~~~~~~ A battery of tests scripts that can be run with the `pytest`_ testing framework is distributed with lmfit in the ``tests`` folder. These are automatically run as part of the development process. For any release or any master branch from the git repository, running ``pytest`` should run all of these tests to completion without errors or failures. Many of the examples in this documentation are distributed with lmfit in the ``examples`` folder, and should also run for you. Some of these examples assume that `matplotlib`_ has been installed and is working correctly. Acknowledgements ~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../THANKS.txt :language: none Copyright, Licensing, and Re-distribution ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The LMFIT-py code is distributed under the following license: .. literalinclude:: ../LICENSE :language: none lmfit-py-1.0.0/doc/intro.rst000066400000000000000000000164721357751001700157230ustar00rootroot00000000000000.. _intro_chapter: =========================================================== Getting started with Non-Linear Least-Squares Fitting =========================================================== The lmfit package provides simple tools to help you build complex fitting models for non-linear least-squares problems and apply these models to real data. 
This section gives an overview of the concepts and describes how to set up and perform simple fits. Some basic knowledge of Python, NumPy, and modeling data are assumed -- this is not a tutorial on why or how to perform a minimization or fit data, but is rather aimed at explaining how to use lmfit to do these things. In order to do a non-linear least-squares fit of a model to data or for any other optimization problem, the main task is to write an *objective function* that takes the values of the fitting variables and calculates either a scalar value to be minimized or an array of values that are to be minimized, typically in the least-squares sense. For many data fitting processes, the latter approach is used, and the objective function should return an array of (data-model), perhaps scaled by some weighting factor such as the inverse of the uncertainty in the data. For such a problem, the chi-square (:math:`\chi^2`) statistic is often defined as: .. math:: \chi^2 = \sum_i^{N} \frac{[y^{\rm meas}_i - y_i^{\rm model}({\bf{v}})]^2}{\epsilon_i^2} where :math:`y_i^{\rm meas}` is the set of measured data, :math:`y_i^{\rm model}({\bf{v}})` is the model calculation, :math:`{\bf{v}}` is the set of variables in the model to be optimized in the fit, and :math:`\epsilon_i` is the estimated uncertainty in the data. In a traditional non-linear fit, one writes an objective function that takes the variable values and calculates the residual array :math:`y^{\rm meas}_i - y_i^{\rm model}({\bf{v}})`, or the residual array scaled by the data uncertainties, :math:`[y^{\rm meas}_i - y_i^{\rm model}({\bf{v}})]/{\epsilon_i}`, or some other weighting factor. As a simple concrete example, one might want to model data with a decaying sine wave, and so write an objective function like this:: from numpy import exp, sin def residual(variables, x, data, eps_data): """Model a decaying sine wave and subtract data.""" amp = variables[0] phaseshift = variables[1] freq = variables[2] decay = variables[3] model = amp * sin(x*freq + phaseshift) * exp(-x*x*decay) return (data-model) / eps_data To perform the minimization with :mod:`scipy.optimize`, one would do this:: from scipy.optimize import leastsq variables = [10.0, 0.2, 3.0, 0.007] out = leastsq(residual, variables, args=(x, data, eps_data)) Though it is wonderful to be able to use Python for such optimization problems, and the SciPy library is robust and easy to use, the approach here is not terribly different from how one would do the same fit in C or Fortran. There are several practical challenges to using this approach, including: a) The user has to keep track of the order of the variables, and their meaning -- variables[0] is the amplitude, variables[2] is the frequency, and so on, although there is no intrinsic meaning to this order. b) If the user wants to fix a particular variable (*not* vary it in the fit), the residual function has to be altered to have fewer variables, and have the corresponding constant value passed in some other way. While reasonable for simple cases, this quickly becomes a significant work for more complex models, and greatly complicates modeling for people not intimately familiar with the details of the fitting code. c) There is no simple, robust way to put bounds on values for the variables, or enforce mathematical relationships between the variables. In fact, the optimization methods that do provide bounds, require bounds to be set for all variables with separate arrays that are in the same arbitrary order as variable values. 
Again, this is acceptable for small or one-off cases, but becomes painful if the fitting model needs to change. These shortcomings are due to the use of traditional arrays to hold the variables, which matches closely the implementation of the underlying Fortran code, but does not fit very well with Python's rich selection of objects and data structures. The key concept in lmfit is to define and use :class:`Parameter` objects instead of plain floating point numbers as the variables for the fit. Using :class:`Parameter` objects (or the closely related :class:`Parameters` -- a dictionary of :class:`Parameter` objects), allows one to: a) forget about the order of variables and refer to Parameters by meaningful names. b) place bounds on Parameters as attributes, without worrying about preserving the order of arrays for variables and boundaries. c) fix Parameters, without having to rewrite the objective function. d) place algebraic constraints on Parameters. To illustrate the value of this approach, we can rewrite the above example for the decaying sine wave as:: from numpy import exp, sin from lmfit import minimize, Parameters def residual(params, x, data, eps_data): amp = params['amp'] phaseshift = params['phase'] freq = params['frequency'] decay = params['decay'] model = amp * sin(x*freq + phaseshift) * exp(-x*x*decay) return (data-model) / eps_data params = Parameters() params.add('amp', value=10) params.add('decay', value=0.007) params.add('phase', value=0.2) params.add('frequency', value=3.0) out = minimize(residual, params, args=(x, data, eps_data)) At first look, we simply replaced a list of values with a dictionary, accessed by name -- not a huge improvement. But each of the named :class:`Parameter` in the :class:`Parameters` object holds additional attributes to modify the value during the fit. For example, Parameters can be fixed or bounded. This can be done during definition:: params = Parameters() params.add('amp', value=10, vary=False) params.add('decay', value=0.007, min=0.0) params.add('phase', value=0.2) params.add('frequency', value=3.0, max=10) where ``vary=False`` will prevent the value from changing in the fit, and ``min=0.0`` will set a lower bound on that parameter's value. It can also be done later by setting the corresponding attributes after they have been created:: params['amp'].vary = False params['decay'].min = 0.10 Importantly, our objective function remains unchanged. This means the objective function can simply express the parameterized phenomenon to be modeled, and is separate from the choice of parameters to be varied in the fit. The ``params`` object can be copied and modified to make many user-level changes to the model and fitting process. Of course, most of the information about how your data is modeled goes into the objective function, but the approach here allows some external control; that is, control by the **user** performing the fit, instead of by the author of the objective function. Finally, in addition to the :class:`Parameters` approach to fitting data, lmfit allows switching optimization methods without changing the objective function, provides tools for generating fitting reports, and provides a better determination of Parameters confidence levels. lmfit-py-1.0.0/doc/model.rst000066400000000000000000001070061357751001700156620ustar00rootroot00000000000000.. _model_chapter: ================================================= Modeling Data and Curve Fitting ================================================= .. 
module:: lmfit.model A common use of least-squares minimization is *curve fitting*, where one has a parametrized model function meant to explain some phenomena and wants to adjust the numerical values for the model so that it most closely matches some data. With :mod:`scipy`, such problems are typically solved with :scipydoc:`optimize.curve_fit`, which is a wrapper around :scipydoc:`optimize.leastsq`. Since lmfit's :func:`~lmfit.minimizer.minimize` is also a high-level wrapper around :scipydoc:`optimize.leastsq` it can be used for curve-fitting problems. While it offers many benefits over :scipydoc:`optimize.leastsq`, using :func:`~lmfit.minimizer.minimize` for many curve-fitting problems still requires more effort than using :scipydoc:`optimize.curve_fit`. The :class:`Model` class in lmfit provides a simple and flexible approach to curve-fitting problems. Like :scipydoc:`optimize.curve_fit`, a :class:`Model` uses a *model function* -- a function that is meant to calculate a model for some phenomenon -- and then uses that to best match an array of supplied data. Beyond that similarity, its interface is rather different from :scipydoc:`optimize.curve_fit`, for example in that it uses :class:`~lmfit.parameter.Parameters`, but also offers several other important advantages. In addition to allowing you to turn any model function into a curve-fitting method, lmfit also provides canonical definitions for many known line shapes such as Gaussian or Lorentzian peaks and Exponential decays that are widely used in many scientific domains. These are available in the :mod:`models` module that will be discussed in more detail in the next chapter (:ref:`builtin_models_chapter`). We mention it here as you may want to consult that list before writing your own model. For now, we focus on turning Python functions into high-level fitting models with the :class:`Model` class, and using these to fit data. Motivation and simple example: Fit data to Gaussian profile ============================================================= Let's start with a simple and common example of fitting data to a Gaussian peak. As we will see, there is a buit-in :class:`GaussianModel` class that can help do this, but here we'll build our own. We start with a simple definition of the model function: .. jupyter-execute:: :hide-code: import matplotlib as mpl mpl.rcParams['figure.dpi'] = 150 %matplotlib inline %config InlineBackend.figure_format = 'svg' .. jupyter-execute:: from numpy import exp, linspace, random def gaussian(x, amp, cen, wid): return amp * exp(-(x-cen)**2 / wid) We want to use this function to fit to data :math:`y(x)` represented by the arrays ``y`` and ``x``. With :scipydoc:`optimize.curve_fit`, this would be: .. jupyter-execute:: from scipy.optimize import curve_fit x = linspace(-10, 10, 101) y = gaussian(x, 2.33, 0.21, 1.51) + random.normal(0, 0.2, x.size) init_vals = [1, 0, 1] # for [amp, cen, wid] best_vals, covar = curve_fit(gaussian, x, y, p0=init_vals) print('best_vals: {}'.format(best_vals)) That is, we create data, make an initial guess of the model values, and run :scipydoc:`optimize.curve_fit` with the model function, data arrays, and initial guesses. The results returned are the optimal values for the parameters and the covariance matrix. It's simple and useful, but it misses the benefits of lmfit. 
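Turning that covariance matrix into parameter uncertainties is then left to you. As a minimal sketch (reusing the ``best_vals`` and ``covar`` returned above), the 1-:math:`\sigma` standard errors are the square roots of the diagonal of the covariance matrix::

    import numpy as np

    # 1-sigma standard errors from the diagonal of the covariance matrix
    stderr = np.sqrt(np.diag(covar))
    for name, value, err in zip(['amp', 'cen', 'wid'], best_vals, stderr):
        print('{:4s}: {:.4f} +/- {:.4f}'.format(name, value, err))

Keeping track of which value and uncertainty belongs to which function argument is entirely up to you -- exactly the kind of bookkeeping that lmfit's named Parameters are meant to remove.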
With lmfit, we create a :class:`Model` that wraps the ``gaussian`` model function, which automatically generates the appropriate residual function, and determines the corresponding parameter names from the function signature itself: .. jupyter-execute:: from lmfit import Model gmodel = Model(gaussian) print('parameter names: {}'.format(gmodel.param_names)) print('independent variables: {}'.format(gmodel.independent_vars)) As you can see, the Model ``gmodel`` determined the names of the parameters and the independent variables. By default, the first argument of the function is taken as the independent variable, held in :attr:`independent_vars`, and the rest of the function's positional arguments (and, in certain cases, keyword arguments -- see below) are used for Parameter names. Thus, for the ``gaussian`` function above, the independent variable is ``x``, and the parameters are named ``amp``, ``cen``, and ``wid`` -- all taken directly from the signature of the model function. As we will see below, you can modify the default assignment of independent variable / arguments and specify yourself what the independent variable is and which function arguments should be identified as parameter names. The Parameters are *not* created when the model is created. The model knows what the parameters should be named, but nothing about the scale and range of your data. You will normally have to make these parameters and assign initial values and other attributes. To help you do this, each model has a :meth:`make_params` method that will generate parameters with the expected names:: params = gmodel.make_params() This creates the :class:`~lmfit.parameter.Parameters` but does not automatically give them initial values since it has no idea what the scale should be. You can set initial values for parameters with keyword arguments to :meth:`make_params`:: params = gmodel.make_params(cen=5, amp=200, wid=1) or assign them (and other parameter properties) after the :class:`~lmfit.parameter.Parameters` class has been created. A :class:`Model` has several methods associated with it. For example, one can use the :meth:`eval` method to evaluate the model or the :meth:`fit` method to fit data to this model with a :class:`~lmfit.parameter.Parameters` object. Both of these methods can take explicit keyword arguments for the parameter values. For example, one could use :meth:`eval` to calculate the predicted function:: x_eval = linspace(0, 10, 201) y_eval = gmodel.eval(params, x=x_eval) or with:: y_eval = gmodel.eval(x=x_eval, cen=6.5, amp=100, wid=2.0) Admittedly, this is a slightly long-winded way to calculate a Gaussian function, given that you could have called your ``gaussian`` function directly. But now that the model is set up, we can use its :meth:`fit` method to fit this model to data, as with:: result = gmodel.fit(y, params, x=x) or with:: result = gmodel.fit(y, x=x, cen=6.5, amp=100, wid=2.0) Putting everything together, included in the ``examples`` folder with the source code, is: .. jupyter-execute:: ../examples/doc_model_gaussian.py :hide-output: which is pretty compact and to the point. The returned ``result`` will be a :class:`ModelResult` object. As we will see below, this has many components, including a :meth:`fit_report` method, which will show: .. jupyter-execute:: :hide-code: print(result.fit_report()) As the script shows, the result will also have :attr:`init_fit` for the fit with the initial parameter values and a :attr:`best_fit` for the fit with the best fit parameter values.
These can be used to generate the following plot: .. jupyter-execute:: :hide-code: plt.plot(x, y, 'bo') plt.plot(x, result.init_fit, 'k--', label='initial fit') plt.plot(x, result.best_fit, 'r-', label='best fit') plt.legend(loc='best') plt.show() which shows the data in blue dots, the best fit as a solid red line, and the initial fit as a dashed black line. Note that the model fitting was really performed with:: gmodel = Model(gaussian) result = gmodel.fit(y, params, x=x, amp=5, cen=5, wid=1) These lines clearly express that we want to turn the ``gaussian`` function into a fitting model, and then fit the :math:`y(x)` data to this model, starting with values of 5 for ``amp``, 5 for ``cen`` and 1 for ``wid``. In addition, all the other features of lmfit are included: :class:`~lmfit.parameter.Parameters` can have bounds and constraints and the result is a rich object that can be reused to explore the model fit in detail. The :class:`Model` class ======================================= The :class:`Model` class provides a general way to wrap a pre-defined function as a fitting model. .. autoclass:: Model :class:`Model` class Methods --------------------------------- .. automethod:: Model.eval .. automethod:: Model.fit .. automethod:: Model.guess .. automethod:: Model.make_params .. automethod:: Model.set_param_hint See :ref:`model_param_hints_section`. .. automethod:: Model.print_param_hints :class:`Model` class Attributes --------------------------------- .. attribute:: func The model function used to calculate the model. .. attribute:: independent_vars List of strings for names of the independent variables. .. attribute:: nan_policy Describes what to do for NaNs that indicate missing values in the data. The choices are: * 'raise': Raise a ValueError (default) * 'propagate': Do not check for NaNs or missing values. The fit will try to ignore them. * 'omit': Remove NaNs or missing observations in data. If pandas is installed, :func:`pandas.isnull` is used, otherwise :func:`numpy.isnan` is used. .. attribute:: name Name of the model, used only in the string representation of the model. By default this will be taken from the model function. .. attribute:: opts Extra keyword arguments to pass to model function. Normally this will be determined internally and should not be changed. .. attribute:: param_hints Dictionary of parameter hints. See :ref:`model_param_hints_section`. .. attribute:: param_names List of strings of parameter names. .. attribute:: prefix Prefix used for name-mangling of parameter names. The default is ''. If a particular :class:`Model` has arguments ``amplitude``, ``center``, and ``sigma``, these would become the parameter names. Using a prefix of ``'g1_'`` would convert these parameter names to ``g1_amplitude``, ``g1_center``, and ``g1_sigma``. This can be essential to avoid name collision in composite models. Determining parameter names and independent variables for a function ----------------------------------------------------------------------- The :class:`Model` created from the supplied function ``func`` will create a :class:`~lmfit.parameter.Parameters` object, and names are inferred from the function arguments, and a residual function is automatically constructed. By default, the independent variable is taken as the first argument to the function. You can, of course, explicitly set this, and will need to do so if the independent variable is not first in the list, or if there is actually more than one independent variable. 
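For example, a minimal sketch (the ``plane`` function here is invented purely for illustration) of a model with two independent variables might be::

    from lmfit import Model

    def plane(x, y, slope_x, slope_y, offset):
        # a flat plane over the two independent variables x and y
        return slope_x*x + slope_y*y + offset

    model = Model(plane, independent_vars=['x', 'y'])
    print('parameter names: {}'.format(model.param_names))
    print('independent variables: {}'.format(model.independent_vars))

Both ``x`` and ``y`` would then need to be given as keyword arguments for every call to :meth:`Model.eval` or :meth:`Model.fit`.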
If not specified, Parameters are constructed from all positional arguments and all keyword arguments that have a default value that is numerical, except the independent variable, of course. Importantly, the Parameters can be modified after creation. In fact, you will have to do this because none of the parameters have valid initial values. In addition, one can place bounds and constraints on Parameters, or fix their values. Explicitly specifying ``independent_vars`` ------------------------------------------------- As we saw for the Gaussian example above, creating a :class:`Model` from a function is fairly easy. Let's try another one: .. jupyter-execute:: import numpy as np from lmfit import Model def decay(t, tau, N): return N*np.exp(-t/tau) decay_model = Model(decay) print('independent variables: {}'.format(decay_model.independent_vars)) params = decay_model.make_params() print('\nParameters:') for pname, par in params.items(): print(pname, par) Here, ``t`` is assumed to be the independent variable because it is the first argument to the function. The other function arguments are used to create parameters for the model. If you want ``tau`` to be the independent variable in the above example, you can say so: .. jupyter-execute:: decay_model = Model(decay, independent_vars=['tau']) print('independent variables: {}'.format(decay_model.independent_vars)) params = decay_model.make_params() print('\nParameters:') for pname, par in params.items(): print(pname, par) You can also supply multiple values for multi-dimensional functions with multiple independent variables. In fact, the meaning of *independent variable* here is simple, and based on how it treats arguments of the function you are modeling: independent variable A function argument that is not a parameter or otherwise part of the model, and that will be required to be explicitly provided as a keyword argument for each fit with :meth:`Model.fit` or evaluation with :meth:`Model.eval`. Note that independent variables are not required to be arrays, or even floating point numbers. Functions with keyword arguments ----------------------------------------- If the model function had keyword parameters, these would be turned into Parameters if the supplied default value was a valid number (but not None, True, or False). .. jupyter-execute:: def decay2(t, tau, N=10, check_positive=False): if check_positive: arg = abs(t)/max(1.e-9, abs(tau)) else: arg = t/tau return N*np.exp(-arg) mod = Model(decay2) params = mod.make_params() print('Parameters:') for pname, par in params.items(): print(pname, par) Here, even though ``N`` is a keyword argument to the function, it is turned into a parameter, with the default numerical value as its initial value. By default, it is permitted to be varied in the fit -- the 10 is taken as an initial value, not a fixed value. On the other hand, the ``check_positive`` keyword argument was not converted to a parameter because it has a boolean default value. In some sense, ``check_positive`` becomes like an independent variable to the model. However, because it has a default value it is not required to be given for each model evaluation or fit, as independent variables are. Defining a ``prefix`` for the Parameters -------------------------------------------- As we will see in the next chapter when combining models, it is sometimes necessary to decorate the parameter names in the model, but still have them be correctly used in the underlying model function.
This would be necessary, for example, if two parameters in a composite model (see :ref:`composite_models_section` or examples in the next chapter) would have the same name. To avoid this, we can add a ``prefix`` to the :class:`Model` which will automatically do this mapping for us. .. jupyter-execute:: def myfunc(x, amplitude=1, center=0, sigma=1): # function definition, for now just ``pass`` pass mod = Model(myfunc, prefix='f1_') params = mod.make_params() print('Parameters:') for pname, par in params.items(): print(pname, par) You would refer to these parameters as ``f1_amplitude`` and so forth, and the model will know to map these to the ``amplitude`` argument of ``myfunc``. Initializing model parameters -------------------------------- As mentioned above, the parameters created by :meth:`Model.make_params` are generally created with invalid initial values of None. These values **must** be initialized in order for the model to be evaluated or used in a fit. There are four different ways to do this initialization that can be used in any combination: 1. You can supply initial values in the definition of the model function. 2. You can initialize the parameters when creating parameters with :meth:`Model.make_params`. 3. You can give parameter hints with :meth:`Model.set_param_hint`. 4. You can supply initial values for the parameters when you use the :meth:`Model.eval` or :meth:`Model.fit` methods. Of course these methods can be mixed, allowing you to overwrite initial values at any point in the process of defining and using the model. Initializing values in the function definition ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To supply initial values for parameters in the definition of the model function, you can simply supply a default value:: def myfunc(x, a=1, b=0): ... instead of using:: def myfunc(x, a, b): ... This has the advantage of working at the function level -- all parameters with keywords can be treated as options. It also means that some default initial value will always be available for the parameter. Initializing values with :meth:`Model.make_params` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When creating parameters with :meth:`Model.make_params` you can specify initial values. To do this, use keyword arguments for the parameter names and initial values:: mod = Model(myfunc) pars = mod.make_params(a=3, b=0.5) Initializing values by setting parameter hints ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After a model has been created, but prior to creating parameters with :meth:`Model.make_params`, you can set parameter hints. These allows you to set not only a default initial value but also to set other parameter attributes controlling bounds, whether it is varied in the fit, or a constraint expression. To set a parameter hint, you can use :meth:`Model.set_param_hint`, as with:: mod = Model(myfunc) mod.set_param_hint('a', value=1.0) mod.set_param_hint('b', value=0.3, min=0, max=1.0) pars = mod.make_params() Parameter hints are discussed in more detail in section :ref:`model_param_hints_section`. Initializing values when using a model ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Finally, you can explicitly supply initial values when using a model. 
That is, as with :meth:`Model.make_params`, you can include values as keyword arguments to either the :meth:`Model.eval` or :meth:`Model.fit` methods:: y1 = mod.eval(x=x, a=7.0, b=-2.0) out = mod.fit(y, pars, x=x, a=3.0, b=0.0) These approaches to initialization provide many opportunities for setting initial values for parameters. The methods can be combined, so that you can set parameter hints but then change the initial value explicitly with :meth:`Model.fit`. .. _model_param_hints_section: Using parameter hints -------------------------------- After a model has been created, you can give it hints for how to create parameters with :meth:`Model.make_params`. This allows you to set not only a default initial value but also to set other parameter attributes controlling bounds, whether it is varied in the fit, or a constraint expression. To set a parameter hint, you can use :meth:`Model.set_param_hint`, as with:: mod = Model(myfunc) mod.set_param_hint('a', value=1.0) mod.set_param_hint('b', value=0.3, min=0, max=1.0) Parameter hints are stored in a model's :attr:`param_hints` attribute, which is simply a nested dictionary:: print('Parameter hints:') for pname, par in mod.param_hints.items(): print(pname, par) :: Parameter hints: a OrderedDict([('value', 1.0)]) b OrderedDict([('value', 0.3), ('min', 0), ('max', 1.0)]) You can change this dictionary directly, or with the :meth:`Model.set_param_hint` method. Either way, these parameter hints are used by :meth:`Model.make_params` when making parameters. An important feature of parameter hints is that you can force the creation of new parameters with parameter hints. This can be useful to make derived parameters with constraint expressions. For example, to get the full-width at half maximum of a Gaussian model, one could use a parameter hint of:: mod = Model(gaussian) mod.set_param_hint('fwhm', expr='2.3548*sigma') .. _model_saveload_sec: Saving and Loading Models ----------------------------------- .. versionadded:: 0.9.8 It is sometimes desirable to save a :class:`Model` for later use outside of the code used to define the model. Lmfit provides a :func:`save_model` function that will save a :class:`Model` to a file. There is also a companion :func:`load_model` function that can read this file and reconstruct a :class:`Model` from it. Saving a model turns out to be somewhat challenging. The main issue is that Python is not normally able to *serialize* a function (such as the model function making up the heart of the Model) in a way that can be reconstructed into a callable Python object. The ``dill`` package can sometimes serialize functions, but with the limitation that it can be used only in the same version of Python. In addition, class methods used as model functions will not retain the rest of the class attributes and methods, and so may not be usable. With all those warnings, it should be emphasized that if you are willing to save or reuse the definition of the model function as Python code, then saving the Parameters and rest of the components that make up a model presents no problem. If the ``dill`` package is installed, the model function will be saved using it. But because saving the model function is not always reliable, saving a model will always save the *name* of the model function. The :func:`load_model` takes an optional :attr:`funcdefs` argument that can contain a dictionary of function definitions with the function names as keys and function objects as values.
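For instance, a minimal sketch (the file name ``'gauss_model.sav'`` and re-supplying the ``gaussian`` function from the start of this chapter are assumptions for illustration) might be::

    from numpy import exp
    from lmfit.model import load_model

    def gaussian(x, amp, cen, wid):
        # the same model function that was used when the model was saved
        return amp * exp(-(x-cen)**2 / wid)

    gmodel = load_model('gauss_model.sav', funcdefs={'gaussian': gaussian})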
If one of the dictionary keys matches the saved name, the corresponding function object will be used as the model function. With this approach, if you save a model and can provide the code used for the model function, the model can be saved and reliably reloaded and used. .. autofunction:: save_model .. autofunction:: load_model As a simple example, one can save a model as: .. jupyter-execute:: ../examples/doc_model_savemodel.py To load that later, one might do: .. jupyter-execute:: ../examples/doc_model_loadmodel.py :hide-output: See also :ref:`modelresult_saveload_sec`. The :class:`ModelResult` class ======================================= A :class:`ModelResult` (which had been called ``ModelFit`` prior to version 0.9) is the object returned by :meth:`Model.fit`. It is a subclass of :class:`~lmfit.minimizer.Minimizer`, and so contains many of the fit results. Of course, it knows the :class:`Model` and the set of :class:`~lmfit.parameter.Parameters` used in the fit, and it has methods to evaluate the model, to fit the data (or re-fit the data with changes to the parameters, or fit with different or modified data) and to print out a report for that fit. While a :class:`Model` encapsulates your model function, it is fairly abstract and does not contain the parameters or data used in a particular fit. A :class:`ModelResult` *does* contain parameters and data as well as methods to alter and re-do fits. Thus the :class:`Model` is the idealized model while the :class:`ModelResult` is the messier, more complex (but perhaps more useful) object that represents a fit with a set of parameters to data with a model. A :class:`ModelResult` has several attributes holding values for fit results, and several methods for working with fits. These include statistics inherited from :class:`~lmfit.minimizer.Minimizer` useful for comparing different models, including ``chisqr``, ``redchi``, ``aic``, and ``bic``. .. autoclass:: ModelResult :class:`ModelResult` methods --------------------------------- .. automethod:: ModelResult.eval .. automethod:: ModelResult.eval_components .. automethod:: ModelResult.fit .. automethod:: ModelResult.fit_report .. automethod:: ModelResult.conf_interval .. automethod:: ModelResult.ci_report .. automethod:: ModelResult.eval_uncertainty .. automethod:: ModelResult.plot .. automethod:: ModelResult.plot_fit .. automethod:: ModelResult.plot_residuals :class:`ModelResult` attributes --------------------------------- .. attribute:: aic Floating point best-fit Akaike Information Criterion statistic (see :ref:`fit-results-label`). .. attribute:: best_fit numpy.ndarray result of model function, evaluated at provided independent variables and with best-fit parameters. .. attribute:: best_values Dictionary with parameter names as keys, and best-fit values as values. .. attribute:: bic Floating point best-fit Bayesian Information Criterion statistic (see :ref:`fit-results-label`). .. attribute:: chisqr Floating point best-fit chi-square statistic (see :ref:`fit-results-label`). .. attribute:: ci_out Confidence interval data (see :ref:`confidence_chapter`) or None if the confidence intervals have not been calculated. .. attribute:: covar numpy.ndarray (square) covariance matrix returned from fit. .. attribute:: data numpy.ndarray of data to compare to model. .. attribute:: errorbars Boolean for whether error bars were estimated by fit. .. attribute:: ier Integer returned code from :scipydoc:`optimize.leastsq`. .. 
attribute:: init_fit numpy.ndarray result of model function, evaluated at provided independent variables and with initial parameters. .. attribute:: init_params Initial parameters. .. attribute:: init_values Dictionary with parameter names as keys, and initial values as values. .. attribute:: iter_cb Optional callable function, to be called at each fit iteration. This must take take arguments of ``(params, iter, resid, *args, **kws)``, where ``params`` will have the current parameter values, ``iter`` the iteration, ``resid`` the current residual array, and ``*args`` and ``**kws`` as passed to the objective function. See :ref:`fit-itercb-label`. .. attribute:: jacfcn Optional callable function, to be called to calculate Jacobian array. .. attribute:: lmdif_message String message returned from :scipydoc:`optimize.leastsq`. .. attribute:: message String message returned from :func:`~lmfit.minimizer.minimize`. .. attribute:: method String naming fitting method for :func:`~lmfit.minimizer.minimize`. .. attribute:: model Instance of :class:`Model` used for model. .. attribute:: ndata Integer number of data points. .. attribute:: nfev Integer number of function evaluations used for fit. .. attribute:: nfree Integer number of free parameters in fit. .. attribute:: nvarys Integer number of independent, freely varying variables in fit. .. attribute:: params Parameters used in fit. Will have best-fit values. .. attribute:: redchi Floating point reduced chi-square statistic (see :ref:`fit-results-label`). .. attribute:: residual numpy.ndarray for residual. .. attribute:: scale_covar Boolean flag for whether to automatically scale covariance matrix. .. attribute:: success Boolean value of whether fit succeeded. .. attribute:: weights numpy.ndarray (or None) of weighting values to be used in fit. If not None, it will be used as a multiplicative factor of the residual array, so that ``weights*(data - fit)`` is minimized in the least-squares sense. Calculating uncertainties in the model function ------------------------------------------------- We return to the first example above and ask not only for the uncertainties in the fitted parameters but for the range of values that those uncertainties mean for the model function itself. We can use the :meth:`ModelResult.eval_uncertainty` method of the model result object to evaluate the uncertainty in the model with a specified level for :math:`\sigma`. That is, adding: .. jupyter-execute:: ../examples/doc_model_gaussian.py :hide-output: :hide-code: .. jupyter-execute:: :hide-output: dely = result.eval_uncertainty(sigma=3) plt.fill_between(x, result.best_fit-dely, result.best_fit+dely, color="#ABABAB", label='3-$\sigma$ uncertainty band') to the example fit to the Gaussian at the beginning of this chapter will give 3-:math:`\sigma` bands for the best-fit Gaussian, and produce the figure below. .. jupyter-execute:: :hide-code: plt.plot(x, y, 'bo') plt.plot(x, result.init_fit, 'k--', label='initial fit') plt.plot(x, result.best_fit, 'r-', label='best fit') plt.fill_between(x, result.best_fit-dely, result.best_fit+dely, color="#ABABAB", label='3-$\sigma$ uncertainty band') plt.legend(loc='best') plt.show() .. _modelresult_saveload_sec: Saving and Loading ModelResults -------------------------------------- .. versionadded:: 0.9.8 As with saving models (see section :ref:`model_saveload_sec`), it is sometimes desirable to save a :class:`ModelResult`, either for later use or to organize and compare different fit results. 
Lmfit provides a :func:`save_modelresult` function that will save a :class:`ModelResult` to a file. There is also a companion :func:`load_modelresult` function that can read this file and reconstruct a :class:`ModelResult` from it. As discussed in section :ref:`model_saveload_sec`, there are challenges to saving model functions that may make it difficult to restore a saved a :class:`ModelResult` in a way that can be used to perform a fit. Use of the optional :attr:`funcdefs` argument is generally the most reliable way to ensure that a loaded :class:`ModelResult` can be used to evaluate the model function or redo the fit. .. autofunction:: save_modelresult .. autofunction:: load_modelresult An example of saving a :class:`ModelResult` is: .. jupyter-execute:: ../examples/doc_model_savemodelresult.py :hide-output: To load that later, one might do: .. jupyter-execute:: ../examples/doc_model_loadmodelresult.py :hide-output: .. index:: Composite models .. _composite_models_section: Composite Models : adding (or multiplying) Models ============================================================== One of the more interesting features of the :class:`Model` class is that Models can be added together or combined with basic algebraic operations (add, subtract, multiply, and divide) to give a composite model. The composite model will have parameters from each of the component models, with all parameters being available to influence the whole model. This ability to combine models will become even more useful in the next chapter, when pre-built subclasses of :class:`Model` are discussed. For now, we'll consider a simple example, and build a model of a Gaussian plus a line, as to model a peak with a background. For such a simple problem, we could just build a model that included both components:: def gaussian_plus_line(x, amp, cen, wid, slope, intercept): """line + 1-d gaussian""" gauss = (amp / (sqrt(2*pi) * wid)) * exp(-(x-cen)**2 / (2*wid**2)) line = slope*x + intercept return gauss + line and use that with:: mod = Model(gaussian_plus_line) But we already had a function for a gaussian function, and maybe we'll discover that a linear background isn't sufficient which would mean the model function would have to be changed. Instead, lmfit allows models to be combined into a :class:`CompositeModel`. As an alternative to including a linear background in our model function, we could define a linear function:: def line(x, slope, intercept): """a line""" return slope*x + intercept and build a composite model with just:: mod = Model(gaussian) + Model(line) This model has parameters for both component models, and can be used as: .. jupyter-execute:: ../examples/doc_model_two_components.py :hide-output: which prints out the results: .. jupyter-execute:: :hide-code: print(result.fit_report()) and shows the plot on the left. .. jupyter-execute:: :hide-code: fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8)) axes[0].plot(x, y, 'bo') axes[0].plot(x, result.init_fit, 'k--', label='initial fit') axes[0].plot(x, result.best_fit, 'r-', label='best fit') axes[0].legend(loc='best') comps = result.eval_components() axes[1].plot(x, y, 'bo') axes[1].plot(x, comps['gaussian'], 'k--', label='Gaussian component') axes[1].plot(x, comps['line'], 'r--', label='Line component') axes[1].legend(loc='best') plt.show() On the left, data is shown in blue dots, the total fit is shown in solid red line, and the initial fit is shown as a black dashed line. 
The figure on the right shows again the data in blue dots, the Gaussian component as a black dashed line and the linear component as a red dashed line. It is created using the following code:: comps = result.eval_components() plt.plot(x, y, 'bo') plt.plot(x, comps['gaussian'], 'k--', label='Gaussian component') plt.plot(x, comps['line'], 'r--', label='Line component') The components were generated after the fit using the :meth:`ModelResult.eval_components` method of the ``result``, which returns a dictionary of the components, using keys of the model name (or ``prefix`` if that is set). This will use the parameter values in `result.params` and the independent variables (``x``) used during the fit. Note that while the :class:`ModelResult` held in ``result`` does store the best parameters and the best estimate of the model in ``result.best_fit``, the original model and parameters in ``pars`` are left unaltered. You can apply this composite model to other data sets, or evaluate the model at other values of ``x``. You may want to do this to give a finer or coarser spacing of data point, or to extrapolate the model outside the fitting range. This can be done with:: xwide = np.linspace(-5, 25, 3001) predicted = mod.eval(x=xwide) In this example, the argument names for the model functions do not overlap. If they had, the ``prefix`` argument to :class:`Model` would have allowed us to identify which parameter went with which component model. As we will see in the next chapter, using composite models with the built-in models provides a simple way to build up complex models. .. autoclass:: CompositeModel(left, right, op[, **kws]) Note that when using built-in Python binary operators, a :class:`CompositeModel` will automatically be constructed for you. That is, doing:: mod = Model(fcn1) + Model(fcn2) * Model(fcn3) will create a :class:`CompositeModel`. Here, ``left`` will be ``Model(fcn1)``, ``op`` will be :meth:`operator.add`, and ``right`` will be another CompositeModel that has a ``left`` attribute of ``Model(fcn2)``, an ``op`` of :meth:`operator.mul`, and a ``right`` of ``Model(fcn3)``. To use a binary operator other than '+', '-', '*', or '/' you can explicitly create a :class:`CompositeModel` with the appropriate binary operator. For example, to convolve two models, you could define a simple convolution function, perhaps as:: import numpy as np def convolve(dat, kernel): """simple convolution of two arrays""" npts = min(len(dat), len(kernel)) pad = np.ones(npts) tmp = np.concatenate((pad*dat[0], dat, pad*dat[-1])) out = np.convolve(tmp, kernel, mode='valid') noff = int((len(out) - npts) / 2) return (out[noff:])[:npts] which extends the data in both directions so that the convolving kernel function gives a valid result over the data range. Because this function takes two array arguments and returns an array, it can be used as the binary operator. A full script using this technique is here: .. jupyter-execute:: ../examples/doc_model_composite.py :hide-output: which prints out the results: .. jupyter-execute:: :hide-code: print(result.fit_report()) and shows the plots: .. 
jupyter-execute:: :hide-code: fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8)) axes[0].plot(x, y, 'bo') axes[0].plot(x, result.init_fit, 'k--', label='initial fit') axes[0].plot(x, result.best_fit, 'r-', label='best fit') axes[0].legend(loc='best') axes[1].plot(x, y, 'bo') axes[1].plot(x, 10*comps['jump'], 'k--', label='Jump component') axes[1].plot(x, 10*comps['gaussian'], 'r-', label='Gaussian component') axes[1].legend(loc='best') plt.show() Using composite models with built-in or custom operators allows you to build complex models from testable sub-components. lmfit-py-1.0.0/doc/parameters.rst000066400000000000000000000074641357751001700167340ustar00rootroot00000000000000.. _parameters_chapter: .. module:: lmfit.parameter ================================================ :class:`Parameter` and :class:`Parameters` ================================================ This chapter describes the :class:`Parameter` object, which is a key concept of lmfit. A :class:`Parameter` is the quantity to be optimized in all minimization problems, replacing the plain floating point number used in the optimization routines from :mod:`scipy.optimize`. A :class:`Parameter` has a value that can either be varied in the fit or held at a fixed value, and can have upper and/or lower bounds placed on the value. It can even have a value that is constrained by an algebraic expression of other Parameter values. Since :class:`Parameter` objects live outside the core optimization routines, they can be used in **all** optimization routines from :mod:`scipy.optimize`. By using :class:`Parameter` objects instead of plain variables, the objective function does not have to be modified to reflect every change of what is varied in the fit, or whether bounds can be applied. This simplifies the writing of models, allowing general models that describe the phenomenon and gives the user more flexibility in using and testing variations of that model. Whereas a :class:`Parameter` expands on an individual floating point variable, the optimization methods actually still need an ordered group of floating point variables. In the :mod:`scipy.optimize` routines this is required to be a one-dimensional :numpydoc:`ndarray`. In lmfit, this one-dimensional array is replaced by a :class:`Parameters` object, which works as an ordered dictionary of :class:`Parameter` objects with a few additional features and methods. That is, while the concept of a :class:`Parameter` is central to lmfit, one normally creates and interacts with a :class:`Parameters` instance that contains many :class:`Parameter` objects. For example, the objective functions you write for lmfit will take an instance of :class:`Parameters` as its first argument. A table of parameter values, bounds and other attributes can be printed using :meth:`Parameters.pretty_print`. The :class:`Parameter` class ======================================== .. autoclass:: Parameter See :ref:`bounds_chapter` for details on the math used to implement the bounds with :attr:`min` and :attr:`max`. The :attr:`expr` attribute can contain a mathematical expression that will be used to compute the value for the Parameter at each step in the fit. See :ref:`constraints_chapter` for more details and examples of this feature. .. index:: Removing a Constraint Expression .. automethod:: set The :class:`Parameters` class ======================================== .. autoclass:: Parameters .. automethod:: add .. automethod:: add_many .. automethod:: pretty_print .. automethod:: valuesdict .. automethod:: dumps .. 
automethod:: dump .. automethod:: eval .. automethod:: loads .. automethod:: load Simple Example ================== A basic example making use of :class:`~lmfit.parameter.Parameters` and the :func:`~lmfit.minimizer.minimize` function (discussed in the next chapter) might look like this: .. jupyter-execute:: ../examples/doc_parameters_basic.py :hide-output: Here, the objective function explicitly unpacks each Parameter value. This can be simplified using the :class:`Parameters` :meth:`valuesdict` method, which would make the objective function ``fcn2min`` above look like: .. jupyter-execute:: def fcn2min(params, x, data): """Model a decaying sine wave and subtract data.""" v = params.valuesdict() model = v['amp'] * np.sin(x*v['omega'] + v['shift']) * np.exp(-x*x*v['decay']) return model - data The results are identical, and the difference is a stylistic choice. lmfit-py-1.0.0/doc/sphinx/000077500000000000000000000000001357751001700153355ustar00rootroot00000000000000lmfit-py-1.0.0/doc/sphinx/ext_imgmath.py000066400000000000000000000006151357751001700202170ustar00rootroot00000000000000# sphinx extensions for mathjax extensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks', 'sphinx.ext.imgmath', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'IPython.sphinxext.ipython_console_highlighting', 'jupyter_sphinx.execute', 'sphinx.ext.imgconverter'] lmfit-py-1.0.0/doc/sphinx/ext_mathjax.py000066400000000000000000000006201357751001700202210ustar00rootroot00000000000000# sphinx extensions for mathjax extensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'IPython.sphinxext.ipython_console_highlighting', 'jupyter_sphinx.execute', 'sphinx_gallery.gen_gallery'] lmfit-py-1.0.0/doc/sphinx/theme/000077500000000000000000000000001357751001700164375ustar00rootroot00000000000000lmfit-py-1.0.0/doc/sphinx/theme/lmfitdoc/000077500000000000000000000000001357751001700202405ustar00rootroot00000000000000lmfit-py-1.0.0/doc/sphinx/theme/lmfitdoc/layout.html000066400000000000000000000050161357751001700224450ustar00rootroot00000000000000{# sphinxdoc/layout.html ~~~~~~~~~~~~~~~~~~~~~ Sphinx layout template for the sphinxdoc theme. :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. #} {%- extends "basic/layout.html" %} {%- block extrahead %} {% endblock %} {% block rootrellink %}

  • [intro|
  • parameters|
  • minimize|
  • model|
  • built-in models|
  • confidence intervals|
  • bounds|
  • constraints|
  • {% endblock %} {% block relbar1 %}
    LMFIT Contents Examples Download
    Non-Linear Least-Squares Minimization and Curve-Fitting for Python FAQ Support Develop
    {{ super() }} {% endblock %} {# put the sidebar before the body #} {% block sidebar1 %}{{ sidebar() }}{% endblock %} {% block sidebar2 %}{% endblock %} lmfit-py-1.0.0/doc/sphinx/theme/lmfitdoc/static/000077500000000000000000000000001357751001700215275ustar00rootroot00000000000000lmfit-py-1.0.0/doc/sphinx/theme/lmfitdoc/static/contents.png000066400000000000000000000003121357751001700240660ustar00rootroot00000000000000‰PNG  IHDR(?¶wsRGB®Îé pHYs  šœtIMEØ 7C{´ÌtEXtCommentCreated with GIMPW7IDAT×¥9 ÂZþÿ^']ÔxŒ.$@”Z[‚!£8EÈž-«oöÃoì\éà¦K©IEND®B`‚lmfit-py-1.0.0/doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t000066400000000000000000000144631357751001700243750ustar00rootroot00000000000000/* * lmfitdoc.css_t * minor riff on sphinxdoc.css_t * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * Sphinx stylesheet -- sphinxdoc theme. Originally created by * Armin Ronacher for Werkzeug. * * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ @import url("basic.css"); /* -- page layout ----------------------------------------------------------- */ body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; font-size: 14px; letter-spacing: -0.01em; line-height: 150%; text-align: center; background-color: #D6DAC4; color: black; padding: 0; border: 0px solid #D0D0C0; margin: 15px 15px 15px 15px; min-width: 740px; } div.document { background-color: white; text-align: left; background-image: url(contents.png); background-repeat: repeat-x; } div.bodywrapper { margin: 0 {{ theme_sidebarwidth|toint + 10 }}px 0 0; border-right: 1px solid #ccc; } div.body { margin: 0; padding: 0.5em 20px 20px 20px; } div.related { font-size: 1em; background-color: #0D0; } div.related ul { height: 2em; border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; background-color: #F0EFE4; color: #157; } div.related ul li { margin: 0; padding: 0; height: 2em; float: left; background-color: #D0000; } div.related ul li.right { float: right; margin-right: 5px; } div.related ul li a { margin: 0; padding: 0 5px 0 5px; line-height: 1.75em; color: #EE9816; color: #157; } div.related ul li a:hover { color: #822; } div.sphinxsidebarwrapper { padding: 0; } div.sphinxsidebar { margin: 0; padding: 0.5em 15px 15px 0; width: {{ theme_sidebarwidth|toint - 20 }}px; float: right; font-size: 1em; text-align: left; } div.sphinxsidebar h3, div.sphinxsidebar h4 { margin: 1em 0 0.5em 0; font-size: 1em; padding: 0.1em 0 0.1em 0.5em; color: #157; border: 1px solid #A0A090; background-color: #D0D0C4; } div.sphinxsidebar h3 a { color: #157; background-color: #D0D0C4; } div.sphinxsidebar ul { padding-left: 1.5em; margin-top: 7px; padding: 0; line-height: 130%; } div.sphinxsidebar ul ul { margin-left: 20px; } div.footer { background-color: #E0E8D4; color: #86989B; padding: 3px 8px 3px 0; clear: both; font-size: 0.8em; text-align: right; } div.footer a { color: #86989B; text-decoration: underline; } /* -- body styles ----------------------------------------------------------- */ p { margin: 0.8em 0 0.5em 0; } a { color: #CA7900; text-decoration: none; } a:hover { color: #2491CF; } div.body a { text-decoration: underline; } h1 { padding: 0.2em 0 0.2em 0; margin: 0.7em 0 0.3em 0; font-size: 1.5em; color: #157; background-color: #F0EFE4; } h2 { padding: 0.2em 0 0.2em 0; margin: 1.3em 0 0.2em 0; font-size: 1.35em; padding: 0; background-color: #FAFAF0; } h3 { padding: 0.2em 0 0.2em 0; margin: 1em 0 -0.3em 0; font-size: 1.2em; background-color: #FBFBF3; } div.body h1 a, div.body h2 a, div.body 
h3 a, div.body h4 a, div.body h5 a, div.body h6 a { color: black!important; } h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { display: none; margin: 0 0 0 0.3em; padding: 0 0.2em 0 0.2em; color: #aaa!important; } h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { display: inline; } h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, h5 a.anchor:hover, h6 a.anchor:hover { color: #777; background-color: #eee; } a.headerlink { color: #c60f0f!important; font-size: 1em; margin-left: 6px; padding: 0 4px 0 4px; text-decoration: none!important; } a.headerlink:hover { background-color: #ccc; color: white!important; } cite, code, tt { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.01em; } tt { background-color: #f2f2f2; border-bottom: 1px solid #ddd; color: #333; } tt.descname, tt.descclassname, tt.xref { border: 0; } hr { border: 1px solid #abc; margin: 2em; } a tt { border: 0; color: #CA7900; } a tt:hover { color: #2491CF; } pre { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.015em; line-height: 120%; padding: 0.5em; border: 1px solid #ccc; background-color: #f8f8f8; } pre a { color: inherit; text-decoration: underline; } td.linenos pre { padding: 0.5em 0; } div.quotebar { background-color: #f8f8f8; max-width: 250px; float: right; padding: 2px 7px; border: 1px solid #ccc; } div.topic { background-color: #f8f8f8; } table { border-collapse: collapse; margin: 0 -0.5em 0 -0.5em; } table td, table th { padding: 0.2em 0.5em 0.2em 0.5em; } div.admonition, div.warning { font-size: 0.9em; margin: 1em 0 1em 0; border: 1px solid #86989B; background-color: #f7f7f7; padding: 0; } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; border-bottom: 1px solid #86989B; font-weight: bold; background-color: #AFC1C4; } div.warning { border: 1px solid #940000; } div.warning p.admonition-title { background-color: #CF0000; border-bottom-color: #940000; } div.admonition ul, div.admonition ol, div.warning ul, div.warning ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } div.versioninfo { margin: 1em 0 0 0; border: 1px solid #ccc; background-color: #DDEAF0; padding: 8px; line-height: 1.3em; font-size: 0.9em; } .viewcode-back { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; } div.viewcode-block:target { background-color: #f4debf; border-top: 1px solid #ac9; border-bottom: 1px solid #ac9; } lmfit-py-1.0.0/doc/sphinx/theme/lmfitdoc/static/navigation.png000066400000000000000000000003321357751001700243720ustar00rootroot00000000000000‰PNG  IHDR<§ñÐúsRGB®Îé pHYs  šœtIMEØ y݉štEXtCommentCreated with GIMPWGIDATÓ½ÁÀ0 ÂûOëB©t~8qgª*°m,© 0Ö{,šB†ÀÌt—Ø6oœ.™qú\Y›~t7Ö"Lð“ßIEND®B`‚lmfit-py-1.0.0/doc/sphinx/theme/lmfitdoc/theme.conf000066400000000000000000000001141357751001700222050ustar00rootroot00000000000000[theme] inherit = basic stylesheet = lmfitdoc.css pygments_style = friendly lmfit-py-1.0.0/doc/support.rst000066400000000000000000000027771357751001700163070ustar00rootroot00000000000000.. _support_chapter: =========================== Getting Help =========================== .. 
_mailing list: https://groups.google.com/group/lmfit-py .. _github issues: https://github.com/lmfit/lmfit-py/issues If you have questions, comments, or suggestions for LMFIT, please use the `mailing list`_. This provides an on-line conversation that is archived and can be searched with standard web searches. If you find a bug in the code or documentation, use `GitHub Issues`_ to submit a report. If you have an idea for how to solve the problem and are familiar with Python and GitHub, submitting a GitHub Pull Request would be greatly appreciated. If you are unsure whether to use the mailing list or the Issue tracker, please start a conversation on the `mailing list`_. That is, the problem you're having may or may not be due to a bug. If it is due to a bug, creating an Issue from the conversation is easy. If it is not a bug, the problem will be discussed and then the Issue will be closed. While one *can* search through closed Issues on GitHub, these are not so easily searched, and the conversation is not easily useful to others later. Starting the conversation on the mailing list with "How do I do this?" or "Why didn't this work?" instead of "This should work and doesn't" is generally preferred, and will better help others with similar questions. Of course, there is not always an obvious way to decide if something is a Question or an Issue, and we will try our best to engage in all discussions. lmfit-py-1.0.0/doc/whatsnew.rst000066400000000000000000000342221357751001700164210ustar00rootroot00000000000000.. _whatsnew_chapter: ===================== Release Notes ===================== .. _lmfit GitHub repository: https://github.com/lmfit/lmfit-py This section discusses changes between versions, especially changes significant to the use and behavior of the library. This is not meant to be a comprehensive list of changes. For such a complete record, consult the `lmfit GitHub repository`_. .. _whatsnew_100_label: Version 1.0.0 Release Notes ============================ **Version 1.0.0 supports Python 3.5, 3.6, 3.7, and 3.8** New features: - no new features are introduced in 1.0.0. Improvements: - support for Python 2 and use of the ``six`` package are removed. (PR #612) Various: - documentation updates to clarify the use of ``emcee``. (PR #614) .. _whatsnew_0915_label: Version 0.9.15 Release Notes ============================ **Version 0.9.15 is the last release that supports Python 2.7**; it now also fully supports Python 3.8.
New features, improvements, and bug fixes: - move application of parameter bounds to setter instead of getter (PR #587) - add support for non-array Jacobian types in least_squares (Issue #588, @ezwelty in PR #589) - add more information (i.e., acor and acceptance_fraction) about emcee fit (@j-zimmermann in PR #593) - "name" is now a required positional argument for Parameter class, update the magic methods (PR #595) - fix nvars count and bound handling in confidence interval calculations (Issue #597, PR #598) - support Python 3.8; requires asteval >= 0.9.16 (PR #599) - only support emcee version 3 (i.e., no PTSampler anymore) (PR #600) - fix and refactor prob_bunc in confidence interval calculations (PR #604) - fix adding Parameters with custom user-defined symbols (Issue #607, PR #608; thanks to @gbouvignies for the report) Various: - bump requirements to LTS version of SciPy/ NumPy and code clean-up (PR #591) - documentation updates (PR #596, and others) - improve test coverage and Travis CI updates (PR #595, and others) - update pre-commit hooks and configuration in setup.cfg To-be deprecated: - function Parameter.isParameter and conversion from uncertainties.core.Variable to value in _getval (PR #595) .. _whatsnew_0914_label: Version 0.9.14 Release Notes ========================================== New features: - the global optimizers ``shgo`` and ``dual_annealing`` (new in SciPy v1.2) are now supported (Issue #527; PRs #545 and #556) - ``eval`` method added to the Parameter class (PR #550 by @zobristnicholas) - avoid ZeroDivisionError in ``printfuncs.params_html_table`` (PR #552 by @aaristov and PR #559) - add parallelization to ``brute`` method (PR #564, requires SciPy v1.3) Bug fixes: - consider only varying parameters when reporting potential issues with calculating errorbars (PR #549) and compare ``value`` to both ``min`` and ``max`` (PR #571) - guard against division by zero in lineshape functions and ``FWHM`` and ``height`` expression calculations (PR #545) - fix issues with restoring a saved Model (Issue #553; PR #554) - always set ``result.method`` for ``emcee`` algorithm (PR #558) - more careful adding of parameters to handle out-of-order constraint expressions (Issue #560; PR #561) - make sure all parameters in Model.guess() use prefixes (PRs #567 and #569) - use ``inspect.signature`` for PY3 to support wrapped functions (Issue #570; PR #576) - fix ``result.nfev``` for ``brute`` method when using parallelization (Issue #578; PR #579) Various: - remove "missing" in the Model class (replaced by nan_policy) and "drop" as option to nan_policy (replaced by omit) deprecated since 0.9 (PR #565). - deprecate 'report_errors' in printfuncs.py (PR #571) - updates to the documentation to use ``jupyter-sphinx`` to include examples/output (PRs #573 and #575) - include a Gallery with examples in the documentation using ``sphinx-gallery`` (PR #574 and #583) - improve test-coverage (PRs #571, #572 and #585) - add/clarify warning messages when NaN values are detected (PR #586) - several updates to docstrings (Issue #584; PR #583, and others) - update pre-commit hooks and several docstrings .. 
_whatsnew_0913_label: Version 0.9.13 Release Notes ========================================== New features: - Clearer warning message in fit reports when uncertainties should but cannot be estimated, including guesses of which Parameters to examine (#521, #543) - SplitLorenztianModel and split_lorentzian function (#523) - HTML representations for Parameter, MinimizerResult, and Model so that they can be printed better with Jupyter (#524, #548) - support parallelization for differential evolution (#526) Bug fixes: - delay import of matplotlib (and so, the selection of its backend) as late as possible (#528, #529) - fix for saving, loading, and reloading ModelResults (#534) - fix to leastsq to report the best-fit values, not the values tried last (#535, #536) - fix synchronization of all parameter values on Model.guess() (#539, #542) - improve deprecation warnings for outdated nan_policy keywords (#540) - fix for edge case in gformat() (#547) Project management: - using pre-commit framework to improve and enforce coding style (#533) - added code coverage report to github main page - updated docs, github templates, added several tests. - dropped support and testing for Python 3.4. .. _whatsnew_0912_label: Version 0.9.12 Release Notes ========================================== Lmfit package is now licensed under BSD-3. New features: - SkewedVoigtModel was added as built-in model (Issue #493) - Parameter uncertainties and correlations are reported for least_squares - Plotting of complex-valued models is now handled in ModelResult class (PR #503) - A model's independent variable is allowed to be an object (Issue #492) - Added ``usersyms`` to Parameters() initialization to make it easier to add custom functions and symbols (Issue #507) - the ``numdifftools`` package can be used to calculate parameter uncertainties and correlations for all solvers that do not natively support this (PR #506) - ``emcee`` can now be used as method keyword-argument to Minimizer.minimize and minimize function, which allows for using ``emcee`` in the Model class (PR #512; see ``examples/example_emcee_with_Model.py``) (Bug)fixes: - asteval errors are now flushed after raising (Issue #486) - max_time and evaluation time for ExpressionModel increased to 1 hour (Issue #489) - loading a saved ModelResult now restores all attributes (Issue #491) - development versions of scipy and emcee are now supported (Issue #497 and PR #496) - ModelResult.eval() do no longer overwrite the userkws dictionary (Issue #499) - running the test suite requires ``pytest`` only (Issue #504) - improved FWHM calculation for VoigtModel (PR #514) .. _whatsnew_0910_label: .. _Andrea Gavana: http://infinity77.net/global_optimization/index.html .. _AMPGO paper: http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf Version 0.9.10 Release Notes ========================================== Two new global algorithms were added: basinhopping and AMPGO. Basinhopping wraps the method present in ``scipy``, and more information can be found in the documentation (:func:`~lmfit.minimizer.Minimizer.basinhopping` and :scipydoc:`optimize.basinhopping`). The Adaptive Memory Programming for Global Optimization (AMPGO) algorithm was adapted from Python code written by `Andrea Gavana`_. A more detailed explanation of the algorithm is available in the `AMPGO paper`_ and specifics for lmfit can be found in the :func:`~lmfit.minimizer.Minimizer.ampgo` function. 
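As a minimal sketch (assuming a user-defined objective function ``objfunc`` and a ``Parameters`` instance ``params`` already exist), either global solver can be selected through the usual ``method`` keyword::

    from lmfit import minimize

    # objfunc and params are placeholders defined elsewhere

    # global search using the scipy basinhopping wrapper
    result_bh = minimize(objfunc, params, method='basinhopping')

    # global search using the AMPGO algorithm
    result_ampgo = minimize(objfunc, params, method='ampgo')
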
Lmfit uses the external uncertainties (https://github.com/lebigot/uncertainties) package (available on PyPI), instead of distributing its own fork. An ``AbortFitException`` is now raised when the fit is aborted by the user (i.e., by using ``iter_cb``). Bugfixes: - all exceptions are allowed when trying to import matplotlib - simplify and fix corner-case errors when testing closeness of large integers .. _whatsnew_099_label: Version 0.9.9 Release Notes ========================================== Lmfit now uses the asteval (https://github.com/newville/asteval) package instead of distributing its own copy. The minimum required asteval version is 0.9.12, which is available on PyPI. If you see import errors related to asteval, please make sure that you actually have the latest version installed. .. _whatsnew_096_label: Version 0.9.6 Release Notes ========================================== Support for SciPy 0.14 has been dropped: SciPy 0.15 is now required. This is especially important for lmfit maintenance, as it means we can now rely on SciPy having code for differential evolution and do not need to keep a local copy. A brute force method was added, which can be used either with :meth:`Minimizer.brute` or using the ``method='brute'`` option to :meth:`Minimizer.minimize`. This method requires finite bounds on all varying parameters, or that parameters have a finite ``brute_step`` attribute set to specify the step size. Custom cost functions can now be used for the scalar minimizers using the ``reduce_fcn`` option. Many improvements to documentation and docstrings in the code were made. As part of that effort, all API documentation in this main Sphinx documentation now derives from the docstrings. Uncertainties in the resulting best-fit for a model can now be calculated from the uncertainties in the model parameters. Parameters have two new attributes: ``brute_step``, to specify the step size when using the ``brute`` method, and ``user_data``, which is unused but can be used to hold additional information the user may desire. This will be preserved on copy and pickling. Several bug fixes and cleanups. Versioneer was updated to 0.18. Tests can now be run either with nose or pytest. .. _whatsnew_095_label: Version 0.9.5 Release Notes ========================================== Support for Python 2.6 and SciPy 0.13 has been dropped. .. _whatsnew_094_label: Version 0.9.4 Release Notes ========================================== Some support for the new ``least_squares`` routine from SciPy 0.17 has been added. Parameters can now be used directly in floating point or array expressions, so that the Parameter value does not need ``sigma = params['sigma'].value``. The older, explicit usage still works, but the docs, samples, and tests have been updated to use the simpler usage. Support for Python 2.6 and SciPy 0.13 is now explicitly deprecated and wil be dropped in version 0.9.5. .. _whatsnew_093_label: Version 0.9.3 Release Notes ========================================== Models involving complex numbers have been improved. The ``emcee`` module can now be used for uncertainty estimation. Many bug fixes, and an important fix for performance slowdown on getting parameter values. ASV benchmarking code added. .. _whatsnew_090_label: Version 0.9.0 Release Notes ========================================== This upgrade makes an important, non-backward-compatible change to the way many fitting scripts and programs will work. Scripts that work with version 0.8.3 will not work with version 0.9.0 and vice versa. 
The change was not made lightly or without ample discussion, and is really an improvement. Modifying scripts that did work with 0.8.3 to work with 0.9.0 is easy, but needs to be done. Summary ~~~~~~~~~~~~ The upgrade from 0.8.3 to 0.9.0 introduced the :class:`MinimizerResult` class (see :ref:`fit-results-label`) which is now used to hold the return value from :func:`minimize` and :meth:`Minimizer.minimize`. This returned object contains many goodness of fit statistics, and holds the optimized parameters from the fit. Importantly, the parameters passed into :func:`minimize` and :meth:`Minimizer.minimize` are no longer modified by the fit. Instead, a copy of the passed-in parameters is made which is changed and returns as the :attr:`params` attribute of the returned :class:`MinimizerResult`. Impact ~~~~~~~~~~~~~ This upgrade means that a script that does:: my_pars = Parameters() my_pars.add('amp', value=300.0, min=0) my_pars.add('center', value= 5.0, min=0, max=10) my_pars.add('decay', value= 1.0, vary=False) result = minimize(objfunc, my_pars) will still work, but that ``my_pars`` will **NOT** be changed by the fit. Instead, ``my_pars`` is copied to an internal set of parameters that is changed in the fit, and this copy is then put in ``result.params``. To look at fit results, use ``result.params``, not ``my_pars``. This has the effect that ``my_pars`` will still hold the starting parameter values, while all of the results from the fit are held in the ``result`` object returned by :func:`minimize`. If you want to do an initial fit, then refine that fit to, for example, do a pre-fit, then refine that result different fitting method, such as:: result1 = minimize(objfunc, my_pars, method='nelder') result1.params['decay'].vary = True result2 = minimize(objfunc, result1.params, method='leastsq') and have access to all of the starting parameters ``my_pars``, the result of the first fit ``result1``, and the result of the final fit ``result2``. Discussion ~~~~~~~~~~~~~~ The main goal for making this change were to 1. give a better return value to :func:`minimize` and :meth:`Minimizer.minimize` that can hold all of the information about a fit. By having the return value be an instance of the :class:`MinimizerResult` class, it can hold an arbitrary amount of information that is easily accessed by attribute name, and even be given methods. Using objects is good! 2. To limit or even eliminate the amount of "state information" a :class:`Minimizer` holds. By state information, we mean how much of the previous fit is remembered after a fit is done. Keeping (and especially using) such information about a previous fit means that a :class:`Minimizer` might give different results even for the same problem if run a second time. While it's desirable to be able to adjust a set of :class:`Parameters` re-run a fit to get an improved result, doing this by changing an internal attribute (:attr:`Minimizer.params`) has the undesirable side-effect of not being able to "go back", and makes it somewhat cumbersome to keep track of changes made while adjusting parameters and re-running fits. 
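As a minimal sketch of the behavior described above (reusing the hypothetical ``objfunc`` and ``my_pars`` from the Impact section), the passed-in parameters keep their starting values, while the fitted values live on the returned result::

    result = minimize(objfunc, my_pars)
    print(my_pars['amp'].value)        # still the starting value (300.0)
    print(result.params['amp'].value)  # best-fit value from this run
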
lmfit-py-1.0.0/examples/000077500000000000000000000000001357751001700150755ustar00rootroot00000000000000lmfit-py-1.0.0/examples/NIST_Gauss2.dat000066400000000000000000000200031357751001700175630ustar00rootroot00000000000000# NIST/ITL StRD # Dataset Name: Gauss2 (Gauss2.dat) # # File Format: ASCII # Starting Values (lines 41 to 48) # Certified Values (lines 41 to 53) # Data (lines 61 to 310) # # Procedure: Nonlinear Least Squares Regression # # Description: The data are two slightly-blended Gaussians on a # decaying exponential baseline plus normally # distributed zero-mean noise with variance = 6.25. # # Reference: Rust, B., NIST (1996). # # # # # # # # # # Data: 1 Response (y) # 1 Predictor (x) # 250 Observations # Lower Level of Difficulty # Generated Data # # Model: Exponential Class # 8 Parameters (b1 to b8) # # y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 ) # + b6*exp( -(x-b7)**2 / b8**2 ) + e # # # Starting values Certified Values # # Start 1 Start 2 Parameter Standard Deviation # b1 = 96.0 98.0 9.9018328406E+01 5.3748766879E-01 # b2 = 0.009 0.0105 1.0994945399E-02 1.3335306766E-04 # b3 = 103.0 103.0 1.0188022528E+02 5.9217315772E-01 # b4 = 106.0 105.0 1.0703095519E+02 1.5006798316E-01 # b5 = 18.0 20.0 2.3578584029E+01 2.2695595067E-01 # b6 = 72.0 73.0 7.2045589471E+01 6.1721965884E-01 # b7 = 151.0 150.0 1.5327010194E+02 1.9466674341E-01 # b8 = 18.0 20.0 1.9525972636E+01 2.6416549393E-01 # # Residual Sum of Squares: 1.2475282092E+03 # Residual Standard Deviation: 2.2704790782E+00 # Degrees of Freedom: 242 # Number of Observations: 250 # # # # # # # Data: y x 97.58776 1.000000 97.76344 2.000000 96.56705 3.000000 92.52037 4.000000 91.15097 5.000000 95.21728 6.000000 90.21355 7.000000 89.29235 8.000000 91.51479 9.000000 89.60966 10.000000 86.56187 11.00000 85.55316 12.00000 87.13054 13.00000 85.67940 14.00000 80.04851 15.00000 82.18925 16.00000 87.24081 17.00000 80.79407 18.00000 81.28570 19.00000 81.56940 20.00000 79.22715 21.00000 79.43275 22.00000 77.90195 23.00000 76.75468 24.00000 77.17377 25.00000 74.27348 26.00000 73.11900 27.00000 73.84826 28.00000 72.47870 29.00000 71.92292 30.00000 66.92176 31.00000 67.93835 32.00000 69.56207 33.00000 69.07066 34.00000 66.53983 35.00000 63.87883 36.00000 69.71537 37.00000 63.60588 38.00000 63.37154 39.00000 60.01835 40.00000 62.67481 41.00000 65.80666 42.00000 59.14304 43.00000 56.62951 44.00000 61.21785 45.00000 54.38790 46.00000 62.93443 47.00000 56.65144 48.00000 57.13362 49.00000 58.29689 50.00000 58.91744 51.00000 58.50172 52.00000 55.22885 53.00000 58.30375 54.00000 57.43237 55.00000 51.69407 56.00000 49.93132 57.00000 53.70760 58.00000 55.39712 59.00000 52.89709 60.00000 52.31649 61.00000 53.98720 62.00000 53.54158 63.00000 56.45046 64.00000 51.32276 65.00000 53.11676 66.00000 53.28631 67.00000 49.80555 68.00000 54.69564 69.00000 56.41627 70.00000 54.59362 71.00000 54.38520 72.00000 60.15354 73.00000 59.78773 74.00000 60.49995 75.00000 65.43885 76.00000 60.70001 77.00000 63.71865 78.00000 67.77139 79.00000 64.70934 80.00000 70.78193 81.00000 70.38651 82.00000 77.22359 83.00000 79.52665 84.00000 80.13077 85.00000 85.67823 86.00000 85.20647 87.00000 90.24548 88.00000 93.61953 89.00000 95.86509 90.00000 93.46992 91.00000 105.8137 92.00000 107.8269 93.00000 114.0607 94.00000 115.5019 95.00000 118.5110 96.00000 119.6177 97.00000 122.1940 98.00000 126.9903 99.00000 125.7005 100.00000 123.7447 101.00000 130.6543 102.00000 129.7168 103.00000 131.8240 104.00000 131.8759 105.00000 131.9994 106.0000 132.1221 107.0000 133.4414 108.0000 133.8252 
109.0000 133.6695 110.0000 128.2851 111.0000 126.5182 112.0000 124.7550 113.0000 118.4016 114.0000 122.0334 115.0000 115.2059 116.0000 118.7856 117.0000 110.7387 118.0000 110.2003 119.0000 105.17290 120.0000 103.44720 121.0000 94.54280 122.0000 94.40526 123.0000 94.57964 124.0000 88.76605 125.0000 87.28747 126.0000 92.50443 127.0000 86.27997 128.0000 82.44307 129.0000 80.47367 130.0000 78.36608 131.0000 78.74307 132.0000 76.12786 133.0000 79.13108 134.0000 76.76062 135.0000 77.60769 136.0000 77.76633 137.0000 81.28220 138.0000 79.74307 139.0000 81.97964 140.0000 80.02952 141.0000 85.95232 142.0000 85.96838 143.0000 79.94789 144.0000 87.17023 145.0000 90.50992 146.0000 93.23373 147.0000 89.14803 148.0000 93.11492 149.0000 90.34337 150.0000 93.69421 151.0000 95.74256 152.0000 91.85105 153.0000 96.74503 154.0000 87.60996 155.0000 90.47012 156.0000 88.11690 157.0000 85.70673 158.0000 85.01361 159.0000 78.53040 160.0000 81.34148 161.0000 75.19295 162.0000 72.66115 163.0000 69.85504 164.0000 66.29476 165.0000 63.58502 166.0000 58.33847 167.0000 57.50766 168.0000 52.80498 169.0000 50.79319 170.0000 47.03490 171.0000 46.47090 172.0000 43.09016 173.0000 34.11531 174.0000 39.28235 175.0000 32.68386 176.0000 30.44056 177.0000 31.98932 178.0000 23.63330 179.0000 23.69643 180.0000 20.26812 181.0000 19.07074 182.0000 17.59544 183.0000 16.08785 184.0000 18.94267 185.0000 18.61354 186.0000 17.25800 187.0000 16.62285 188.0000 13.48367 189.0000 15.37647 190.0000 13.47208 191.0000 15.96188 192.0000 12.32547 193.0000 16.33880 194.0000 10.438330 195.0000 9.628715 196.0000 13.12268 197.0000 8.772417 198.0000 11.76143 199.0000 12.55020 200.0000 11.33108 201.0000 11.20493 202.0000 7.816916 203.0000 6.800675 204.0000 14.26581 205.0000 10.66285 206.0000 8.911574 207.0000 11.56733 208.0000 11.58207 209.0000 11.59071 210.0000 9.730134 211.0000 11.44237 212.0000 11.22912 213.0000 10.172130 214.0000 12.50905 215.0000 6.201493 216.0000 9.019605 217.0000 10.80607 218.0000 13.09625 219.0000 3.914271 220.0000 9.567886 221.0000 8.038448 222.0000 10.231040 223.0000 9.367410 224.0000 7.695971 225.0000 6.118575 226.0000 8.793207 227.0000 7.796692 228.0000 12.45065 229.0000 10.61601 230.0000 6.001003 231.0000 6.765098 232.0000 8.764653 233.0000 4.586418 234.0000 8.390783 235.0000 7.209202 236.0000 10.012090 237.0000 7.327461 238.0000 6.525136 239.0000 2.840065 240.0000 10.323710 241.0000 4.790035 242.0000 8.376431 243.0000 6.263980 244.0000 2.705892 245.0000 8.362109 246.0000 8.983507 247.0000 3.362469 248.0000 1.182678 249.0000 4.875312 250.0000 lmfit-py-1.0.0/examples/doc_builtinmodels_nistgauss.py000066400000000000000000000025531357751001700232530ustar00rootroot00000000000000# import matplotlib.pyplot as plt import numpy as np from lmfit.models import ExponentialModel, GaussianModel dat = np.loadtxt('NIST_Gauss2.dat') x = dat[:, 1] y = dat[:, 0] exp_mod = ExponentialModel(prefix='exp_') pars = exp_mod.guess(y, x=x) gauss1 = GaussianModel(prefix='g1_') pars.update(gauss1.make_params()) pars['g1_center'].set(value=105, min=75, max=125) pars['g1_sigma'].set(value=15, min=3) pars['g1_amplitude'].set(value=2000, min=10) gauss2 = GaussianModel(prefix='g2_') pars.update(gauss2.make_params()) pars['g2_center'].set(value=155, min=125, max=175) pars['g2_sigma'].set(value=15, min=3) pars['g2_amplitude'].set(value=2000, min=10) mod = gauss1 + gauss2 + exp_mod init = mod.eval(pars, x=x) out = mod.fit(y, pars, x=x) print(out.fit_report(min_correl=0.5)) fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8)) axes[0].plot(x, y, 'b') 
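# add the initial guess and the best fit to the left panel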
axes[0].plot(x, init, 'k--', label='initial fit') axes[0].plot(x, out.best_fit, 'r-', label='best fit') axes[0].legend(loc='best') comps = out.eval_components(x=x) axes[1].plot(x, y, 'b') axes[1].plot(x, comps['g1_'], 'g--', label='Gaussian component 1') axes[1].plot(x, comps['g2_'], 'm--', label='Gaussian component 2') axes[1].plot(x, comps['exp_'], 'k--', label='Exponential component') axes[1].legend(loc='best') plt.show() # lmfit-py-1.0.0/examples/doc_builtinmodels_nistgauss2.py000066400000000000000000000020031357751001700233230ustar00rootroot00000000000000# import matplotlib.pyplot as plt import numpy as np from lmfit.models import ExponentialModel, GaussianModel dat = np.loadtxt('NIST_Gauss2.dat') x = dat[:, 1] y = dat[:, 0] exp_mod = ExponentialModel(prefix='exp_') gauss1 = GaussianModel(prefix='g1_') gauss2 = GaussianModel(prefix='g2_') def index_of(arrval, value): """return index of array *at or below* value """ if value < min(arrval): return 0 return max(np.where(arrval <= value)[0]) ix1 = index_of(x, 75) ix2 = index_of(x, 135) ix3 = index_of(x, 175) pars1 = exp_mod.guess(y[:ix1], x=x[:ix1]) pars2 = gauss1.guess(y[ix1:ix2], x=x[ix1:ix2]) pars3 = gauss2.guess(y[ix2:ix3], x=x[ix2:ix3]) pars = pars1 + pars2 + pars3 mod = gauss1 + gauss2 + exp_mod out = mod.fit(y, pars, x=x) print(out.fit_report(min_correl=0.5)) plt.plot(x, y, 'b') plt.plot(x, out.init_fit, 'k--', label='initial fit') plt.plot(x, out.best_fit, 'r-', label='best fit') plt.legend(loc='best') plt.show() # lmfit-py-1.0.0/examples/doc_builtinmodels_peakmodels.py000066400000000000000000000025201357751001700233510ustar00rootroot00000000000000# import matplotlib.pyplot as plt from numpy import loadtxt from lmfit.models import GaussianModel, LorentzianModel, VoigtModel data = loadtxt('test_peak.dat') x = data[:, 0] y = data[:, 1] # Gaussian model mod = GaussianModel() pars = mod.guess(y, x=x) out = mod.fit(y, pars, x=x) print(out.fit_report(min_correl=0.25)) plt.plot(x, y, 'b-') plt.plot(x, out.best_fit, 'r-', label='Gaussian Model') plt.legend(loc='best') plt.show() # Lorentzian model mod = LorentzianModel() pars = mod.guess(y, x=x) out = mod.fit(y, pars, x=x) print(out.fit_report(min_correl=0.25)) plt.figure() plt.plot(x, y, 'b-') plt.plot(x, out.best_fit, 'r-', label='Lorentzian Model') plt.legend(loc='best') plt.show() # Voigt model mod = VoigtModel() pars = mod.guess(y, x=x) out = mod.fit(y, pars, x=x) print(out.fit_report(min_correl=0.25)) fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8)) axes[0].plot(x, y, 'b-') axes[0].plot(x, out.best_fit, 'r-', label='Voigt Model\ngamma constrained') axes[0].legend(loc='best') # free gamma parameter pars['gamma'].set(value=0.7, vary=True, expr='') out_gamma = mod.fit(y, pars, x=x) axes[1].plot(x, y, 'b-') axes[1].plot(x, out_gamma.best_fit, 'r-', label='Voigt Model\ngamma unconstrained') axes[1].legend(loc='best') plt.show() # lmfit-py-1.0.0/examples/doc_builtinmodels_stepmodel.py000066400000000000000000000014411357751001700232220ustar00rootroot00000000000000# import matplotlib.pyplot as plt import numpy as np from lmfit.models import LinearModel, StepModel x = np.linspace(0, 10, 201) y = np.ones_like(x) y[:48] = 0.0 y[48:77] = np.arange(77-48)/(77.0-48) np.random.seed(0) y = 110.2 * (y + 9e-3*np.random.randn(x.size)) + 12.0 + 2.22*x step_mod = StepModel(form='erf', prefix='step_') line_mod = LinearModel(prefix='line_') pars = line_mod.make_params(intercept=y.min(), slope=0) pars += step_mod.guess(y, x=x, center=2.5) mod = step_mod + line_mod out = mod.fit(y, pars, x=x) 
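# print the fit report and plot the data with the initial and best fits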
print(out.fit_report()) plt.plot(x, y, 'b') plt.plot(x, out.init_fit, 'k--', label='initial fit') plt.plot(x, out.best_fit, 'r-', label='best fit') plt.legend(loc='best') plt.show() # lmfit-py-1.0.0/examples/doc_confidence_advanced.py000066400000000000000000000036771357751001700222330ustar00rootroot00000000000000# import matplotlib.pyplot as plt import numpy as np import lmfit x = np.linspace(1, 10, 250) np.random.seed(0) y = 3.0*np.exp(-x/2) - 5.0*np.exp(-(x-0.1)/10.) + 0.1*np.random.randn(x.size) p = lmfit.Parameters() p.add_many(('a1', 4.), ('a2', 4.), ('t1', 3.), ('t2', 3.)) def residual(p): return p['a1']*np.exp(-x/p['t1']) + p['a2']*np.exp(-(x-0.1)/p['t2']) - y # create Minimizer mini = lmfit.Minimizer(residual, p, nan_policy='propagate') # first solve with Nelder-Mead algorithm out1 = mini.minimize(method='Nelder') # then solve with Levenberg-Marquardt using the # Nelder-Mead solution as a starting point out2 = mini.minimize(method='leastsq', params=out1.params) lmfit.report_fit(out2.params, min_correl=0.5) ci, trace = lmfit.conf_interval(mini, out2, sigmas=[1, 2], trace=True) lmfit.printfuncs.report_ci(ci) # plot data and best fit plt.figure() plt.plot(x, y, 'b') plt.plot(x, residual(out2.params) + y, 'r-') # plot confidence intervals (a1 vs t2 and a2 vs t2) fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8)) cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a1', 't2', 30, 30) ctp = axes[0].contourf(cx, cy, grid, np.linspace(0, 1, 11)) fig.colorbar(ctp, ax=axes[0]) axes[0].set_xlabel('a1') axes[0].set_ylabel('t2') cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a2', 't2', 30, 30) ctp = axes[1].contourf(cx, cy, grid, np.linspace(0, 1, 11)) fig.colorbar(ctp, ax=axes[1]) axes[1].set_xlabel('a2') axes[1].set_ylabel('t2') # plot dependence between two parameters fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8)) cx1, cy1, prob = trace['a1']['a1'], trace['a1']['t2'], trace['a1']['prob'] cx2, cy2, prob2 = trace['t2']['t2'], trace['t2']['a1'], trace['t2']['prob'] axes[0].scatter(cx1, cy1, c=prob, s=30) axes[0].set_xlabel('a1') axes[0].set_ylabel('t2') axes[1].scatter(cx2, cy2, c=prob2, s=30) axes[1].set_xlabel('t2') axes[1].set_ylabel('a1') plt.show() # lmfit-py-1.0.0/examples/doc_confidence_basic.py000066400000000000000000000007701357751001700215360ustar00rootroot00000000000000# import numpy as np import lmfit x = np.linspace(0.3, 10, 100) np.random.seed(0) y = 1/(0.1*x) + 2 + 0.1*np.random.randn(x.size) pars = lmfit.Parameters() pars.add_many(('a', 0.1), ('b', 1)) def residual(p): return 1/(p['a']*x) + p['b'] - y mini = lmfit.Minimizer(residual, pars) result = mini.minimize() print(lmfit.fit_report(result.params)) ci = lmfit.conf_interval(mini, result) lmfit.printfuncs.report_ci(ci) # lmfit-py-1.0.0/examples/doc_fitting_emcee.py000066400000000000000000000065641357751001700211110ustar00rootroot00000000000000# import numpy as np import lmfit try: import matplotlib.pyplot as plt HASPYLAB = True except ImportError: HASPYLAB = False HASPYLAB = False try: import corner HASCORNER = True except ImportError: HASCORNER = False x = np.linspace(1, 10, 250) np.random.seed(0) y = (3.0*np.exp(-x/2) - 5.0*np.exp(-(x-0.1) / 10.) 
+ 0.1*np.random.randn(x.size)) if HASPYLAB: plt.plot(x, y, 'b') plt.show() p = lmfit.Parameters() p.add_many(('a1', 4), ('a2', 4), ('t1', 3), ('t2', 3., True)) def residual(p): v = p.valuesdict() return v['a1']*np.exp(-x/v['t1']) + v['a2']*np.exp(-(x-0.1) / v['t2']) - y mi = lmfit.minimize(residual, p, method='nelder', nan_policy='omit') lmfit.printfuncs.report_fit(mi.params, min_correl=0.5) if HASPYLAB: plt.figure() plt.plot(x, y, 'b') plt.plot(x, residual(mi.params) + y, 'r', label='best fit') plt.legend(loc='best') plt.show() # Place bounds on the ln(sigma) parameter that emcee will automatically add # to estimate the true uncertainty in the data since is_weighted=False mi.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2)) res = lmfit.minimize(residual, method='emcee', nan_policy='omit', burn=300, steps=1000, thin=20, params=mi.params, is_weighted=False, progress=False) if HASPYLAB and HASCORNER: emcee_corner = corner.corner(res.flatchain, labels=res.var_names, truths=list(res.params.valuesdict().values())) plt.show() if HASPYLAB: plt.plot(res.acceptance_fraction) plt.xlabel('walker') plt.ylabel('acceptance fraction') plt.show() if hasattr(res, "acor"): print("Autocorrelation time for the parameters:") print("----------------------------------------") for i, par in enumerate(p): print(par, res.acor[i]) print("\nmedian of posterior probability distribution") print('--------------------------------------------') lmfit.report_fit(res.params) # find the maximum likelihood solution highest_prob = np.argmax(res.lnprob) hp_loc = np.unravel_index(highest_prob, res.lnprob.shape) mle_soln = res.chain[hp_loc] for i, par in enumerate(p): p[par].value = mle_soln[i] print('\nMaximum Likelihood Estimation from emcee ') print('-------------------------------------------------') print('Parameter MLE Value Median Value Uncertainty') fmt = ' {:5s} {:11.5f} {:11.5f} {:11.5f}'.format for name, param in p.items(): print(fmt(name, param.value, res.params[name].value, res.params[name].stderr)) if HASPYLAB: plt.figure() plt.plot(x, y, 'b') plt.plot(x, residual(mi.params) + y, 'r', label='Nelder-Mead') plt.plot(x, residual(res.params) + y, 'k--', label='emcee') plt.legend() plt.show() print('\nError Estimates from emcee ') print('------------------------------------------------------') print('Parameter -2sigma -1sigma median +1sigma +2sigma ') for name in p.keys(): quantiles = np.percentile(res.flatchain[name], [2.275, 15.865, 50, 84.135, 97.275]) median = quantiles[2] err_m2 = quantiles[0] - median err_m1 = quantiles[1] - median err_p1 = quantiles[3] - median err_p2 = quantiles[4] - median fmt = ' {:5s} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f}'.format print(fmt(name, err_m2, err_m1, median, err_p1, err_p2)) lmfit-py-1.0.0/examples/doc_fitting_withreport.py000066400000000000000000000021701357751001700222270ustar00rootroot00000000000000# from numpy import exp, linspace, pi, random, sign, sin from lmfit import Parameters, fit_report, minimize p_true = Parameters() p_true.add('amp', value=14.0) p_true.add('period', value=5.46) p_true.add('shift', value=0.123) p_true.add('decay', value=0.032) def residual(pars, x, data=None): """Model a decaying sine wave and subtract data.""" vals = pars.valuesdict() amp = vals['amp'] per = vals['period'] shift = vals['shift'] decay = vals['decay'] if abs(shift) > pi/2: shift = shift - sign(shift)*pi model = amp * sin(shift + x/per) * exp(-x*x*decay*decay) if data is None: return model return model - data random.seed(0) x = linspace(0.0, 250., 1001) noise = 
random.normal(scale=0.7215, size=x.size) data = residual(p_true, x) + noise fit_params = Parameters() fit_params.add('amp', value=13.0) fit_params.add('period', value=2) fit_params.add('shift', value=0.0) fit_params.add('decay', value=0.02) out = minimize(residual, fit_params, args=(x,), kws={'data': data}) print(fit_report(out)) # lmfit-py-1.0.0/examples/doc_model_composite.py000066400000000000000000000034231357751001700214600ustar00rootroot00000000000000# import matplotlib.pyplot as plt import numpy as np from lmfit import CompositeModel, Model from lmfit.lineshapes import gaussian, step # create data from broadened step x = np.linspace(0, 10, 201) y = step(x, amplitude=12.5, center=4.5, sigma=0.88, form='erf') np.random.seed(0) y = y + np.random.normal(scale=0.35, size=x.size) def jump(x, mid): """Heaviside step function.""" o = np.zeros(x.size) imid = max(np.where(x <= mid)[0]) o[imid:] = 1.0 return o def convolve(arr, kernel): """Simple convolution of two arrays.""" npts = min(arr.size, kernel.size) pad = np.ones(npts) tmp = np.concatenate((pad*arr[0], arr, pad*arr[-1])) out = np.convolve(tmp, kernel, mode='valid') noff = int((len(out) - npts) / 2) return out[noff:noff+npts] # create Composite Model using the custom convolution operator mod = CompositeModel(Model(jump), Model(gaussian), convolve) pars = mod.make_params(amplitude=1, center=3.5, sigma=1.5, mid=5.0) # 'mid' and 'center' should be completely correlated, and 'mid' is # used as an integer index, so a very poor fit variable: pars['mid'].vary = False # fit this model to data array y result = mod.fit(y, params=pars, x=x) print(result.fit_report()) # generate components comps = result.eval_components(x=x) # plot results fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8)) axes[0].plot(x, y, 'bo') axes[0].plot(x, result.init_fit, 'k--', label='initial fit') axes[0].plot(x, result.best_fit, 'r-', label='best fit') axes[0].legend(loc='best') axes[1].plot(x, y, 'bo') axes[1].plot(x, 10*comps['jump'], 'k--', label='Jump component') axes[1].plot(x, 10*comps['gaussian'], 'r-', label='Gaussian component') axes[1].legend(loc='best') plt.show() # lmfit-py-1.0.0/examples/doc_model_gaussian.py000066400000000000000000000012301357751001700212620ustar00rootroot00000000000000# import matplotlib.pyplot as plt from numpy import exp, loadtxt, pi, sqrt from lmfit import Model data = loadtxt('model1d_gauss.dat') x = data[:, 0] y = data[:, 1] def gaussian(x, amp, cen, wid): """1-d gaussian: gaussian(x, amp, cen, wid)""" return (amp / (sqrt(2*pi) * wid)) * exp(-(x-cen)**2 / (2*wid**2)) gmodel = Model(gaussian) result = gmodel.fit(y, x=x, amp=5, cen=5, wid=1) print(result.fit_report()) plt.plot(x, y, 'bo') plt.plot(x, result.init_fit, 'k--', label='initial fit') plt.plot(x, result.best_fit, 'r-', label='best fit') plt.legend(loc='best') plt.show() # lmfit-py-1.0.0/examples/doc_model_loadmodel.py000066400000000000000000000011641357751001700214160ustar00rootroot00000000000000# import matplotlib.pyplot as plt import numpy as np from lmfit.model import load_model def mysine(x, amp, freq, shift): return amp * np.sin(x*freq + shift) data = np.loadtxt('sinedata.dat') x = data[:, 0] y = data[:, 1] model = load_model('sinemodel.sav', funcdefs={'mysine': mysine}) params = model.make_params(amp=3, freq=0.52, shift=0) params['shift'].max = 1 params['shift'].min = -1 params['amp'].min = 0.0 result = model.fit(y, params, x=x) print(result.fit_report()) plt.plot(x, y, 'bo') plt.plot(x, result.best_fit, 'r-') plt.show() # 
lmfit-py-1.0.0/examples/doc_model_loadmodelresult.py000066400000000000000000000006161357751001700226560ustar00rootroot00000000000000# import matplotlib.pyplot as plt import numpy as np from lmfit.model import load_modelresult data = np.loadtxt('model1d_gauss.dat') x = data[:, 0] y = data[:, 1] result = load_modelresult('gauss_modelresult.sav') print(result.fit_report()) plt.plot(x, y, 'bo') plt.plot(x, result.best_fit, 'r-') plt.show() # lmfit-py-1.0.0/examples/doc_model_loadmodelresult2.py000066400000000000000000000006171357751001700227410ustar00rootroot00000000000000# import matplotlib.pyplot as plt import numpy as np from lmfit.model import load_modelresult dat = np.loadtxt('NIST_Gauss2.dat') x = dat[:, 1] y = dat[:, 0] result = load_modelresult('nistgauss_modelresult.sav') print(result.fit_report()) plt.plot(x, y, 'bo') plt.plot(x, result.best_fit, 'r-') plt.show() # lmfit-py-1.0.0/examples/doc_model_savemodel.py000066400000000000000000000005211357751001700214310ustar00rootroot00000000000000# import numpy as np from lmfit.model import Model, save_model def mysine(x, amp, freq, shift): return amp * np.sin(x*freq + shift) sinemodel = Model(mysine) pars = sinemodel.make_params(amp=1, freq=0.25, shift=0) save_model(sinemodel, 'sinemodel.sav') # lmfit-py-1.0.0/examples/doc_model_savemodelresult.py000066400000000000000000000006471357751001700227010ustar00rootroot00000000000000# import numpy as np from lmfit.model import save_modelresult from lmfit.models import GaussianModel data = np.loadtxt('model1d_gauss.dat') x = data[:, 0] y = data[:, 1] gmodel = GaussianModel() result = gmodel.fit(y, x=x, amplitude=5, center=5, sigma=1) save_modelresult(result, 'gauss_modelresult.sav') print(result.fit_report()) # lmfit-py-1.0.0/examples/doc_model_savemodelresult2.py000066400000000000000000000016441357751001700227610ustar00rootroot00000000000000# import numpy as np from lmfit.model import save_modelresult from lmfit.models import ExponentialModel, GaussianModel dat = np.loadtxt('NIST_Gauss2.dat') x = dat[:, 1] y = dat[:, 0] exp_mod = ExponentialModel(prefix='exp_') pars = exp_mod.guess(y, x=x) gauss1 = GaussianModel(prefix='g1_') pars.update(gauss1.make_params()) pars['g1_center'].set(value=105, min=75, max=125) pars['g1_sigma'].set(value=15, min=3) pars['g1_amplitude'].set(value=2000, min=10) gauss2 = GaussianModel(prefix='g2_') pars.update(gauss2.make_params()) pars['g2_center'].set(value=155, min=125, max=175) pars['g2_sigma'].set(value=15, min=3) pars['g2_amplitude'].set(value=2000, min=10) mod = gauss1 + gauss2 + exp_mod init = mod.eval(pars, x=x) result = mod.fit(y, pars, x=x) save_modelresult(result, 'nistgauss_modelresult.sav') print(result.fit_report()) # lmfit-py-1.0.0/examples/doc_model_two_components.py000066400000000000000000000015001357751001700225260ustar00rootroot00000000000000# import matplotlib.pyplot as plt from numpy import exp, loadtxt, pi, sqrt from lmfit import Model data = loadtxt('model1d_gauss.dat') x = data[:, 0] y = data[:, 1] + 0.25*x - 1.0 def gaussian(x, amp, cen, wid): """1-d gaussian: gaussian(x, amp, cen, wid)""" return (amp / (sqrt(2*pi) * wid)) * exp(-(x-cen)**2 / (2*wid**2)) def line(x, slope, intercept): """a line""" return slope*x + intercept mod = Model(gaussian) + Model(line) pars = mod.make_params(amp=5, cen=5, wid=1, slope=0, intercept=1) result = mod.fit(y, pars, x=x) print(result.fit_report()) plt.plot(x, y, 'bo') plt.plot(x, result.init_fit, 'k--', label='initial fit') plt.plot(x, result.best_fit, 'r-', label='best fit') plt.legend(loc='best') plt.show() # 
lmfit-py-1.0.0/examples/doc_model_uncertainty.py000066400000000000000000000015171357751001700220250ustar00rootroot00000000000000# import matplotlib.pyplot as plt from numpy import exp, loadtxt, pi, sqrt from lmfit import Model data = loadtxt('model1d_gauss.dat') x = data[:, 0] y = data[:, 1] def gaussian(x, amp, cen, wid): """1-d gaussian: gaussian(x, amp, cen, wid)""" return (amp / (sqrt(2*pi) * wid)) * exp(-(x-cen)**2 / (2*wid**2)) gmodel = Model(gaussian) result = gmodel.fit(y, x=x, amp=5, cen=5, wid=1) print(result.fit_report()) dely = result.eval_uncertainty(sigma=3) plt.plot(x, y, 'bo') plt.plot(x, result.init_fit, 'k--', label='initial fit') plt.plot(x, result.best_fit, 'r-', label='best fit') plt.fill_between(x, result.best_fit-dely, result.best_fit+dely, color="#ABABAB", label=r'3-$\sigma$ uncertainty band') plt.legend(loc='best') plt.show() # lmfit-py-1.0.0/examples/doc_model_with_iter_callback.py000066400000000000000000000017001357751001700232640ustar00rootroot00000000000000# import matplotlib.pyplot as plt from numpy import linspace, random from lmfit.lineshapes import gaussian from lmfit.models import GaussianModel, LinearModel def per_iteration(pars, iter, resid, *args, **kws): print(" ITER ", iter, ["%.5f" % p for p in pars.values()]) x = linspace(0., 20, 401) y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23) y = y - .20*x + 3.333 + random.normal(scale=0.23, size=x.size) mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_') pars = mod.make_params() pars['peak_amplitude'].value = 3.0 pars['peak_center'].value = 6.0 pars['peak_sigma'].value = 2.0 pars['bkg_intercept'].value = 0.0 pars['bkg_slope'].value = 0.0 out = mod.fit(y, pars, x=x, iter_cb=per_iteration) plt.plot(x, y, 'b--') print('Nfev = ', out.nfev) print(out.fit_report()) plt.plot(x, out.best_fit, 'k-', label='best fit') plt.legend(loc='best') plt.show() # lmfit-py-1.0.0/examples/doc_model_with_nan_policy.py000066400000000000000000000014241357751001700226430ustar00rootroot00000000000000# import matplotlib.pyplot as plt import numpy as np from lmfit.models import GaussianModel data = np.loadtxt('model1d_gauss.dat') x = data[:, 0] y = data[:, 1] y[44] = np.nan y[65] = np.nan # nan_policy = 'raise' # nan_policy = 'propagate' nan_policy = 'omit' gmodel = GaussianModel() result = gmodel.fit(y, x=x, amplitude=5, center=6, sigma=1, nan_policy=nan_policy) print(result.fit_report()) # make sure nans are removed for plotting: x_ = x[np.where(np.isfinite(y))] y_ = y[np.where(np.isfinite(y))] plt.plot(x_, y_, 'bo') plt.plot(x_, result.init_fit, 'k--', label='initial fit') plt.plot(x_, result.best_fit, 'r-', label='best fit') plt.legend(loc='best') plt.show() # lmfit-py-1.0.0/examples/doc_parameters_basic.py000066400000000000000000000023641357751001700216050ustar00rootroot00000000000000# import numpy as np from lmfit import Minimizer, Parameters, report_fit # create data to be fitted x = np.linspace(0, 15, 301) data = (5.0 * np.sin(2.0*x - 0.1) * np.exp(-x*x*0.025) + np.random.normal(size=x.size, scale=0.2)) # define objective function: returns the array to be minimized def fcn2min(params, x, data): """Model a decaying sine wave and subtract data.""" amp = params['amp'] shift = params['shift'] omega = params['omega'] decay = params['decay'] model = amp * np.sin(x*omega + shift) * np.exp(-x*x*decay) return model - data # create a set of Parameters params = Parameters() params.add('amp', value=10, min=0) params.add('decay', value=0.1) params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2.) 
params.add('omega', value=3.0) # do fit, here with the default leastsq algorithm minner = Minimizer(fcn2min, params, fcn_args=(x, data)) result = minner.minimize() # calculate final result final = data + result.residual # write error report report_fit(result) # try to plot results try: import matplotlib.pyplot as plt plt.plot(x, data, 'k+') plt.plot(x, final, 'r') plt.show() except ImportError: pass # lmfit-py-1.0.0/examples/doc_parameters_valuesdict.py000066400000000000000000000023041357751001700226610ustar00rootroot00000000000000# import numpy as np from lmfit import Minimizer, Parameters, report_fit # create data to be fitted x = np.linspace(0, 15, 301) data = (5.0 * np.sin(2.0*x - 0.1) * np.exp(-x*x*0.025) + np.random.normal(size=x.size, scale=0.2)) # define objective function: returns the array to be minimized def fcn2min(params, x, data): """Model a decaying sine wave and subtract data.""" v = params.valuesdict() model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay']) return model - data # create a set of Parameters params = Parameters() params.add('amp', value=10, min=0) params.add('decay', value=0.1) params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2) params.add('omega', value=3.0) # do fit, here with the default leastsq algorithm minner = Minimizer(fcn2min, params, fcn_args=(x, data)) result = minner.minimize() # calculate final result final = data + result.residual # write error report report_fit(result) # try to plot results try: import matplotlib.pyplot as plt plt.plot(x, data, 'k+') plt.plot(x, final, 'r') plt.show() except ImportError: pass # lmfit-py-1.0.0/examples/example_Model_interface.py000066400000000000000000000170031357751001700222430ustar00rootroot00000000000000""" Fit using the Model interface ============================= This notebook shows a simple example of using the ``lmfit.Model`` class. For more information please refer to: https://lmfit.github.io/lmfit-py/model.html#the-model-class. """ import numpy as np from pandas import Series from lmfit import Model, Parameter, report_fit ############################################################################### # The ``Model`` class is a flexible, concise curve fitter. I will illustrate # fitting example data to an exponential decay. def decay(t, N, tau): return N*np.exp(-t/tau) ############################################################################### # The parameters are in no particular order. We'll need some example data. I # will use ``N=7`` and ``tau=3``, and add a little noise. t = np.linspace(0, 5, num=1000) data = decay(t, 7, 3) + np.random.randn(*t.shape) ############################################################################### # **Simplest Usage** model = Model(decay, independent_vars=['t']) result = model.fit(data, t=t, N=10, tau=1) ############################################################################### # The Model infers the parameter names by inspecting the arguments of the # function, ``decay``. Then I passed the independent variable, ``t``, and # initial guesses for each parameter. A residual function is automatically # defined, and a least-squared regression is performed. 
# # We can immediately see the best-fit values: print(result.values) ############################################################################### # and use these best-fit parameters for plotting with the ``plot`` function: result.plot() ############################################################################### # We can review the best-fit `Parameters` by accessing `result.params`: result.params.pretty_print() ############################################################################### # More information about the fit is stored in the result, which is an # ``lmfit.MimimizerResult`` object (see: # https://lmfit.github.io/lmfit-py/fitting.html#lmfit.minimizer.MinimizerResult) ############################################################################### # **Specifying Bounds and Holding Parameters Constant** # # Above, the ``Model`` class implicitly builds ``Parameter`` objects from # keyword arguments of ``fit`` that match the argments of ``decay``. You can # build the ``Parameter`` objects explicity; the following is equivalent. result = model.fit(data, t=t, N=Parameter('N', value=10), tau=Parameter('tau', value=1)) report_fit(result.params) ############################################################################### # By building ``Parameter`` objects explicitly, you can specify bounds # (``min``, ``max``) and set parameters constant (``vary=False``). result = model.fit(data, t=t, N=Parameter('N', value=7, vary=False), tau=Parameter('tau', value=1, min=0)) report_fit(result.params) ############################################################################### # **Defining Parameters in Advance** # # Passing parameters to ``fit`` can become unwieldly. As an alternative, you # can extract the parameters from ``model`` like so, set them individually, # and pass them to ``fit``. params = model.make_params() params['N'].value = 10 params['tau'].value = 1 params['tau'].min = 0 result = model.fit(data, params, t=t) report_fit(result.params) ############################################################################## # Keyword arguments override ``params``, resetting ``value`` and all other # properties (``min``, ``max``, ``vary``). result = model.fit(data, params, t=t, tau=1) report_fit(result.params) ############################################################################### # The input parameters are not modified by ``fit``. They can be reused, # retaining the same initial value. If you want to use the result of one fit # as the initial guess for the next, simply pass ``params=result.params``. ############################################################################### # #TODO/FIXME: not sure if there ever way a "helpful exception", but currently # #it raises a ``ValueError: The input contains nan values``. # # #*A Helpful Exception* # # #All this implicit magic makes it very easy for the user to neglect to set a # #parameter. The ``fit`` function checks for this and raises a helpful exception. # #result = model.fit(data, t=t, tau=1) # N unspecified ############################################################################### # #An *extra* parameter that cannot be matched to the model function will # #throw a ``UserWarning``, but it will not raise, leaving open the possibility # #of unforeseen extensions calling for some parameters. ############################################################################### # *Weighted Fits* # # Use the ``sigma`` argument to perform a weighted fit. If you prefer to think # of the fit in term of ``weights``, ``sigma=1/weights``. 
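# Model.fit applies these through its ``weights`` argument (the weights multiply
# the residual); here they simply increase linearly with each data point's index.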
weights = np.arange(len(data)) result = model.fit(data, params, t=t, weights=weights) report_fit(result.params) ############################################################################### # *Handling Missing Data* # # By default, attempting to fit data that includes a ``NaN``, which # conventionally indicates a "missing" observation, raises a lengthy exception. # You can choose to ``omit`` (i.e., skip over) missing values instead. data_with_holes = data.copy() data_with_holes[[5, 500, 700]] = np.nan # Replace arbitrary values with NaN. model = Model(decay, independent_vars=['t'], nan_policy='omit') result = model.fit(data_with_holes, params, t=t) report_fit(result.params) ############################################################################### # If you don't want to ignore missing values, you can set the model to raise # proactively, checking for missing values before attempting the fit. # # Uncomment to see the error # #model = Model(decay, independent_vars=['t'], nan_policy='raise') # #result = model.fit(data_with_holes, params, t=t) # # The default setting is ``nan_policy='raise'``, which does check for NaNs and # raises an exception when present. # # Null-checking relies on ``pandas.isnull`` if it is available. If pandas # cannot be imported, it silently falls back on ``numpy.isnan``. ############################################################################### # *Data Alignment* # # Imagine a collection of time series data with different lengths. It would be # convenient to define one sufficiently long array ``t`` and use it for each # time series, regardless of length. ``pandas`` # (https://pandas.pydata.org/pandas-docs/stable/) provides tools for aligning # indexed data. And, unlike most wrappers to ``scipy.leastsq``, ``Model`` can # handle pandas objects out of the box, using its data alignment features. # # Here I take just a slice of the ``data`` and fit it to the full ``t``. It is # automatically aligned to the correct section of ``t`` using Series' index. model = Model(decay, independent_vars=['t']) truncated_data = Series(data)[200:800] # data points 200-800 t = Series(t) # all 1000 points result = model.fit(truncated_data, params, t=t) report_fit(result.params) ############################################################################### # Data with missing entries and an unequal length still aligns properly. model = Model(decay, independent_vars=['t'], nan_policy='omit') truncated_data_with_holes = Series(data_with_holes)[200:800] result = model.fit(truncated_data_with_holes, params, t=t) report_fit(result.params) lmfit-py-1.0.0/examples/example_brute.py """ Global minimization using the ``brute`` method (a.k.a. grid search) =================================================================== """ ############################################################################### # This notebook shows a simple example of using ``lmfit.minimize.brute`` that # uses the method with the same name from ``scipy.optimize``. # # The method computes the function's value at each point of a multidimensional # grid of points, to find the global minimum of the function. It behaves # identically to ``scipy.optimize.brute`` in case finite bounds are given on # all varying parameters, but will also deal with non-bounded parameters # (see below).
import copy import matplotlib.pyplot as plt import numpy as np from lmfit import Minimizer, Parameters, fit_report ############################################################################### # Let's start with the example given in the documentation of SciPy: # # "We illustrate the use of brute to seek the global minimum of a function of # two variables that is given as the sum of a positive-definite quadratic and # two deep “Gaussian-shaped†craters. Specifically, define the objective # function f as the sum of three other functions, ``f = f1 + f2 + f3``. We # suppose each of these has a signature ``(z, *params), where z = (x, y)``, # and params and the functions are as defined below." # # First, we create a set of Parameters where all variables except ``x`` and # ``y`` are given fixed values. params = Parameters() params.add_many( ('a', 2, False), ('b', 3, False), ('c', 7, False), ('d', 8, False), ('e', 9, False), ('f', 10, False), ('g', 44, False), ('h', -1, False), ('i', 2, False), ('j', 26, False), ('k', 1, False), ('l', -2, False), ('scale', 0.5, False), ('x', 0.0, True), ('y', 0.0, True)) ############################################################################### # Second, create the three functions and the objective function: def f1(p): par = p.valuesdict() return (par['a'] * par['x']**2 + par['b'] * par['x'] * par['y'] + par['c'] * par['y']**2 + par['d']*par['x'] + par['e']*par['y'] + par['f']) def f2(p): par = p.valuesdict() return (-1.0*par['g']*np.exp(-((par['x']-par['h'])**2 + (par['y']-par['i'])**2) / par['scale'])) def f3(p): par = p.valuesdict() return (-1.0*par['j']*np.exp(-((par['x']-par['k'])**2 + (par['y']-par['l'])**2) / par['scale'])) def f(params): return f1(params) + f2(params) + f3(params) ############################################################################### # Just as in the documentation we will do a grid search between ``-4`` and # ``4`` and use a stepsize of ``0.25``. The bounds can be set as usual with # the ``min`` and ``max`` attributes, and the stepsize is set using # ``brute_step``. params['x'].set(min=-4, max=4, brute_step=0.25) params['y'].set(min=-4, max=4, brute_step=0.25) ############################################################################### # Performing the actual grid search is done with: fitter = Minimizer(f, params) result = fitter.minimize(method='brute') ############################################################################### # , which will increment ``x`` and ``y`` between ``-4`` in increments of # ``0.25`` until ``4`` (not inclusive). grid_x, grid_y = [np.unique(par.ravel()) for par in result.brute_grid] print(grid_x) ############################################################################### # The objective function is evaluated on this grid, and the raw output from # ``scipy.optimize.brute`` is stored in the MinimizerResult as # ``brute_`` attributes. These attributes are: # # ``result.brute_x0`` -- A 1-D array containing the coordinates of a point at # which the objective function had its minimum value. print(result.brute_x0) ############################################################################### # ``result.brute_fval`` -- Function value at the point x0. print(result.brute_fval) ############################################################################### # ``result.brute_grid`` -- Representation of the evaluation grid. It has the # same length as x0. 
print(result.brute_grid) ############################################################################### # ``result.brute_Jout`` -- Function values at each point of the evaluation # grid, i.e., Jout = func(\*grid). print(result.brute_Jout) ############################################################################### # **Reassuringly, the obtained results are identical to using the method in # SciPy directly!** ############################################################################### # Example 2: fit of a decaying sine wave # # In this example, we will explain some of the options of the algorithm. # # We start off by generating some synthetic data with noise for a decaying # sine wave, define an objective function and create a Parameter set. x = np.linspace(0, 15, 301) np.random.seed(7) noise = np.random.normal(size=x.size, scale=0.2) data = (5. * np.sin(2*x - 0.1) * np.exp(-x*x*0.025) + noise) plt.plot(x, data, 'b') def fcn2min(params, x, data): """Model decaying sine wave, subtract data.""" amp = params['amp'] shift = params['shift'] omega = params['omega'] decay = params['decay'] model = amp * np.sin(x*omega + shift) * np.exp(-x*x*decay) return model - data # create a set of Parameters params = Parameters() params.add('amp', value=7, min=2.5) params.add('decay', value=0.05) params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2) params.add('omega', value=3, max=5) ############################################################################### # In contrast to the implementation in SciPy (as shown in the first example), # varying parameters do not need to have finite bounds in lmfit. However, in # that case they **do** need the ``brute_step`` attribute specified, so let's # do that: params['amp'].set(brute_step=0.25) params['decay'].set(brute_step=0.005) params['omega'].set(brute_step=0.25) ############################################################################### # Our initial parameter set is now defined as shown below and this will # determine how the grid is set up. params.pretty_print() ############################################################################### # First, we initialize a Minimizer and perform the grid search: fitter = Minimizer(fcn2min, params, fcn_args=(x, data)) result_brute = fitter.minimize(method='brute', Ns=25, keep=25) ############################################################################### # We used two new parameters here: ``Ns`` and ``keep``. The parameter ``Ns`` # determines the \'number of grid points along the axes\' similarly to its usage # in SciPy. Together with ``brute_step``, ``min`` and ``max`` for a Parameter # it will dictate how the grid is set up: # # **(1)** finite bounds are specified ("SciPy implementation"): uses # ``brute_step`` if present (in the example above) or uses ``Ns`` to generate # the grid.
The latter scenario that interpolates ``Ns`` points from ``min`` # to ``max`` (inclusive), is here shown for the parameter ``shift``: par_name = 'shift' indx_shift = result_brute.var_names.index(par_name) grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel()) print("parameter = {}\nnumber of steps = {}\ngrid = {}".format(par_name, len(grid_shift), grid_shift)) ############################################################################### # If finite bounds are not set for a certain parameter then the user **must** # specify ``brute_step`` - three more scenarios are considered here: # # **(2)** lower bound (min) and brute_step are specified: # range = (min, min + Ns * brute_step, brute_step) par_name = 'amp' indx_shift = result_brute.var_names.index(par_name) grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel()) print("parameter = {}\nnumber of steps = {}\ngrid = {}".format(par_name, len(grid_shift), grid_shift)) ############################################################################### # **(3)** upper bound (max) and brute_step are specified: # range = (max - Ns * brute_step, max, brute_step) par_name = 'omega' indx_shift = result_brute.var_names.index(par_name) grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel()) print("parameter = {}\nnumber of steps = {}\ngrid = {}".format(par_name, len(grid_shift), grid_shift)) ############################################################################### # **(4)** numerical value (value) and brute_step are specified: # range = (value - (Ns//2) * brute_step, value + (Ns//2) * brute_step, brute_step) par_name = 'decay' indx_shift = result_brute.var_names.index(par_name) grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel()) print("parameter = {}\nnumber of steps = {}\ngrid = {}".format(par_name, len(grid_shift), grid_shift)) ############################################################################### # The ``MinimizerResult`` contains all the usual best-fit parameters and # fitting statistics. For example, the optimal solution from the grid search # is given below together with a plot: print(fit_report(result_brute)) plt.plot(x, data, 'b') plt.plot(x, data + fcn2min(result_brute.params, x, data), 'r--') ############################################################################### # We can see that this fit is already very good, which is what we should expect # since our ``brute`` force grid is sampled rather finely and encompasses the # "correct" values. # # In a more realistic, complicated example the ``brute`` method will be used # to get reasonable values for the parameters and perform another minimization # (e.g., using ``leastsq``) using those as starting values. That is where the # `keep`` parameter comes into play: it determines the "number of best # candidates from the brute force method that are stored in the ``candidates`` # attribute". In the example above we store the best-ranking 25 solutions (the # default value is ``50`` and storing all the grid points can be accomplished # by choosing ``all``). The ``candidates`` attribute contains the parameters # and ``chisqr`` from the brute force method as a namedtuple, # ``(‘Candidate’, [‘params’, ‘score’])``, sorted on the (lowest) ``chisqr`` # value. To access the values for a particular candidate one can use # ``result.candidate[#].params`` or ``result.candidate[#].score``, where a # lower # represents a better candidate. 
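# For instance, the best grid-point solution can be inspected directly (a
# small usage sketch based only on the attributes just described):
print(result_brute.candidates[0].score)
result_brute.candidates[0].params.pretty_print()

###############################################################################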
The ``show_candidates(#)`` uses the # ``pretty_print()`` method to show a specific candidate-# or all candidates # when no number is specified. # # The optimal fit is, as usual, stored in the ``MinimizerResult.params`` # attribute and is, therefore, identical to ``result_brute.show_candidates(1)``. result_brute.show_candidates(1) ############################################################################### # In this case, the next-best scoring candidate has already a ``chisqr`` that # increased quite a bit: result_brute.show_candidates(2) ############################################################################### # and is, therefore, probably not so likely... However, as said above, in most # cases you'll want to do another minimization using the solutions from the # ``brute`` method as starting values. That can be easily accomplished as # shown in the code below, where we now perform a ``leastsq`` minimization # starting from the top-25 solutions and accept the solution if the ``chisqr`` # is lower than the previously 'optimal' solution: best_result = copy.deepcopy(result_brute) for candidate in result_brute.candidates: trial = fitter.minimize(method='leastsq', params=candidate.params) if trial.chisqr < best_result.chisqr: best_result = trial ############################################################################### # From the ``leastsq`` minimization we obtain the following parameters for the # most optimal result: print(fit_report(best_result)) ############################################################################### # As expected the parameters have not changed significantly as they were # already very close to the "real" values, which can also be appreciated from # the plots below. plt.plot(x, data, 'b') plt.plot(x, data + fcn2min(result_brute.params, x, data), 'r--', label='brute') plt.plot(x, data + fcn2min(best_result.params, x, data), 'g--', label='brute followed by leastsq') plt.legend() ############################################################################### # Finally, the results from the ``brute`` force grid-search can be visualized # using the rather lengthy Python function below (which might get incorporated # in lmfit at some point). def plot_results_brute(result, best_vals=True, varlabels=None, output=None): """Visualize the result of the brute force grid search. The output file will display the chi-square value per parameter and contour plots for all combination of two parameters. Inspired by the `corner` package (https://github.com/dfm/corner.py). Parameters ---------- result : :class:`~lmfit.minimizer.MinimizerResult` Contains the results from the :meth:`brute` method. best_vals : bool, optional Whether to show the best values from the grid search (default is True). varlabels : list, optional If None (default), use `result.var_names` as axis labels, otherwise use the names specified in `varlabels`. 
output : str, optional Name of the output PDF file (default is 'None') """ from matplotlib.colors import LogNorm npars = len(result.var_names) fig, axes = plt.subplots(npars, npars) if not varlabels: varlabels = result.var_names if best_vals and isinstance(best_vals, bool): best_vals = result.params for i, par1 in enumerate(result.var_names): for j, par2 in enumerate(result.var_names): # parameter vs chi2 in case of only one parameter if npars == 1: axes.plot(result.brute_grid, result.brute_Jout, 'o', ms=3) axes.set_ylabel(r'$\chi^{2}$') axes.set_xlabel(varlabels[i]) if best_vals: axes.axvline(best_vals[par1].value, ls='dashed', color='r') # parameter vs chi2 profile on top elif i == j and j < npars-1: if i == 0: axes[0, 0].axis('off') ax = axes[i, j+1] red_axis = tuple([a for a in range(npars) if a != i]) ax.plot(np.unique(result.brute_grid[i]), np.minimum.reduce(result.brute_Jout, axis=red_axis), 'o', ms=3) ax.set_ylabel(r'$\chi^{2}$') ax.yaxis.set_label_position("right") ax.yaxis.set_ticks_position('right') ax.set_xticks([]) if best_vals: ax.axvline(best_vals[par1].value, ls='dashed', color='r') # parameter vs chi2 profile on the left elif j == 0 and i > 0: ax = axes[i, j] red_axis = tuple([a for a in range(npars) if a != i]) ax.plot(np.minimum.reduce(result.brute_Jout, axis=red_axis), np.unique(result.brute_grid[i]), 'o', ms=3) ax.invert_xaxis() ax.set_ylabel(varlabels[i]) if i != npars-1: ax.set_xticks([]) elif i == npars-1: ax.set_xlabel(r'$\chi^{2}$') if best_vals: ax.axhline(best_vals[par1].value, ls='dashed', color='r') # contour plots for all combinations of two parameters elif j > i: ax = axes[j, i+1] red_axis = tuple([a for a in range(npars) if a != i and a != j]) X, Y = np.meshgrid(np.unique(result.brute_grid[i]), np.unique(result.brute_grid[j])) lvls1 = np.linspace(result.brute_Jout.min(), np.median(result.brute_Jout)/2.0, 7, dtype='int') lvls2 = np.linspace(np.median(result.brute_Jout)/2.0, np.median(result.brute_Jout), 3, dtype='int') lvls = np.unique(np.concatenate((lvls1, lvls2))) ax.contourf(X.T, Y.T, np.minimum.reduce(result.brute_Jout, axis=red_axis), lvls, norm=LogNorm()) ax.set_yticks([]) if best_vals: ax.axvline(best_vals[par1].value, ls='dashed', color='r') ax.axhline(best_vals[par2].value, ls='dashed', color='r') ax.plot(best_vals[par1].value, best_vals[par2].value, 'rs', ms=3) if j != npars-1: ax.set_xticks([]) elif j == npars-1: ax.set_xlabel(varlabels[i]) if j - i >= 2: axes[i, j].axis('off') if output is not None: plt.savefig(output) ############################################################################### # and finally, to generated the figure: plot_results_brute(result_brute, best_vals=True, varlabels=None) lmfit-py-1.0.0/examples/example_complex_resonator_model.py000066400000000000000000000112351357751001700241070ustar00rootroot00000000000000""" Complex Resonator Model ======================= This notebook shows how to fit the parameters of a complex resonator, using `lmfit.Model` and defining a custom `Model` class. Following Khalil et al. (https://arxiv.org/abs/1108.3117), we can model the forward transmission of a microwave resonator with total quality factor :math:`Q`, coupling quality factor :math:`Q_e`, and resonant frequency :math:`f_0` using: .. math:: S_{21}(f) = 1 - \\frac{Q Q_e^{-1}}{1+2jQ(f-f_0)/f_0} :math:`S_{21}` is thus a complex function of a real frequency. By allowing :math:`Q_e` to be complex, this model can take into account mismatches in the input and output transmission impedances. 
""" import matplotlib.pyplot as plt import numpy as np import lmfit ############################################################################### # Since ``scipy.optimize`` and ``lmfit`` require real parameters, we represent # :math:`Q_e` as ``Q_e_real + 1j*Q_e_imag``. def linear_resonator(f, f_0, Q, Q_e_real, Q_e_imag): Q_e = Q_e_real + 1j*Q_e_imag return (1 - (Q * Q_e**-1 / (1 + 2j * Q * (f - f_0) / f_0))) ############################################################################### # The standard practice of defining an ``lmfit`` model is as follows: class ResonatorModel(lmfit.model.Model): __doc__ = "resonator model" + lmfit.models.COMMON_DOC def __init__(self, *args, **kwargs): # pass in the defining equation so the user doesn't have to later. super().__init__(linear_resonator, *args, **kwargs) self.set_param_hint('Q', min=0) # Enforce Q is positive def guess(self, data, f=None, **kwargs): verbose = kwargs.pop('verbose', None) if f is None: return argmin_s21 = np.abs(data).argmin() fmin = f.min() fmax = f.max() f_0_guess = f[argmin_s21] # guess that the resonance is the lowest point Q_min = 0.1 * (f_0_guess/(fmax-fmin)) # assume the user isn't trying to fit just a small part of a resonance curve. delta_f = np.diff(f) # assume f is sorted min_delta_f = delta_f[delta_f > 0].min() Q_max = f_0_guess/min_delta_f # assume data actually samples the resonance reasonably Q_guess = np.sqrt(Q_min*Q_max) # geometric mean, why not? Q_e_real_guess = Q_guess/(1-np.abs(data[argmin_s21])) if verbose: print("fmin=", fmin, "fmax=", fmax, "f_0_guess=", f_0_guess) print("Qmin=", Q_min, "Q_max=", Q_max, "Q_guess=", Q_guess, "Q_e_real_guess=", Q_e_real_guess) params = self.make_params(Q=Q_guess, Q_e_real=Q_e_real_guess, Q_e_imag=0, f_0=f_0_guess) params['%sQ' % self.prefix].set(min=Q_min, max=Q_max) params['%sf_0' % self.prefix].set(min=fmin, max=fmax) return lmfit.models.update_param_vals(params, self.prefix, **kwargs) ############################################################################### # Now let's use the model to generate some fake data: resonator = ResonatorModel() true_params = resonator.make_params(f_0=100, Q=10000, Q_e_real=9000, Q_e_imag=-9000) f = np.linspace(99.95, 100.05, 100) true_s21 = resonator.eval(params=true_params, f=f) noise_scale = 0.02 np.random.seed(123) measured_s21 = true_s21 + noise_scale*(np.random.randn(100) + 1j*np.random.randn(100)) plt.figure() plt.plot(f, 20*np.log10(np.abs(measured_s21))) plt.ylabel('|S21| (dB)') plt.xlabel('MHz') plt.title('simulated measurement') ############################################################################### # Try out the guess method we added: guess = resonator.guess(measured_s21, f=f, verbose=True) ############################################################################## # And now fit the data using the guess as a starting point: result = resonator.fit(measured_s21, params=guess, f=f, verbose=True) print(result.fit_report() + '\n') result.params.pretty_print() ############################################################################### # Now we'll make some plots of the data and fit. 
Define a convenience function # for plotting complex quantities: def plot_ri(data, *args, **kwargs): plt.plot(data.real, data.imag, *args, **kwargs) fit_s21 = resonator.eval(params=result.params, f=f) guess_s21 = resonator.eval(params=guess, f=f) plt.figure() plot_ri(measured_s21, '.') plot_ri(fit_s21, 'r.-', label='best fit') plot_ri(guess_s21, 'k--', label='inital fit') plt.legend(loc='best') plt.xlabel('Re(S21)') plt.ylabel('Im(S21)') plt.figure() plt.plot(f, 20*np.log10(np.abs(measured_s21)), '.') plt.plot(f, 20*np.log10(np.abs(fit_s21)), 'r.-', label='best fit') plt.plot(f, 20*np.log10(np.abs(guess_s21)), 'k--', label='initial fit') plt.legend(loc='best') plt.ylabel('|S21| (dB)') plt.xlabel('MHz') lmfit-py-1.0.0/examples/example_confidence_interval.py000066400000000000000000000077231357751001700231740ustar00rootroot00000000000000""" Calculate Confidence Intervals ============================== """ import matplotlib.pyplot as plt from numpy import argsort, exp, linspace, pi, random, sign, sin, unique from scipy.interpolate import interp1d from lmfit import (Minimizer, Parameters, conf_interval, conf_interval2d, report_ci, report_fit) ############################################################################### # Define the residual function, specify "true" parameter values, and generate # a synthetic data set with some noise: def residual(pars, x, data=None): argu = (x*pars['decay'])**2 shift = pars['shift'] if abs(shift) > pi/2: shift = shift - sign(shift)*pi model = pars['amp']*sin(shift + x/pars['period']) * exp(-argu) if data is None: return model return model - data p_true = Parameters() p_true.add('amp', value=14.0) p_true.add('period', value=5.33) p_true.add('shift', value=0.123) p_true.add('decay', value=0.010) x = linspace(0.0, 250.0, 2500) noise = random.normal(scale=0.7215, size=x.size) data = residual(p_true, x) + noise ############################################################################### # Create fitting parameters and set initial values: fit_params = Parameters() fit_params.add('amp', value=13.0) fit_params.add('period', value=2) fit_params.add('shift', value=0.0) fit_params.add('decay', value=0.02) ############################################################################### # Set-up the minimizer and perform the fit using leastsq algorithm, and show # the report: mini = Minimizer(residual, fit_params, fcn_args=(x,), fcn_kws={'data': data}) out = mini.leastsq() fit = residual(out.params, x) report_fit(out) ############################################################################### # Calculate the confidence intervals for parameters and display the results: ci, tr = conf_interval(mini, out, trace=True) report_ci(ci) names = out.params.keys() i = 0 gs = plt.GridSpec(4, 4) sx = {} sy = {} for fixed in names: j = 0 for free in names: if j in sx and i in sy: ax = plt.subplot(gs[i, j], sharex=sx[j], sharey=sy[i]) elif i in sy: ax = plt.subplot(gs[i, j], sharey=sy[i]) sx[j] = ax elif j in sx: ax = plt.subplot(gs[i, j], sharex=sx[j]) sy[i] = ax else: ax = plt.subplot(gs[i, j]) sy[i] = ax sx[j] = ax if i < 3: plt.setp(ax.get_xticklabels(), visible=False) else: ax.set_xlabel(free) if j > 0: plt.setp(ax.get_yticklabels(), visible=False) else: ax.set_ylabel(fixed) res = tr[fixed] prob = res['prob'] f = prob < 0.96 x, y = res[free], res[fixed] ax.scatter(x[f], y[f], c=1-prob[f], s=200*(1-prob[f]+0.5)) ax.autoscale(1, 1) j += 1 i += 1 ############################################################################### # It is also possible to calculate the confidence 
regions for two fixed # parameters using the function ``conf_interval2d``: names = list(out.params.keys()) plt.figure() cm = plt.cm.coolwarm for i in range(4): for j in range(4): plt.subplot(4, 4, 16-j*4-i) if i != j: x, y, m = conf_interval2d(mini, out, names[i], names[j], 20, 20) plt.contourf(x, y, m, linspace(0, 1, 10), cmap=cm) plt.xlabel(names[i]) plt.ylabel(names[j]) x = tr[names[i]][names[i]] y = tr[names[i]][names[j]] pr = tr[names[i]]['prob'] s = argsort(x) plt.scatter(x[s], y[s], c=pr[s], s=30, lw=1, cmap=cm) else: x = tr[names[i]][names[i]] y = tr[names[i]]['prob'] t, s = unique(x, True) f = interp1d(t, y[s], 'slinear') xn = linspace(x.min(), x.max(), 50) plt.plot(xn, f(xn), 'g', lw=1) plt.xlabel(names[i]) plt.ylabel('prob') plt.show() lmfit-py-1.0.0/examples/example_detect_outliers.py000066400000000000000000000062731357751001700223700ustar00rootroot00000000000000""" Outlier detection via leave-one-out =================================== Outliers can sometimes be identified by assessing the influence of each datapoint. To assess the influence of one point, we fit the dataset while the point and compare the result with the fit of the full dataset. The code below shows how to do this with lmfit. Note that the presented method is very basic. """ from collections import defaultdict import matplotlib.pyplot as plt import numpy as np import lmfit plt.rcParams['figure.dpi'] = 130 plt.rcParams['figure.autolayout'] = True ############################################################################### # Generate test data and model. Apply the model to the data x = np.linspace(0.3, 10, 100) np.random.seed(1) y = 1.0 / (0.1 * x) + 2.0 + 3 * np.random.randn(x.size) params = lmfit.Parameters() params.add_many(('a', 0.1), ('b', 1)) def func(x, a, b): return 1.0 / (a * x) + b # Make 5 points outliers idx = np.random.randint(0, x.size, 5) y[idx] += 10 * np.random.randn(idx.size) # Fit the data model = lmfit.Model(func, independent_vars=['x']) fit_result = model.fit(y, x=x, a=0.1, b=2) ############################################################################### # and gives the plot and fitting results below: fit_result.plot_fit() plt.plot(x[idx], y[idx], 'o', color='r', label='outliers') plt.show() print(fit_result.fit_report()) ############################################################################### # Fit the dataset while omitting one data point best_vals = defaultdict(lambda: np.zeros(x.size)) stderrs = defaultdict(lambda: np.zeros(x.size)) chi_sq = np.zeros_like(x) for i in range(x.size): idx2 = np.arange(0, x.size) idx2 = np.delete(idx2, i) tmp_x = x[idx2] tmp = model.fit(y[idx2], x=tmp_x, a=fit_result.params['a'], b=fit_result.params['b']) chi_sq[i] = tmp.chisqr for p in tmp.params: tpar = tmp.params[p] best_vals[p][i] = tpar.value stderrs[p][i] = (tpar.stderr / fit_result.params[p].stderr) ############################################################################### # Plot the influence on the red. chisqr of each point fig, ax = plt.subplots() ax.plot(x, (fit_result.chisqr - chi_sq) / chi_sq) ax.scatter(x[idx], fit_result.chisqr / chi_sq[idx] - 1, color='r', label='outlier') ax.set_ylabel(r'Relative red. 
$\chi^2$ change') ax.set_xlabel('x') ax.legend() ############################################################################### # Plot the influence on the parameter value and error of each point fig, axs = plt.subplots(4, figsize=(4, 7), sharex='col') axs[0].plot(x, best_vals['a']) axs[0].scatter(x[idx], best_vals['a'][idx], color='r', label='outlier') axs[0].set_ylabel('best a') axs[1].plot(x, best_vals['b']) axs[1].scatter(x[idx], best_vals['b'][idx], color='r', label='outlier') axs[1].set_ylabel('best b') axs[2].plot(x, stderrs['a']) axs[2].scatter(x[idx], stderrs['a'][idx], color='r', label='outlier') axs[2].set_ylabel('err a change') axs[3].plot(x, stderrs['b']) axs[3].scatter(x[idx], stderrs['b'][idx], color='r', label='outlier') axs[3].set_ylabel('err b change') axs[3].set_xlabel('x') lmfit-py-1.0.0/examples/example_diffev.py000066400000000000000000000025521357751001700204310ustar00rootroot00000000000000""" Fit Using differential_evolution Algorithm ========================================== This example compares the "leastsq" and "differential_evolution" algorithms on a fairly simple problem. """ import matplotlib.pyplot as plt import numpy as np import lmfit np.random.seed(2) x = np.linspace(0, 10, 101) # Setup example decay = 5 offset = 1.0 amp = 2.0 omega = 4.0 y = offset + amp*np.sin(omega*x) * np.exp(-x/decay) yn = y + np.random.normal(size=y.size, scale=0.450) def resid(params, x, ydata): decay = params['decay'].value offset = params['offset'].value omega = params['omega'].value amp = params['amp'].value y_model = offset + amp * np.sin(x*omega) * np.exp(-x/decay) return y_model - ydata params = lmfit.Parameters() params.add('offset', 2.0, min=0, max=10.0) params.add('omega', 3.3, min=0, max=10.0) params.add('amp', 2.5, min=0, max=10.0) params.add('decay', 1.0, min=0, max=10.0) o1 = lmfit.minimize(resid, params, args=(x, yn), method='leastsq') print("# Fit using leastsq:") lmfit.report_fit(o1) o2 = lmfit.minimize(resid, params, args=(x, yn), method='differential_evolution') print("\n\n# Fit using differential_evolution:") lmfit.report_fit(o2) plt.plot(x, yn, 'ko', lw=2) plt.plot(x, yn+o1.residual, 'r-', lw=2) plt.plot(x, yn+o2.residual, 'b--', lw=2) plt.legend(['data', 'leastsq', 'diffev'], loc='upper left') plt.show() lmfit-py-1.0.0/examples/example_emcee_Model_interface.py000066400000000000000000000074121357751001700234040ustar00rootroot00000000000000""" Emcee and the Model Interface ============================= """ import corner import matplotlib.pyplot as plt import numpy as np import lmfit ############################################################################### # Set up a double-exponential function and create a Model def double_exp(x, a1, t1, a2, t2): return a1*np.exp(-x/t1) + a2*np.exp(-(x-0.1) / t2) model = lmfit.Model(double_exp) ############################################################################### # Generate some fake data from the model with added noise truths = (3.0, 2.0, -5.0, 10.0) x = np.linspace(1, 10, 250) np.random.seed(0) y = double_exp(x, *truths)+0.1*np.random.randn(x.size) ############################################################################### # Create model parameters and give them initial values p = model.make_params(a1=4, t1=3, a2=4, t2=3) ############################################################################### # Fit the model using a traditional minimizer, and show the output: result = model.fit(data=y, params=p, x=x, method='Nelder', nan_policy='omit') lmfit.report_fit(result) result.plot() 
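###############################################################################
# As a quick sanity check (a short sketch using only objects defined above),
# the best-fit values from the ``Nelder`` fit can be compared with the
# ``truths`` used to generate the data:
for name, true_val in zip(('a1', 't1', 'a2', 't2'), truths):
    print('%s: fit = %.3f, truth = %.3f' % (name, result.params[name].value, true_val))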
############################################################################### # Calculate parameter covariance using emcee: # # - start the walkers out at the best-fit values # - set is_weighted to False to estimate the noise weights # - set some sensible priors on the uncertainty to keep the MCMC in check # emcee_kws = dict(steps=1000, burn=300, thin=20, is_weighted=False, progress=False) emcee_params = result.params.copy() emcee_params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2.0)) ############################################################################### # run the MCMC algorithm and show the results: result_emcee = model.fit(data=y, x=x, params=emcee_params, method='emcee', nan_policy='omit', fit_kws=emcee_kws) lmfit.report_fit(result_emcee) ax = plt.plot(x, model.eval(params=result.params, x=x), label='Nelder', zorder=100) result_emcee.plot_fit(ax=ax, data_kws=dict(color='gray', markersize=2)) plt.show() ############################################################################### # check the acceptance fraction to see whether emcee performed well plt.plot(result_emcee.acceptance_fraction) plt.xlabel('walker') plt.ylabel('acceptance fraction') plt.show() ############################################################################### # try to compute the autocorrelation time if hasattr(result_emcee, "acor"): print("Autocorrelation time for the parameters:") print("----------------------------------------") for i, p in enumerate(result.params): print(p, result.acor[i]) ############################################################################### # Plot the parameter covariances returned by emcee using corner emcee_corner = corner.corner(result_emcee.flatchain, labels=result_emcee.var_names, truths=list(result_emcee.params.valuesdict().values())) ############################################################################### # print("\nmedian of posterior probability distribution") print('--------------------------------------------') lmfit.report_fit(result_emcee.params) # find the maximum likelihood solution highest_prob = np.argmax(result_emcee.lnprob) hp_loc = np.unravel_index(highest_prob, result_emcee.lnprob.shape) mle_soln = result_emcee.chain[hp_loc] print("\nMaximum likelihood Estimation") print('-----------------------------') for ix, param in enumerate(emcee_params): print(param + ': ' + str(mle_soln[ix])) quantiles = np.percentile(result_emcee.flatchain['t1'], [2.28, 15.9, 50, 84.2, 97.7]) print("\n\n1 sigma spread", 0.5 * (quantiles[3] - quantiles[1])) print("2 sigma spread", 0.5 * (quantiles[4] - quantiles[0])) lmfit-py-1.0.0/examples/example_expression_model.py000066400000000000000000000022501357751001700225400ustar00rootroot00000000000000""" Using an ExpressionModel ======================== ExpressionModels allow a model to be built from a user-supplied expression. 
See: https://lmfit.github.io/lmfit-py/builtin_models.html#user-defined-models """ import matplotlib.pyplot as plt import numpy as np from lmfit.models import ExpressionModel ############################################################################### # Generate synthetic data for the user-supplied model: x = np.linspace(-10, 10, 201) amp, cen, wid = 3.4, 1.8, 0.5 y = amp * np.exp(-(x-cen)**2 / (2*wid**2)) / (np.sqrt(2*np.pi)*wid) y = y + np.random.normal(size=x.size, scale=0.01) ############################################################################### # Define the ExpressionModel and perform the fit: gmod = ExpressionModel("amp * exp(-(x-cen)**2 /(2*wid**2))/(sqrt(2*pi)*wid)") result = gmod.fit(y, x=x, amp=5, cen=5, wid=1) ############################################################################### # this results in the following output: print(result.fit_report()) plt.plot(x, y, 'bo') plt.plot(x, result.init_fit, 'k--', label='initial fit') plt.plot(x, result.best_fit, 'r-', label='best fit') plt.legend(loc='best') plt.show() lmfit-py-1.0.0/examples/example_fit_multi_datasets.py000066400000000000000000000052761357751001700230600ustar00rootroot00000000000000""" Fit Multiple Data Sets ====================== Fitting multiple (simulated) Gaussian data sets simultaneously. All minimizers require the residual array to be one-dimensional. Therefore, in the ``objective`` we need to ```flatten``` the array before returning it. TODO: this should be using the Model interface / built-in models! """ import matplotlib.pyplot as plt import numpy as np from lmfit import Parameters, minimize, report_fit def gauss(x, amp, cen, sigma): """Gaussian lineshape.""" return amp * np.exp(-(x-cen)**2 / (2.*sigma**2)) def gauss_dataset(params, i, x): """Calculate Gaussian lineshape from parameters for data set.""" amp = params['amp_%i' % (i+1)] cen = params['cen_%i' % (i+1)] sig = params['sig_%i' % (i+1)] return gauss(x, amp, cen, sig) def objective(params, x, data): """Calculate total residual for fits of Gaussians to several data sets.""" ndata, _ = data.shape resid = 0.0*data[:] # make residual per data set for i in range(ndata): resid[i, :] = data[i, :] - gauss_dataset(params, i, x) # now flatten this to a 1D array, as minimize() needs return resid.flatten() ############################################################################### # Create five simulated Gaussian data sets x = np.linspace(-1, 2, 151) data = [] for i in np.arange(5): params = Parameters() amp = 0.60 + 9.50*np.random.rand() cen = -0.20 + 1.20*np.random.rand() sig = 0.25 + 0.03*np.random.rand() dat = gauss(x, amp, cen, sig) + np.random.normal(size=x.size, scale=0.1) data.append(dat) data = np.array(data) ############################################################################### # Create five sets of fitting parameters, one per data set fit_params = Parameters() for iy, y in enumerate(data): fit_params.add('amp_%i' % (iy+1), value=0.5, min=0.0, max=200) fit_params.add('cen_%i' % (iy+1), value=0.4, min=-2.0, max=2.0) fit_params.add('sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0) ############################################################################### # Constrain the values of sigma to be the same for all peaks by assigning # sig_2, ..., sig_5 to be equal to sig_1. 
for iy in (2, 3, 4, 5): fit_params['sig_%i' % iy].expr = 'sig_1' ############################################################################### # Run the global fit and show the fitting result out = minimize(objective, fit_params, args=(x, data)) report_fit(out.params) ############################################################################### # Plot the data sets and fits plt.figure() for i in range(5): y_fit = gauss_dataset(out.params, i, x) plt.plot(x, data[i, :], 'o', x, y_fit, '-') plt.show() lmfit-py-1.0.0/examples/example_fit_with_algebraic_constraint.py000066400000000000000000000032151357751001700252350ustar00rootroot00000000000000""" Fit with Algebraic Constraint ============================= """ import matplotlib.pyplot as plt from numpy import linspace, random from lmfit import Minimizer, Parameters from lmfit.lineshapes import gaussian, lorentzian from lmfit.printfuncs import report_fit def residual(pars, x, sigma=None, data=None): yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g']) yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l']) slope = pars['line_slope'] offset = pars['line_off'] model = yg + yl + offset + x*slope if data is None: return model if sigma is None: return model - data return (model - data) / sigma random.seed(0) x = linspace(0.0, 20.0, 601) data = (gaussian(x, 21, 8.1, 1.2) + lorentzian(x, 10, 9.6, 2.4) + random.normal(scale=0.23, size=x.size) + x*0.5) pfit = Parameters() pfit.add(name='amp_g', value=10) pfit.add(name='cen_g', value=9) pfit.add(name='wid_g', value=1) pfit.add(name='amp_tot', value=20) pfit.add(name='amp_l', expr='amp_tot - amp_g') pfit.add(name='cen_l', expr='1.5+cen_g') pfit.add(name='wid_l', expr='2*wid_g') pfit.add(name='line_slope', value=0.0) pfit.add(name='line_off', value=0.0) sigma = 0.021 # estimate of data error (for all data points) myfit = Minimizer(residual, pfit, fcn_args=(x,), fcn_kws={'sigma': sigma, 'data': data}, scale_covar=True) result = myfit.leastsq() init = residual(pfit, x) fit = residual(result.params, x) report_fit(result) plt.plot(x, data, 'r+') plt.plot(x, init, 'b--', label='initial fit') plt.plot(x, fit, 'k-', label='best fit') plt.legend(loc='best') plt.show() lmfit-py-1.0.0/examples/example_fit_with_bounds.py000066400000000000000000000034701357751001700223550ustar00rootroot00000000000000""" Fit Using Bounds ================ A major advantage of using lmfit is that one can specify boundaries on fitting parameters, even if the underlying algorithm in SciPy does not support this. For more information on how this is implemented, please refer to: https://lmfit.github.io/lmfit-py/bounds.html The example below shows how to set boundaries using the ``min`` and ``max`` attributes to fitting parameters. 
""" import matplotlib.pyplot as plt from numpy import exp, linspace, pi, random, sign, sin from lmfit import Parameters, minimize from lmfit.printfuncs import report_fit p_true = Parameters() p_true.add('amp', value=14.0) p_true.add('period', value=5.4321) p_true.add('shift', value=0.12345) p_true.add('decay', value=0.01000) def residual(pars, x, data=None): argu = (x * pars['decay'])**2 shift = pars['shift'] if abs(shift) > pi/2: shift = shift - sign(shift)*pi model = pars['amp'] * sin(shift + x/pars['period']) * exp(-argu) if data is None: return model return model - data random.seed(0) x = linspace(0, 250, 1500) noise = random.normal(scale=2.80, size=x.size) data = residual(p_true, x) + noise fit_params = Parameters() fit_params.add('amp', value=13.0, max=20, min=0.0) fit_params.add('period', value=2, max=10) fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.) fit_params.add('decay', value=0.02, max=0.10, min=0.00) out = minimize(residual, fit_params, args=(x,), kws={'data': data}) fit = residual(out.params, x) ############################################################################### # This gives the following fitting results: report_fit(out, show_correl=True, modelpars=p_true) ############################################################################### # and shows the plot below: # plt.plot(x, data, 'ro') plt.plot(x, fit, 'b') plt.show() lmfit-py-1.0.0/examples/example_fit_with_derivfunc.py000066400000000000000000000044471357751001700230550ustar00rootroot00000000000000""" Fit Specifying a Function to Compute the Jacobian ================================================= Specifying an analytical function to calculate the Jacobian can speed-up the fitting procedure. """ import matplotlib.pyplot as plt import numpy as np from lmfit import Minimizer, Parameters def func(pars, x, data=None): a, b, c = pars['a'], pars['b'], pars['c'] model = a * np.exp(-b*x) + c if data is None: return model return model - data def dfunc(pars, x, data=None): a, b = pars['a'], pars['b'] v = np.exp(-b*x) return np.array([v, -a*x*v, np.ones(len(x))]) def f(var, x): return var[0] * np.exp(-var[1]*x) + var[2] params = Parameters() params.add('a', value=10) params.add('b', value=10) params.add('c', value=10) a, b, c = 2.5, 1.3, 0.8 x = np.linspace(0, 4, 50) y = f([a, b, c], x) data = y + 0.15*np.random.normal(size=x.size) # fit without analytic derivative min1 = Minimizer(func, params, fcn_args=(x,), fcn_kws={'data': data}) out1 = min1.leastsq() fit1 = func(out1.params, x) # fit with analytic derivative min2 = Minimizer(func, params, fcn_args=(x,), fcn_kws={'data': data}) out2 = min2.leastsq(Dfun=dfunc, col_deriv=1) fit2 = func(out2.params, x) ############################################################################### # Comparison of fit to exponential decay with/without analytical derivatives # to model = a*exp(-b*x) + c print(''' "true" parameters are: a = %.3f, b = %.3f, c = %.3f ============================================== Statistic/Parameter| Without | With | ---------------------------------------------- N Function Calls | %3i | %3i | Chi-square | %.4f | %.4f | a | %.4f | %.4f | b | %.4f | %.4f | c | %.4f | %.4f | ---------------------------------------------- ''' % (a, b, c, out1.nfev, out2.nfev, out1.chisqr, out2.chisqr, out1.params['a'], out2.params['a'], out1.params['b'], out2.params['b'], out1.params['c'], out2.params['c'])) ############################################################################### # and the best-fit to the synthetic data (with added noise) is the same for # 
both methods: plt.plot(x, data, 'ro') plt.plot(x, fit1, 'b') plt.plot(x, fit2, 'k') plt.show() lmfit-py-1.0.0/examples/example_fit_with_inequality.py000066400000000000000000000043401357751001700232440ustar00rootroot00000000000000""" Fit Using Inequality Constraint =============================== Sometimes specifying boundaries using ``min`` and ``max`` are not sufficient, and more complicated (inequality) constraints are needed. In the example below the center of the Lorentzian peak is constrained to be between 0-5 away from the center of the Gaussian peak. See also: https://lmfit.github.io/lmfit-py/constraints.html#using-inequality-constraints """ import matplotlib.pyplot as plt import numpy as np from lmfit import Minimizer, Parameters, report_fit from lmfit.lineshapes import gaussian, lorentzian def residual(pars, x, data): model = (gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g']) + lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])) return model - data ############################################################################### # Generate the simulated data using a Gaussian and Lorentzian line shape: np.random.seed(0) x = np.linspace(0, 20.0, 601) data = (gaussian(x, 21, 6.1, 1.2) + lorentzian(x, 10, 9.6, 1.3) + np.random.normal(scale=0.1, size=x.size)) ############################################################################### # Create the fitting parameters and set an inequality constraint for ``cen_l``. # First, we add a new fitting parameter ``peak_split``, which can take values # between 0 and 5. Afterwards, we constrain the value for ``cen_l`` using the # expression to be ``'peak_split+cen_g'``: pfit = Parameters() pfit.add(name='amp_g', value=10) pfit.add(name='amp_l', value=10) pfit.add(name='cen_g', value=5) pfit.add(name='peak_split', value=2.5, min=0, max=5, vary=True) pfit.add(name='cen_l', expr='peak_split+cen_g') pfit.add(name='wid_g', value=1) pfit.add(name='wid_l', expr='wid_g') mini = Minimizer(residual, pfit, fcn_args=(x, data)) out = mini.leastsq() best_fit = data + out.residual ############################################################################### # Performing a fit, here using the ``leastsq`` algorithm, gives the following # fitting results: report_fit(out.params) ############################################################################### # and figure: plt.plot(x, data, 'bo') plt.plot(x, best_fit, 'r--', label='best fit') plt.legend(loc='best') plt.show() lmfit-py-1.0.0/examples/example_reduce_fcn.py000066400000000000000000000037021357751001700212610ustar00rootroot00000000000000""" Fit Specifying Different Reduce Function ======================================== The reduce_fcn specifies how to convert a residual array to a scalar value for the scalar minimizers. The default value is None (i.e., "sum of squares of residual") - alternatives are: 'negentropy' and 'neglogcauchy' or a user-specified "callable". For more information please refer to: https://lmfit.github.io/lmfit-py/fitting.html#using-the-minimizer-class Here, we use as an example the Student's t log-likelihood for robust fitting of data with outliers. 
""" import matplotlib.pyplot as plt import numpy as np import lmfit np.random.seed(2) x = np.linspace(0, 10, 101) # Setup example decay = 5 offset = 1.0 amp = 2.0 omega = 4.0 y = offset + amp * np.sin(omega*x) * np.exp(-x/decay) yn = y + np.random.normal(size=y.size, scale=0.250) outliers = np.random.randint(int(len(x)/3.0), len(x), int(len(x)/12)) yn[outliers] += 5*np.random.random(len(outliers)) def resid(params, x, ydata): decay = params['decay'].value offset = params['offset'].value omega = params['omega'].value amp = params['amp'].value y_model = offset + amp * np.sin(x*omega) * np.exp(-x/decay) return y_model - ydata params = lmfit.Parameters() params.add('offset', 2.0) params.add('omega', 3.3) params.add('amp', 2.5) params.add('decay', 1.0, min=0) method = 'L-BFGS-B' o1 = lmfit.minimize(resid, params, args=(x, yn), method=method) print("# Fit using sum of squares:\n") lmfit.report_fit(o1) o2 = lmfit.minimize(resid, params, args=(x, yn), method=method, reduce_fcn='neglogcauchy') print("\n\n# Robust Fit, using log-likelihood with Cauchy PDF:\n") lmfit.report_fit(o2) plt.plot(x, y, 'ko', lw=2) plt.plot(x, yn, 'k--*', lw=1) plt.plot(x, yn+o1.residual, 'r-', lw=2) plt.plot(x, yn+o2.residual, 'b-', lw=2) plt.legend(['True function', 'with noise+outliers', 'sum of squares fit', 'robust fit'], loc='upper left') plt.show() lmfit-py-1.0.0/examples/example_use_pandas.py000066400000000000000000000014411357751001700213040ustar00rootroot00000000000000""" Fit with Data in a pandas DataFrame =================================== Simple example demonstrating how to read in the data using pandas and supply the elements of the DataFrame from lmfit. """ import matplotlib.pyplot as plt import pandas as pd from lmfit.models import LorentzianModel ############################################################################### # read the data into a pandas DataFrame, and use the 'x' and 'y' columns: dframe = pd.read_csv('peak.csv') model = LorentzianModel() params = model.guess(dframe['y'], x=dframe['x']) result = model.fit(dframe['y'], params, x=dframe['x']) ############################################################################### # and gives the plot and fitting results below: result.plot_fit() plt.show() print(result.fit_report()) lmfit-py-1.0.0/examples/lmfit_emcee_model_selection.py000066400000000000000000000171571357751001700231600ustar00rootroot00000000000000""" Model Selection using lmfit and emcee ===================================== FIXME: this is a useful examples; however, it doesn't run correctly anymore as the PTSampler was removed in emcee v3... """ ############################################################################### # `lmfit.emcee` can be used to obtain the posterior probability distribution # of parameters, given a set of experimental data. This notebook shows how it # can be used for Bayesian model selection. 
import matplotlib.pyplot as plt import numpy as np import lmfit ############################################################################### # Define a Gaussian lineshape and generate some data: def gauss(x, a_max, loc, sd): return a_max * np.exp(-((x - loc) / sd)**2) x = np.linspace(3, 7, 250) np.random.seed(0) y = 4 + 10 * x + gauss(x, 200, 5, 0.5) + gauss(x, 60, 5.8, 0.2) dy = np.sqrt(y) y += dy * np.random.randn(y.size) ############################################################################### # Plot the data: plt.errorbar(x, y) ############################################################################### # Define the normalised residual for the data: def residual(p, just_generative=False): v = p.valuesdict() generative = v['a'] + v['b'] * x M = 0 while 'a_max%d' % M in v: generative += gauss(x, v['a_max%d' % M], v['loc%d' % M], v['sd%d' % M]) M += 1 if just_generative: return generative return (generative - y) / dy ############################################################################### # Create a Parameter set for the initial guesses: def initial_peak_params(M): p = lmfit.Parameters() # a and b give a linear background a = np.mean(y) b = 1 # a_max, loc and sd are the amplitude, location and SD of each Gaussian # component a_max = np.max(y) loc = np.mean(x) sd = (np.max(x) - np.min(x)) * 0.5 p.add_many(('a', a, True, 0, 10), ('b', b, True, 1, 15)) for i in range(M): p.add_many(('a_max%d' % i, 0.5 * a_max, True, 10, a_max), ('loc%d' % i, loc, True, np.min(x), np.max(x)), ('sd%d' % i, sd, True, 0.1, np.max(x) - np.min(x))) return p ############################################################################### # Solving with `minimize` gives the Maximum Likelihood solution. p1 = initial_peak_params(1) mi1 = lmfit.minimize(residual, p1, method='differential_evolution') lmfit.printfuncs.report_fit(mi1.params, min_correl=0.5) ############################################################################### # From inspection of the data above we can tell that there is going to be more # than 1 Gaussian component, but how many are there? A Bayesian approach can # be used for this model selection problem. We can do this with `lmfit.emcee`, # which uses the `emcee` package to do a Markov Chain Monte Carlo sampling of # the posterior probability distribution. `lmfit.emcee` requires a function # that returns the log-posterior probability. The log-posterior probability is # a sum of the log-prior probability and log-likelihood functions. # # The log-prior probability encodes information about what you already believe # about the system. `lmfit.emcee` assumes that this log-prior probability is # zero if all the parameters are within their bounds and `-np.inf` if any of # the parameters are outside their bounds. As such it's a uniform prior. # # The log-likelihood function is given below. To use non-uniform priors then # should include these terms in `lnprob`. This is the log-likelihood # probability for the sampling. def lnprob(p): resid = residual(p, just_generative=True) return -0.5 * np.sum(((resid - y) / dy)**2 + np.log(2 * np.pi * dy**2)) ############################################################################### # To start with we have to create the minimizers and *burn* them in. We create # 4 different minimizers representing 0, 1, 2 or 3 Gaussian contributions. To # do the model selection we have to integrate the over the log-posterior # distribution to see which has the higher probability. 
This is done using the # `thermodynamic_integration_log_evidence` method of the `sampler` attribute # contained in the `lmfit.Minimizer` object. # Work out the log-evidence for different numbers of peaks: total_steps = 310 burn = 300 thin = 10 ntemps = 15 workers = 1 # the multiprocessing does not work with sphinx-gallery log_evidence = [] res = [] # set up the Minimizers for i in range(4): p0 = initial_peak_params(i) # you can't use lnprob as a userfcn with minimize because it needs to be # maximised mini = lmfit.Minimizer(residual, p0) out = mini.minimize(method='differential_evolution') res.append(out) mini = [] # burn in the samplers for i in range(4): # do the sampling mini.append(lmfit.Minimizer(lnprob, res[i].params)) out = mini[i].emcee(steps=total_steps, ntemps=ntemps, workers=workers, reuse_sampler=False, float_behavior='posterior', progress=False) # get the evidence print(i, total_steps, mini[i].sampler.thermodynamic_integration_log_evidence()) log_evidence.append(mini[i].sampler.thermodynamic_integration_log_evidence()[0]) ############################################################################### # Once we've burned in the samplers we have to do a collection run. We thin # out the MCMC chain to reduce autocorrelation between successive samples. for j in range(6): total_steps += 100 for i in range(4): # do the sampling res = mini[i].emcee(burn=burn, steps=100, thin=thin, ntemps=ntemps, workers=workers, reuse_sampler=True, progress=False) # get the evidence print(i, total_steps, mini[i].sampler.thermodynamic_integration_log_evidence()) log_evidence.append(mini[i].sampler.thermodynamic_integration_log_evidence()[0]) plt.plot(log_evidence[-4:]) plt.ylabel('Log-evidence') plt.xlabel('number of peaks') ############################################################################### # The Bayes factor is related to the exponential of the difference between the # log-evidence values. Thus, 0 peaks is not very likely compared to 1 peak. # But 1 peak is not as good as 2 peaks. 3 peaks is not that much better than 2 # peaks. r01 = np.exp(log_evidence[-4] - log_evidence[-3]) r12 = np.exp(log_evidence[-3] - log_evidence[-2]) r23 = np.exp(log_evidence[-2] - log_evidence[-1]) print(r01, r12, r23) ############################################################################### # These numbers tell us that zero peaks is 0 times as likely as one peak. Two # peaks is 7e49 times more likely than one peak. Three peaks is 1.1 times more # likely than two peaks. With this data one would say that two peaks is # sufficient. Caution has to be taken with these values. The log-priors for # this sampling are uniform but improper, i.e. they are not normalised properly. # Internally the lnprior probability is calculated as 0 if all parameters are # within their bounds and `-np.inf` if any parameter is outside the bounds. # The `lnprob` function defined above is the log-likelihood alone. Remember, # that the log-posterior probability is equal to the sum of the log-prior and # log-likelihood probabilities. Extra terms can be added to the lnprob function # to calculate the normalised log-probability. These terms would look something # like: # # .. math:: # # \log (\prod_i \frac{1}{max_i - min_i}) # # where :math:`max_i` and :math:`min_i` are the upper and lower bounds for the # parameter, and the prior is a uniform distribution. Other types of prior are # possible. For example, you might expect the prior to be Gaussian. 
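###############################################################################
# As a minimal sketch of that idea (not part of the analysis above; the prior
# centre ``prior_mu`` and width ``prior_sd`` are hypothetical values chosen
# only for illustration), a Gaussian prior on the location ``loc0`` could be
# folded into the log-probability like this:
def lnprob_gaussian_prior(p, prior_mu=5.0, prior_sd=1.0):
    """Log-likelihood plus a Gaussian log-prior on the 'loc0' parameter."""
    v = p.valuesdict()
    log_prior = (-0.5 * ((v['loc0'] - prior_mu) / prior_sd)**2
                 - np.log(prior_sd * np.sqrt(2 * np.pi)))
    return lnprob(p) + log_prior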
lmfit-py-1.0.0/examples/model1d_gauss.dat000066400000000000000000000045051357751001700203220ustar00rootroot00000000000000#--------------------------------- # col1 col2 0.000000 -0.305196 0.100000 0.004932 0.200000 0.192535 0.300000 0.100639 0.400000 0.244992 0.500000 -0.001095 0.600000 -0.017190 0.700000 -0.138330 0.800000 -0.065546 0.900000 0.150089 1.000000 0.021981 1.100000 0.231610 1.200000 0.186122 1.300000 0.224188 1.400000 0.355904 1.500000 -0.069747 1.600000 0.062342 1.700000 -0.025591 1.800000 0.052080 1.900000 -0.329106 2.000000 -0.012132 2.100000 0.205438 2.200000 0.118093 2.300000 0.018204 2.400000 -0.113374 2.500000 -0.086265 2.600000 -0.074747 2.700000 0.179214 2.800000 0.168398 2.900000 0.067954 3.000000 0.076506 3.100000 0.433768 3.200000 0.019097 3.300000 0.239973 3.400000 0.006607 3.500000 -0.121174 3.600000 0.162577 3.700000 0.042030 3.800000 0.288718 3.900000 0.137440 4.000000 0.593153 4.100000 0.480413 4.200000 0.901715 4.300000 0.868281 4.400000 1.301646 4.500000 1.093022 4.600000 1.531770 4.700000 1.772498 4.800000 2.346719 4.900000 2.716594 5.000000 3.333042 5.100000 3.688503 5.200000 3.821775 5.300000 4.583784 5.400000 4.805664 5.500000 5.125762 5.600000 4.964982 5.700000 4.988856 5.800000 4.854896 5.900000 4.738134 6.000000 4.815129 6.100000 4.070525 6.200000 3.983041 6.300000 3.107054 6.400000 2.841105 6.500000 2.610117 6.600000 2.146078 6.700000 1.683386 6.800000 1.317547 6.900000 0.789538 7.000000 0.585832 7.100000 0.494665 7.200000 0.447038 7.300000 0.441926 7.400000 0.393547 7.500000 -0.033900 7.600000 0.042947 7.700000 -0.116248 7.800000 0.061516 7.900000 0.183615 8.000000 -0.127174 8.100000 0.368512 8.200000 0.194381 8.300000 0.301574 8.400000 0.045097 8.500000 0.110543 8.600000 0.263164 8.700000 0.190722 8.800000 0.425007 8.900000 0.253164 9.000000 0.201519 9.100000 0.132292 9.200000 0.304519 9.300000 0.129096 9.400000 0.269171 9.500000 0.189405 9.600000 0.243728 9.700000 0.411963 9.800000 0.080682 9.900000 0.332672 10.000000 -0.067100 lmfit-py-1.0.0/examples/peak.csv000066400000000000000000000037031357751001700165350ustar00rootroot00000000000000x,y 0.000000, 0.021654 0.200000, 0.385367 0.400000, 0.193304 0.600000, 0.103481 0.800000, 0.404041 1.000000, 0.212585 1.200000, 0.253212 1.400000, -0.037306 1.600000, 0.271415 1.800000, 0.025614 2.000000, 0.066419 2.200000, -0.034347 2.400000, 0.153702 2.600000, 0.161341 2.800000, -0.097676 3.000000, -0.061880 3.200000, 0.085341 3.400000, 0.083674 3.600000, 0.190944 3.800000, 0.222168 4.000000, 0.214417 4.200000, 0.341221 4.400000, 0.634501 4.600000, 0.302566 4.800000, 0.101096 5.000000, -0.106441 5.200000, 0.567396 5.400000, 0.531899 5.600000, 0.459800 5.800000, 0.646655 6.000000, 0.662228 6.200000, 0.820844 6.400000, 0.947696 6.600000, 1.541353 6.800000, 1.763981 7.000000, 1.846081 7.200000, 2.986333 7.400000, 3.182907 7.600000, 3.786487 7.800000, 4.822287 8.000000, 5.739122 8.200000, 6.744448 8.400000, 7.295213 8.600000, 8.737766 8.800000, 9.693782 9.000000, 9.894218 9.200000, 10.193956 9.400000, 10.091519 9.600000, 9.652392 9.800000, 8.670938 10.000000, 8.004205 10.200000, 6.773599 10.400000, 6.076502 10.600000, 5.127315 10.800000, 4.303762 11.000000, 3.426006 11.200000, 2.416431 11.400000, 2.311363 11.600000, 1.748020 11.800000, 1.135594 12.000000, 0.888514 12.200000, 1.030794 12.400000, 0.543024 12.600000, 0.767751 12.800000, 0.657551 13.000000, 0.495730 13.200000, 0.447520 13.400000, 0.173839 13.600000, 0.256758 13.800000, 0.596106 14.000000, 0.065328 14.200000, 0.197267 14.400000, 0.260038 14.600000, 
0.460880 14.800000, 0.335248 15.000000, 0.295977 15.200000, -0.010228 15.400000, 0.138670 15.600000, 0.192113 15.800000, 0.304371 16.000000, 0.442517 16.200000, 0.164944 16.400000, 0.001907 16.600000, 0.207504 16.800000, 0.012640 17.000000, 0.090878 17.200000, -0.222967 17.400000, 0.391717 17.600000, 0.180295 17.800000, 0.206875 18.000000, 0.240595 18.200000, -0.037437 18.400000, 0.139918 18.600000, 0.012560 18.800000, -0.053009 19.000000, 0.226069 19.200000, 0.076879 19.400000, 0.078599 19.600000, 0.016125 19.800000, -0.071217 20.000000, -0.091474 lmfit-py-1.0.0/examples/sinedata.dat000066400000000000000000000046401357751001700173630ustar00rootroot00000000000000#--------------------------------- # col1 col2 0.000000 1.546927 1.000000 1.736431 2.000000 2.215431 3.000000 1.784280 4.000000 0.790258 5.000000 0.031843 6.000000 -0.923626 7.000000 -1.751375 8.000000 -2.237504 9.000000 -2.309521 10.000000 -1.808337 11.000000 -0.522004 12.000000 0.193358 13.000000 1.049232 14.000000 2.073874 15.000000 1.975083 16.000000 1.483449 17.000000 1.020072 18.000000 -0.346997 19.000000 -1.718224 20.000000 -1.922197 21.000000 -2.296117 22.000000 -2.064431 23.000000 -0.998217 24.000000 -0.153316 25.000000 1.137056 26.000000 2.110909 27.000000 1.984092 28.000000 2.539031 29.000000 1.459715 30.000000 0.457624 31.000000 -0.802046 32.000000 -1.857762 33.000000 -2.102987 34.000000 -2.338332 35.000000 -1.541560 36.000000 -0.940634 37.000000 1.131820 38.000000 1.487181 39.000000 1.992744 40.000000 2.323562 41.000000 1.477698 42.000000 0.676721 43.000000 -0.474352 44.000000 -1.107867 45.000000 -2.337848 46.000000 -2.019775 47.000000 -2.739905 48.000000 -1.644391 49.000000 0.011781 50.000000 1.034463 51.000000 2.037978 52.000000 2.400420 53.000000 2.344507 54.000000 1.591388 55.000000 0.365871 56.000000 -0.908018 57.000000 -1.543188 58.000000 -2.557156 59.000000 -3.041230 60.000000 -1.713676 61.000000 -0.481723 62.000000 0.549215 63.000000 0.727514 64.000000 1.955187 65.000000 2.162520 66.000000 2.074485 67.000000 1.319350 68.000000 -0.375902 69.000000 -1.156888 70.000000 -2.238698 71.000000 -2.406541 72.000000 -1.861680 73.000000 -1.474703 74.000000 -0.415272 75.000000 1.015717 76.000000 1.598828 77.000000 2.273267 78.000000 2.209011 79.000000 1.758354 80.000000 0.553469 81.000000 -1.257431 82.000000 -1.044855 83.000000 -2.258815 84.000000 -2.004194 85.000000 -1.722751 86.000000 -1.206638 87.000000 0.246201 88.000000 1.377268 89.000000 2.057458 90.000000 2.271500 91.000000 1.919600 92.000000 1.346293 93.000000 -0.184235 94.000000 -1.427797 95.000000 -2.123603 96.000000 -2.248833 97.000000 -2.243055 98.000000 -1.140870 99.000000 -0.627367 100.000000 1.603912 lmfit-py-1.0.0/examples/test_peak.dat000066400000000000000000000224301357751001700175470ustar00rootroot00000000000000# test peak data #--------------------------------- # t y 0.000000 0.021654 0.050000 0.019221 0.100000 -0.146881 0.150000 0.109422 0.200000 0.385367 0.250000 0.426230 0.300000 0.019241 0.350000 0.075568 0.400000 0.193304 0.450000 0.237610 0.500000 -0.107071 0.550000 0.207026 0.600000 0.103481 0.650000 0.175033 0.700000 0.022074 0.750000 0.070510 0.800000 0.404041 0.850000 0.126622 0.900000 -0.138651 0.950000 0.149783 1.000000 0.212585 1.050000 0.133744 1.100000 0.190065 1.150000 -0.254227 1.200000 0.253212 1.250000 0.059663 1.300000 0.187533 1.350000 0.253744 1.400000 -0.037306 1.450000 0.080513 1.500000 0.012607 1.550000 0.224475 1.600000 0.271415 1.650000 0.118073 1.700000 -0.077723 1.750000 0.164330 1.800000 0.025614 1.850000 -0.034864 1.900000 
0.068968 1.950000 -0.103238 2.000000 0.066419 2.050000 0.271850 2.100000 0.139049 2.150000 0.162034 2.200000 -0.034347 2.250000 0.135812 2.300000 0.067858 2.350000 -0.161792 2.400000 0.153702 2.450000 0.071054 2.500000 -0.049010 2.550000 0.203306 2.600000 0.161341 2.650000 0.199279 2.700000 0.252416 2.750000 0.355513 2.800000 -0.097676 2.850000 0.254533 2.900000 0.217187 2.950000 0.154375 3.000000 -0.061880 3.050000 0.128343 3.100000 0.205941 3.150000 0.349665 3.200000 0.085341 3.250000 0.125593 3.300000 0.254381 3.350000 0.006456 3.400000 0.083674 3.450000 0.126626 3.500000 0.132028 3.550000 0.367231 3.600000 0.190944 3.650000 -0.004054 3.700000 0.072112 3.750000 0.383266 3.800000 0.222168 3.850000 0.098595 3.900000 0.324558 3.950000 0.125419 4.000000 0.214417 4.050000 0.287499 4.100000 0.230579 4.150000 0.141035 4.200000 0.341221 4.250000 0.162993 4.300000 0.174737 4.350000 0.483097 4.400000 0.634501 4.450000 0.152268 4.500000 0.440815 4.550000 0.125279 4.600000 0.302566 4.650000 0.612674 4.700000 -0.023226 4.750000 0.481199 4.800000 0.101096 4.850000 0.572197 4.900000 0.394625 4.950000 0.461077 5.000000 -0.106441 5.050000 0.635505 5.100000 0.440675 5.150000 0.335979 5.200000 0.567396 5.250000 0.588661 5.300000 0.101309 5.350000 0.370770 5.400000 0.531899 5.450000 0.347064 5.500000 0.387862 5.550000 0.415243 5.600000 0.459800 5.650000 0.559310 5.700000 0.527272 5.750000 0.659222 5.800000 0.646655 5.850000 0.872127 5.900000 0.506336 5.950000 0.832841 6.000000 0.662228 6.050000 0.666240 6.100000 0.745486 6.150000 0.773303 6.200000 0.820844 6.250000 0.949833 6.300000 0.999748 6.350000 1.194918 6.400000 0.947696 6.450000 1.034669 6.500000 1.004666 6.550000 1.155702 6.600000 1.541353 6.650000 1.342422 6.700000 1.477986 6.750000 1.375675 6.800000 1.763981 6.850000 1.638405 6.900000 1.652637 6.950000 2.125423 7.000000 1.846081 7.050000 2.008594 7.100000 1.967327 7.150000 2.420829 7.200000 2.986333 7.250000 2.816069 7.300000 2.779284 7.350000 2.452606 7.400000 3.182907 7.450000 3.345209 7.500000 3.210506 7.550000 3.630722 7.600000 3.786487 7.650000 4.288308 7.700000 4.107791 7.750000 4.223391 7.800000 4.822287 7.850000 4.852727 7.900000 5.153562 7.950000 5.540655 8.000000 5.739122 8.050000 5.965430 8.100000 5.893505 8.150000 6.520379 8.200000 6.744448 8.250000 6.982811 8.300000 6.871811 8.350000 7.381590 8.400000 7.295213 8.450000 7.770220 8.500000 7.855105 8.550000 8.178695 8.600000 8.737766 8.650000 8.659328 8.700000 8.761986 8.750000 9.325407 8.800000 9.693782 8.850000 9.493158 8.900000 9.840173 8.950000 9.591383 9.000000 9.894218 9.050000 9.781619 9.100000 9.787061 9.150000 9.944484 9.200000 10.193956 9.250000 10.452393 9.300000 10.198352 9.350000 10.220196 9.400000 10.091519 9.450000 9.803956 9.500000 9.976457 9.550000 9.644976 9.600000 9.652392 9.650000 9.364996 9.700000 9.141562 9.750000 9.123553 9.800000 8.670938 9.850000 8.830762 9.900000 8.612662 9.950000 8.200565 10.000000 8.004205 10.050000 7.786050 10.100000 7.729310 10.150000 7.287126 10.200000 6.773599 10.250000 6.820778 10.300000 6.790992 10.350000 6.324548 10.400000 6.076502 10.450000 5.768973 10.500000 5.787036 10.550000 5.553690 10.600000 5.127315 10.650000 4.902255 10.700000 4.929891 10.750000 4.171166 10.800000 4.303762 10.850000 3.767545 10.900000 3.791083 10.950000 3.814857 11.000000 3.426006 11.050000 3.078426 11.100000 2.789747 11.150000 2.620130 11.200000 2.416431 11.250000 2.430768 11.300000 2.268585 11.350000 2.235498 11.400000 2.311363 11.450000 2.005221 11.500000 1.970229 11.550000 1.907982 11.600000 1.748020 
11.650000 1.481710 11.700000 1.519127 11.750000 1.777618 11.800000 1.135594 11.850000 1.345861 11.900000 1.046777 11.950000 1.040376 12.000000 0.888514 12.050000 0.994942 12.100000 1.002009 12.150000 1.235839 12.200000 1.030794 12.250000 0.894109 12.300000 0.839384 12.350000 0.564763 12.400000 0.543024 12.450000 1.067728 12.500000 0.569039 12.550000 0.546196 12.600000 0.767751 12.650000 0.372794 12.700000 0.506039 12.750000 0.094006 12.800000 0.657551 12.850000 0.689847 12.900000 0.235074 12.950000 0.511880 13.000000 0.495730 13.050000 0.720208 13.100000 0.458972 13.150000 0.515104 13.200000 0.447520 13.250000 0.309378 13.300000 0.336000 13.350000 0.403743 13.400000 0.173839 13.450000 0.542466 13.500000 0.435708 13.550000 0.502801 13.600000 0.256758 13.650000 0.269744 13.700000 0.204110 13.750000 0.219654 13.800000 0.596106 13.850000 0.272604 13.900000 0.228125 13.950000 0.308160 14.000000 0.065328 14.050000 0.491292 14.100000 0.494818 14.150000 0.321783 14.200000 0.197267 14.250000 0.602161 14.300000 0.155016 14.350000 0.333368 14.400000 0.260038 14.450000 0.149090 14.500000 0.164818 14.550000 0.032011 14.600000 0.460880 14.650000 0.275423 14.700000 0.343308 14.750000 0.348898 14.800000 0.335248 14.850000 0.223771 14.900000 0.056021 14.950000 0.146267 15.000000 0.295977 15.050000 0.029256 15.100000 0.188720 15.150000 0.185713 15.200000 -0.010228 15.250000 -0.075438 15.300000 -0.049977 15.350000 0.156545 15.400000 0.138670 15.450000 0.430603 15.500000 0.107233 15.550000 0.268609 15.600000 0.192113 15.650000 -0.089082 15.700000 0.076649 15.750000 0.494606 15.800000 0.304371 15.850000 0.311904 15.900000 0.146849 15.950000 -0.035298 16.000000 0.442517 16.050000 0.129210 16.100000 0.202598 16.150000 -0.038198 16.200000 0.164944 16.250000 0.089727 16.300000 -0.029338 16.350000 0.321681 16.400000 0.001907 16.450000 0.357234 16.500000 0.706248 16.550000 0.189379 16.600000 0.207504 16.650000 0.252780 16.700000 0.337652 16.750000 0.164710 16.800000 0.012640 16.850000 -0.200321 16.900000 0.063620 16.950000 0.014513 17.000000 0.090878 17.050000 0.261647 17.100000 0.140731 17.150000 0.351465 17.200000 -0.222967 17.250000 0.192524 17.300000 -0.083316 17.350000 0.139459 17.400000 0.391717 17.450000 -0.091359 17.500000 -0.118886 17.550000 -0.054844 17.600000 0.180295 17.650000 0.065399 17.700000 0.319015 17.750000 0.166328 17.800000 0.206875 17.850000 0.108605 17.900000 0.085493 17.950000 0.270683 18.000000 0.240595 18.050000 0.299822 18.100000 -0.040008 18.150000 0.306279 18.200000 -0.037437 18.250000 0.006128 18.300000 0.224231 18.350000 0.054691 18.400000 0.139918 18.450000 -0.079608 18.500000 -0.215388 18.550000 -0.063221 18.600000 0.012560 18.650000 -0.138384 18.700000 0.326622 18.750000 0.130812 18.800000 -0.053009 18.850000 -0.028960 18.900000 0.053191 18.950000 0.239460 19.000000 0.226069 19.050000 -0.016509 19.100000 0.155364 19.150000 0.186324 19.200000 0.076879 19.250000 0.184640 19.300000 0.194979 19.350000 0.153825 19.400000 0.078599 19.450000 0.082126 19.500000 0.069517 19.550000 0.169040 19.600000 0.016125 19.650000 -0.145533 19.700000 -0.314756 19.750000 0.409688 19.800000 -0.071217 19.850000 -0.318566 19.900000 0.159099 19.950000 -0.014190 20.000000 -0.091474 lmfit-py-1.0.0/lmfit.egg-info/000077500000000000000000000000001357751001700160645ustar00rootroot00000000000000lmfit-py-1.0.0/lmfit.egg-info/PKG-INFO000066400000000000000000000040171357751001700171630ustar00rootroot00000000000000Metadata-Version: 1.2 Name: lmfit Version: 1.0.0 Summary: Least-Squares Minimization with Bounds and 
Constraints Home-page: https://lmfit.github.io/lmfit-py/ Author: LMFit Development Team Author-email: matt.newville@gmail.com License: BSD-3 Download-URL: https://lmfit.github.io//lmfit-py/ Description: A library for least-squares minimization and data fitting in Python. Built on top of scipy.optimize, lmfit provides a Parameter object which can be set as fixed or free, can have upper and/or lower bounds, or can be written in terms of algebraic constraints of other Parameters. The user writes a function to be minimized as a function of these Parameters, and the scipy.optimize methods are used to find the optimal values for the Parameters. The Levenberg-Marquardt (leastsq) is the default minimization algorithm, and provides estimated standard errors and correlations between varied Parameters. Other minimization methods, including Nelder-Mead's downhill simplex, Powell's method, BFGS, Sequential Least Squares, and others are also supported. Bounds and contraints can be placed on Parameters for all of these methods. In addition, methods for explicitly calculating confidence intervals are provided for exploring minmization problems where the approximation of estimating Parameter uncertainties from the covariance matrix is questionable. Keywords: curve-fitting,least-squares minimization Platform: Windows Platform: Linux Platform: Mac OS X Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Science/Research Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Topic :: Scientific/Engineering Requires-Python: >=3.5 lmfit-py-1.0.0/lmfit.egg-info/SOURCES.txt000066400000000000000000000101161357751001700177470ustar00rootroot00000000000000INSTALL LICENSE MANIFEST.in README.rst THANKS.txt publish_docs.sh requirements.txt setup.cfg setup.py versioneer.py NIST_STRD/Bennett5.dat NIST_STRD/BoxBOD.dat NIST_STRD/Chwirut1.dat NIST_STRD/Chwirut2.dat NIST_STRD/DanWood.dat NIST_STRD/ENSO.dat NIST_STRD/Eckerle4.dat NIST_STRD/Gauss1.dat NIST_STRD/Gauss2.dat NIST_STRD/Gauss3.dat NIST_STRD/Hahn1.dat NIST_STRD/Kirby2.dat NIST_STRD/Lanczos1.dat NIST_STRD/Lanczos2.dat NIST_STRD/Lanczos3.dat NIST_STRD/MGH09.dat NIST_STRD/MGH10.dat NIST_STRD/MGH17.dat NIST_STRD/Misra1a.dat NIST_STRD/Misra1b.dat NIST_STRD/Misra1c.dat NIST_STRD/Misra1d.dat NIST_STRD/Nelson.dat NIST_STRD/Rat42.dat NIST_STRD/Rat43.dat NIST_STRD/Roszman1.dat NIST_STRD/Thurber.dat doc/Makefile doc/bounds.rst doc/builtin_models.rst doc/conf.py doc/confidence.rst doc/constraints.rst doc/contents.rst doc/doc_examples_to_gallery.py doc/extensions.py doc/faq.rst doc/fitting.rst doc/index.rst doc/installation.rst doc/intro.rst doc/model.rst doc/parameters.rst doc/support.rst doc/whatsnew.rst doc/__pycache__/extensions.cpython-37.pyc doc/_static/empty doc/_templates/indexsidebar.html doc/sphinx/ext_imgmath.py doc/sphinx/ext_mathjax.py doc/sphinx/theme/lmfitdoc/layout.html doc/sphinx/theme/lmfitdoc/theme.conf doc/sphinx/theme/lmfitdoc/static/contents.png doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t doc/sphinx/theme/lmfitdoc/static/navigation.png examples/NIST_Gauss2.dat examples/doc_builtinmodels_nistgauss.py examples/doc_builtinmodels_nistgauss2.py examples/doc_builtinmodels_peakmodels.py examples/doc_builtinmodels_stepmodel.py examples/doc_confidence_advanced.py 
examples/doc_confidence_basic.py examples/doc_fitting_emcee.py examples/doc_fitting_withreport.py examples/doc_model_composite.py examples/doc_model_gaussian.py examples/doc_model_loadmodel.py examples/doc_model_loadmodelresult.py examples/doc_model_loadmodelresult2.py examples/doc_model_savemodel.py examples/doc_model_savemodelresult.py examples/doc_model_savemodelresult2.py examples/doc_model_two_components.py examples/doc_model_uncertainty.py examples/doc_model_with_iter_callback.py examples/doc_model_with_nan_policy.py examples/doc_parameters_basic.py examples/doc_parameters_valuesdict.py examples/example_Model_interface.py examples/example_brute.py examples/example_complex_resonator_model.py examples/example_confidence_interval.py examples/example_detect_outliers.py examples/example_diffev.py examples/example_emcee_Model_interface.py examples/example_expression_model.py examples/example_fit_multi_datasets.py examples/example_fit_with_algebraic_constraint.py examples/example_fit_with_bounds.py examples/example_fit_with_derivfunc.py examples/example_fit_with_inequality.py examples/example_reduce_fcn.py examples/example_use_pandas.py examples/lmfit_emcee_model_selection.py examples/model1d_gauss.dat examples/peak.csv examples/sinedata.dat examples/test_peak.dat lmfit/__init__.py lmfit/_ampgo.py lmfit/_version.py lmfit/confidence.py lmfit/jsonutils.py lmfit/lineshapes.py lmfit/minimizer.py lmfit/model.py lmfit/models.py lmfit/parameter.py lmfit/printfuncs.py lmfit.egg-info/PKG-INFO lmfit.egg-info/SOURCES.txt lmfit.egg-info/dependency_links.txt lmfit.egg-info/requires.txt lmfit.egg-info/top_level.txt lmfit/ui/__init__.py lmfit/ui/basefitter.py lmfit/ui/ipy_fitter.py tests/NISTModels.py tests/conftest.py tests/lmfit_testutils.py tests/test_1variable.py tests/test_NIST_Strd.py tests/test_algebraic_constraint.py tests/test_ampgo.py tests/test_basicfit.py tests/test_basinhopping.py tests/test_bounded_jacobian.py tests/test_bounds.py tests/test_brute.py tests/test_confidence.py tests/test_copy_params.py tests/test_covariance_matrix.py tests/test_custom_independentvar.py tests/test_default_kws.py tests/test_dual_annealing.py tests/test_itercb.py tests/test_jsonutils.py tests/test_least_squares.py tests/test_lineshapes_models.py tests/test_manypeaks_speed.py tests/test_minimizer.py tests/test_model.py tests/test_model_uncertainties.py tests/test_multidatasets.py tests/test_nose.py tests/test_parameter.py tests/test_parameters.py tests/test_params_set.py tests/test_printfuncs.py tests/test_saveload.py tests/test_shgo.py tests/test_stepmodel.pylmfit-py-1.0.0/lmfit.egg-info/dependency_links.txt000066400000000000000000000000011357751001700221320ustar00rootroot00000000000000 lmfit-py-1.0.0/lmfit.egg-info/requires.txt000066400000000000000000000000741357751001700204650ustar00rootroot00000000000000asteval>=0.9.16 numpy>=1.16 scipy>=1.2 uncertainties>=3.0.1 lmfit-py-1.0.0/lmfit.egg-info/top_level.txt000066400000000000000000000000061357751001700206120ustar00rootroot00000000000000lmfit lmfit-py-1.0.0/lmfit/000077500000000000000000000000001357751001700143725ustar00rootroot00000000000000lmfit-py-1.0.0/lmfit/__init__.py000066400000000000000000000040701357751001700165040ustar00rootroot00000000000000""" LMFIT: Non-Linear Least-Squares Minimization and Curve-Fitting for Python ========================================================================= Lmfit provides a high-level interface to non-linear optimization and curve-fitting problems for Python. 
It builds on the Levenberg-Marquardt algorithm of scipy.optimize.leastsq(), but also supports most of the optimization methods from scipy.optimize. It has a number of useful enhancements, including: * Using Parameter objects instead of plain floats as variables. A Parameter has a value that can be varied in the fit, fixed, have upper and/or lower bounds. It can even have a value that is constrained by an algebraic expression of other Parameter values. * Ease of changing fitting algorithms. Once a fitting model is set up, one can change the fitting algorithm without changing the objective function. * Improved estimation of confidence intervals. While scipy.optimize.leastsq() will automatically calculate uncertainties and correlations from the covariance matrix, lmfit also has functions to explicitly explore parameter space to determine confidence levels even for the most difficult cases. * Improved curve-fitting with the Model class. This extends the capabilities of scipy.optimize.curve_fit(), allowing you to turn a function that models your data into a Python class that helps you parametrize and fit data with that model. * Many built-in models for common lineshapes are included and ready to use. Copyright (c) 2019 Lmfit Developers ; BSD-3 license ; see LICENSE """ from asteval import Interpreter from .confidence import conf_interval, conf_interval2d from .minimizer import Minimizer, MinimizerException, minimize from .parameter import Parameter, Parameters from .printfuncs import (ci_report, fit_report, report_ci, report_errors, report_fit) from .model import Model, CompositeModel from . import lineshapes, models # versioneer code from ._version import get_versions __version__ = get_versions()['version'] del get_versions lmfit-py-1.0.0/lmfit/_ampgo.py000066400000000000000000000235241357751001700162140ustar00rootroot00000000000000"""Adaptive Memory Programming for Global Optimization (AMPGO). added to lmfit by Renee Otten (2018) based on the Python implementation of Andrea Gavana (see: http://infinity77.net/global_optimization/) Implementation details can be found in this paper: http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf """ import numpy as np from scipy.optimize import minimize SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP'] def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None, totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, eps2=0.1, tabulistsize=5, tabustrategy='farthest', disp=False): """Find the global minimum of a multivariate function using the AMPGO (Adaptive Memory Programming for Global Optimization) algorithm. Parameters ---------- objfun: callable Objective function to be minimized. The function must have the signature: objfun(params, *args, **kws) x0: numpy.ndarray Initial guesses for parameter values. args: tuple, optional Additional arguments passed to `objfun`. local: str, optional Name of the local minimization method. Valid options are: - `'L-BFGS-B'` (default) - `Nelder-Mead'` - `'Powell'` - `'TNC'` - `'SLSQP'` local_opts: dict, optional Options to pass to the local minimizer. bounds: sequence, optional List of tuples specifying the lower and upper bound for each independent variable [(`xl0`, `xu0`), (`xl1`, `xu1`), ...]. maxfunevals: int, optional Maximum number of function evaluations. If None, the optimization will stop after `totaliter` number of iterations. 
totaliter: int, optional Maximum number of global iterations. maxiter: int, optional Maximum number of `Tabu Tunneling` iterations during each global iteration. glbtol: float, optional Tolerance whether or not to accept a solution after a tunneling phase. eps1: float, optional Constant used to define an aspiration value for the objective function during the Tunneling phase. eps2: float, optional Perturbation factor used to move away from the latest local minimum at the start of a Tunneling phase. tabulistsize: int, optional Size of the (circular) tabu search list. tabustrategy: str, optional Strategy to use when the size of the tabu list exceeds `tabulistsize`. It can be 'oldest' to drop the oldest point from the tabu list or 'farthest' to drop the element farthest from the last local minimum found. disp: bool, optional Set to True to print convergence messages. Returns ------- tuple: A tuple of 5 elements, in the following order: 1. **best_x** (`array_like`): the estimated position of the global minimum. 2. **best_f** (`float`): the value of `objfun` at the minimum. 3. **evaluations** (`integer`): the number of function evaluations. 4. **msg** (`string`): a message describes the cause of the termination. 5. **tunnel_info** (`tuple`): a tuple containing the total number of Tunneling phases performed and the successful ones. Notes ----- The detailed implementation of AMPGO is described in the paper "Adaptive Memory Programming for Constrained Global Optimization" located here: http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf """ if local not in SCIPY_LOCAL_SOLVERS: raise Exception('Invalid local solver selected: {}'.format(local)) x0 = np.atleast_1d(x0) n = len(x0) if bounds is None: bounds = [(None, None)] * n if len(bounds) != n: raise ValueError('length of x0 != length of bounds') bounds = [b if b is not None else (None, None) for b in bounds] _bounds = [(-np.inf if l is None else l, np.inf if u is None else u) for l, u in bounds] low, up = np.array(_bounds).T if maxfunevals is None: maxfunevals = np.inf if tabulistsize < 1: raise Exception('Invalid tabulistsize specified: {:d}. It should be ' 'an integer greater than zero.'.format(tabulistsize)) if tabustrategy not in ['oldest', 'farthest']: raise Exception('Invalid tabustrategy specified: {:s}. 
It must be one ' 'of "oldest" or "farthest".'.format(tabustrategy)) tabulist = [] best_f = np.inf best_x = x0 global_iter = 0 all_tunnel = success_tunnel = 0 evaluations = 0 local_tol = min(1e-8, glbtol) while 1: # minimization to find local minimum, either from initial values or # after a successful tunneling loop if disp: print('\n{0}\nStarting MINIMIZATION Phase {1:d}\n{0}' .format('='*72, global_iter+1)) options = {'maxiter': max(1, maxfunevals), 'disp': disp} if local_opts is not None: options.update(local_opts) res = minimize(objfun, x0, args=args, method=local, bounds=bounds, tol=local_tol, options=options) xf, yf, num_fun = res['x'], res['fun'], res['nfev'] if isinstance(yf, np.ndarray): yf = yf[0] maxfunevals -= num_fun evaluations += num_fun if yf < best_f: best_f = yf best_x = xf if disp: print('\n\n ==> Reached local minimum: {:.5g}\n'.format(yf)) if maxfunevals <= 0: if disp: print('='*72) return (best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel)) # if needed, drop a value from the tabu tunneling list and add the # current solution tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy) tabulist.append(xf) i = improve = 0 while i < maxiter and improve == 0: if disp: print('{0}\nStarting TUNNELING Phase ({1:d}-{2:d})\n{0}' .format('='*72, global_iter+1, i+1)) all_tunnel += 1 # generate a new starting point away from the current solution r = np.random.uniform(-1.0, 1.0, size=(n, )) beta = eps2*np.linalg.norm(xf) / np.linalg.norm(r) if np.abs(beta) < 1e-8: beta = eps2 x0 = xf + beta*r # make sure that the new starting point is within bounds x0 = np.where(x0 < low, low, x0) x0 = np.where(x0 > up, up, x0) # aspired value of the objective function for the tunneling loop aspiration = best_f - eps1*(1.0 + np.abs(best_f)) tunnel_args = tuple([objfun, aspiration, tabulist] + list(args)) options = {'maxiter': max(1, maxfunevals), 'disp': disp} if local_opts is not None: options.update(local_opts) res = minimize(tunnel, x0, args=tunnel_args, method=local, bounds=bounds, tol=local_tol, options=options) xf, yf, num_fun = res['x'], res['fun'], res['nfev'] if isinstance(yf, np.ndarray): yf = yf[0] maxfunevals -= num_fun evaluations += num_fun yf = inverse_tunnel(xf, yf, aspiration, tabulist) if yf <= best_f + glbtol: oldf = best_f best_f = yf best_x = xf improve = 1 success_tunnel += 1 if disp: print('\n\n ==> Successful tunnelling phase. Reached new ' 'local minimum: {:.5g} < {:.5g}\n'.format(yf, oldf)) i += 1 if maxfunevals <= 0: return (best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel)) tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy) tabulist.append(xf) if disp: print('='*72) global_iter += 1 x0 = xf.copy() if global_iter >= totaliter: return (best_x, best_f, evaluations, 'Maximum number of global iterations exceeded', (all_tunnel, success_tunnel)) def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy): """Drop a point from the tabu search list.""" if len(tabulist) < tabulistsize: return tabulist if tabustrategy == 'oldest': tabulist.pop(0) else: distance = np.sqrt(np.sum((tabulist - xf)**2, axis=1)) index = np.argmax(distance) tabulist.pop(index) return tabulist def tunnel(x0, *args): """Tunneling objective function. This function has a global minimum of zero at any feasible point where `f(x) = aspiration`, and minimizing this expression tends to move away from all points in `tabulist`. 
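    Concretely, matching the implementation below, the returned value is

        ytf = (objfun(x0) - aspiration)**2 / prod_i ||x0 - tabu_i||

    so the tunneling objective is small when the raw objective is close to the
    aspiration value and `x0` is far (in Euclidean distance) from every
    previously recorded tabu point.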
""" objfun, aspiration, tabulist = args[0:3] fun_args = () if len(args) > 3: fun_args = tuple(args[3:]) numerator = (objfun(x0, *fun_args) - aspiration)**2 denominator = 1.0 for tabu in tabulist: denominator = denominator * np.sqrt(np.sum((x0 - tabu)**2)) ytf = numerator/denominator return ytf def inverse_tunnel(xtf, ytf, aspiration, tabulist): """Calculate the function value after a tunneling phase step.""" denominator = 1.0 for tabu in tabulist: denominator = denominator * np.sqrt(np.sum((xtf - tabu)**2)) numerator = ytf*denominator yf = aspiration + np.sqrt(numerator) return yf lmfit-py-1.0.0/lmfit/_version.py000066400000000000000000000007611357751001700165740ustar00rootroot00000000000000 # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json version_json = ''' { "date": "2019-12-20T13:51:16-0600", "dirty": false, "error": null, "full-revisionid": "c5f969028c8e937c02a5b009347d12e2f7843be9", "version": "1.0.0" } ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) lmfit-py-1.0.0/lmfit/confidence.py000066400000000000000000000352011357751001700170420ustar00rootroot00000000000000"""Contains functions to calculate confidence intervals.""" from collections import OrderedDict from warnings import warn import numpy as np from scipy.optimize import brentq from scipy.special import erf from scipy.stats import f from .minimizer import MinimizerException CONF_ERR_GEN = 'Cannot determine Confidence Intervals' CONF_ERR_STDERR = '%s without sensible uncertainty estimates' % CONF_ERR_GEN CONF_ERR_NVARS = '%s with < 2 variables' % CONF_ERR_GEN def f_compare(best_fit, new_fit): """Return the probability calculated using the F-test. The null model (i.e., best-fit solution) is compared to an alternate model where one or more parameters are fixed. Parameters ---------- best_fit: MinimizerResult The result from the best-fit. new_fit: MinimizerResult The result from fit with the fixed parameter(s). Returns ------- prob : float Value of the calculated probality. """ nfree = best_fit.nfree nfix = best_fit.nvarys - new_fit.nvarys dchi = new_fit.chisqr / best_fit.chisqr - 1.0 return f.cdf(dchi * nfree / nfix, nfix, nfree) def copy_vals(params): """Save the values and stderrs of parameters in a temporary dictionary.""" tmp_params = {} for para_key in params: tmp_params[para_key] = (params[para_key].value, params[para_key].stderr) return tmp_params def restore_vals(tmp_params, params): """Restore values and stderrs of parameters from a temporary dictionary.""" for para_key in params: params[para_key].value, params[para_key].stderr = tmp_params[para_key] def conf_interval(minimizer, result, p_names=None, sigmas=[1, 2, 3], trace=False, maxiter=200, verbose=False, prob_func=None): """Calculate the confidence interval (ci) for parameters. The parameter for which the ci is calculated will be varied, while the remaining parameters are re-optimized to minimize the chi-square. The resulting chi-square is used to calculate the probability with a given statistic (e.g., F-test). This function uses a 1d-rootfinder from SciPy to find the values resulting in the searched confidence region. Parameters ---------- minimizer : Minimizer The minimizer to use, holding objective function. result : MinimizerResult The result of running minimize(). p_names : list, optional Names of the parameters for which the ci is calculated. 
If None (default), the ci is calculated for every parameter. sigmas : list, optional The sigma-levels to find (default is [1, 2, 3]). See Note below. trace : bool, optional Defaults to False; if True, each result of a probability calculation is saved along with the parameter. This can be used to plot so-called "profile traces". maxiter : int, optional Maximum of iteration to find an upper limit (default is 200). verbose: bool, optional Print extra debuging information (default is False). prob_func : None or callable, optional Function to calculate the probability from the optimized chi-square. Default is None and uses the built-in f_compare (i.e., F-test). Returns ------- output : dict A dictionary that contains a list of (sigma, vals)-tuples for each name. trace_dict : dict, optional Only if trace is True. Is a dictionary, the key is the parameter which was fixed. The values are again a dict with the names as keys, but with an additional key 'prob'. Each contains an array of the corresponding values. Note ----- The values for `sigma` are taken as the number of standard deviations for a normal distribution and converted to probabilities. That is, the default ``sigma=[1, 2, 3]`` will use probabilities of 0.6827, 0.9545, and 0.9973. If any of the sigma values is less than 1, that will be interpreted as a probability. That is, a value of 1 and 0.6827 will give the same results, within precision. See also -------- conf_interval2d Examples -------- >>> from lmfit.printfuncs import * >>> mini = minimize(some_func, params) >>> mini.leastsq() True >>> report_errors(params) ... #report >>> ci = conf_interval(mini) >>> report_ci(ci) ... #report Now with quantiles for the sigmas and using the trace. >>> ci, trace = conf_interval(mini, sigmas=[0.5, 1, 2, 3], trace=True) >>> fixed = trace['para1']['para1'] >>> free = trace['para1']['not_para1'] >>> prob = trace['para1']['prob'] This makes it possible to plot the dependence between free and fixed parameters. """ ci = ConfidenceInterval(minimizer, result, p_names, prob_func, sigmas, trace, verbose, maxiter) output = ci.calc_all_ci() if trace: return output, ci.trace_dict return output def map_trace_to_names(trace, params): """Map trace to parameter names.""" out = {} allnames = list(params.keys()) + ['prob'] for name in trace.keys(): tmp_dict = {} tmp = np.array(trace[name]) for para_name, values in zip(allnames, tmp.T): tmp_dict[para_name] = values out[name] = tmp_dict return out class ConfidenceInterval: """Class used to calculate the confidence interval.""" def __init__(self, minimizer, result, p_names=None, prob_func=None, sigmas=[1, 2, 3], trace=False, verbose=False, maxiter=50): self.verbose = verbose self.minimizer = minimizer self.result = result self.params = result.params.copy() self.org = copy_vals(self.params) self.best_chi = result.chisqr if p_names is None: p_names = [i for i in self.params if self.params[i].vary] self.p_names = p_names self.fit_params = [self.params[p] for p in self.p_names] # check that there are at least 2 true variables! 
# check that all stderrs are sensible (including not None or NaN) for par in self.fit_params: if par.vary and (par.stderr is None or par.stderr is np.nan): raise MinimizerException(CONF_ERR_STDERR) nvars = len([p for p in self.params.values() if p.vary]) if nvars < 2: raise MinimizerException(CONF_ERR_NVARS) if prob_func is None: self.prob_func = f_compare else: self.prob_func = prob_func if trace: self.trace_dict = {i: [] for i in self.p_names} self.trace = trace self.maxiter = maxiter self.min_rel_change = 1e-5 self.sigmas = list(sigmas) self.sigmas.sort() self.probs = [] for sigma in self.sigmas: if sigma < 1: prob = sigma else: prob = erf(sigma/np.sqrt(2)) self.probs.append(prob) def calc_all_ci(self): """Calculate all confidence intervals.""" out = OrderedDict() for p in self.p_names: out[p] = (self.calc_ci(p, -1)[::-1] + [(0., self.params[p].value)] + self.calc_ci(p, 1)) if self.trace: self.trace_dict = map_trace_to_names(self.trace_dict, self.params) return out def calc_ci(self, para, direction): """Calculate the ci for a single parameter in a single direction. Direction is either positive or negative 1. """ if isinstance(para, str): para = self.params[para] # function used to calculate the probability calc_prob = lambda val, prob: self.calc_prob(para, val, prob) if self.trace: x = [i.value for i in self.params.values()] self.trace_dict[para.name].append(x + [0]) para.vary = False limit, max_prob = self.find_limit(para, direction) start_val = a_limit = float(para.value) ret = [] orig_warn_settings = np.geterr() np.seterr(all='ignore') for prob in self.probs: if prob > max_prob: ret.append((prob, direction*np.inf)) continue try: val = brentq(calc_prob, a_limit, limit, rtol=.5e-4, args=prob) except ValueError: self.reset_vals() try: val = brentq(calc_prob, start_val, limit, rtol=.5e-4, args=prob) except ValueError: val = np.nan a_limit = val ret.append((prob, val)) para.vary = True self.reset_vals() np.seterr(**orig_warn_settings) return ret def reset_vals(self): """Reset parameter values to best-fit values.""" restore_vals(self.org, self.params) def find_limit(self, para, direction): """Find a value for a given parameter so that prob(val) > sigmas.""" if self.verbose: print('Calculating CI for ' + para.name) self.reset_vals() # determine starting step if para.stderr > 0 and para.stderr < abs(para.value): step = para.stderr else: step = max(abs(para.value) * 0.2, 0.001) para.vary = False start_val = para.value old_prob = 0 limit = start_val i = 0 bound_reached = False max_prob = max(self.probs) while old_prob < max_prob: i = i + 1 limit += step * direction if limit > para.max: limit = para.max bound_reached = True elif limit < para.min: limit = para.min bound_reached = True new_prob = self.calc_prob(para, limit) rel_change = (new_prob - old_prob) / max(new_prob, old_prob, 1e-12) old_prob = new_prob if self.verbose: msg = "P({}={}) = {}, max. 
prob={}" print(msg.format(para.name, limit, new_prob, max_prob)) # check for convergence if bound_reached: if new_prob < max(self.probs): errmsg = ("Bound reached with " "prob({}={}) = {} < max(sigmas)" ).format(para.name, limit, new_prob) warn(errmsg) break if i > self.maxiter: errmsg = "maxiter={} reached ".format(self.maxiter) errmsg += ("and prob({}={}) = {} < " "max(sigmas).".format(para.name, limit, new_prob)) warn(errmsg) break if rel_change < self.min_rel_change: errmsg = "rel_change={} < {} ".format(rel_change, self.min_rel_change) errmsg += ("at iteration {} and prob({}={}) = {} < max" "(sigmas).".format(i, para.name, limit, new_prob)) warn(errmsg) break self.reset_vals() return limit, new_prob def calc_prob(self, para, val, offset=0., restore=False): """Calculate the probability for given value.""" if restore: restore_vals(self.org, self.params) para.value = val save_para = self.params[para.name] self.params[para.name] = para self.minimizer.prepare_fit(self.params) out = self.minimizer.leastsq() prob = self.prob_func(self.result, out) if self.trace: x = [i.value for i in out.params.values()] self.trace_dict[para.name].append(x + [prob]) self.params[para.name] = save_para return prob - offset def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10, limits=None, prob_func=None): r"""Calculate confidence regions for two fixed parameters. The method itself is explained in *conf_interval*: here we are fixing two parameters. Parameters ---------- minimizer : Minimizer The minimizer to use, holding objective function. result : MinimizerResult The result of running minimize(). x_name : str The name of the parameter which will be the x direction. y_name : str The name of the parameter which will be the y direction. nx : int, optional Number of points in the x direction. ny : int, optional Number of points in the y direction. limits : tuple, optional Should have the form ((x_upper, x_lower), (y_upper, y_lower)). If not given, the default is 5 std-errs in each direction. prob_func : None or callable, optional Function to calculate the probability from the optimized chi-square. Default is None and uses built-in f_compare (i.e., F-test). Returns ------- x : numpy.ndarray X-coordinates (same shape as nx). y : numpy.ndarray Y-coordinates (same shape as ny). grid : numpy.ndarray Grid containing the calculated probabilities (with shape (nx, ny)). 
Examples -------- >>> mini = Minimizer(some_func, params) >>> result = mini.leastsq() >>> x, y, gr = conf_interval2d(mini, result, 'para1','para2') >>> plt.contour(x,y,gr) """ params = result.params best_chi = result.chisqr org = copy_vals(result.params) if prob_func is None: prob_func = f_compare x = params[x_name] y = params[y_name] if limits is None: (x_upper, x_lower) = (x.value + 5 * x.stderr, x.value - 5 * x.stderr) (y_upper, y_lower) = (y.value + 5 * y.stderr, y.value - 5 * y.stderr) elif len(limits) == 2: (x_upper, x_lower) = limits[0] (y_upper, y_lower) = limits[1] x_points = np.linspace(x_lower, x_upper, nx) y_points = np.linspace(y_lower, y_upper, ny) grid = np.dstack(np.meshgrid(x_points, y_points)) x.vary = False y.vary = False def calc_prob(vals, restore=False): """Calculate the probability.""" if restore: restore_vals(org, result.params) x.value = vals[0] y.value = vals[1] save_x = result.params[x.name] save_y = result.params[y.name] result.params[x.name] = x result.params[y.name] = y minimizer.prepare_fit(params=result.params) out = minimizer.leastsq() prob = prob_func(result, out) result.params[x.name] = save_x result.params[y.name] = save_y return prob out = x_points, y_points, np.apply_along_axis(calc_prob, -1, grid) x.vary, y.vary = True, True restore_vals(org, result.params) result.chisqr = best_chi return out lmfit-py-1.0.0/lmfit/jsonutils.py000066400000000000000000000113421357751001700167770ustar00rootroot00000000000000"""JSON utilities.""" from base64 import b64decode, b64encode import sys import numpy as np try: import dill HAS_DILL = True except ImportError: HAS_DILL = False try: from pandas import DataFrame, Series, read_json except ImportError: DataFrame = Series = type(NotImplemented) read_json = None def bindecode(val): """b64decode wrapper.""" return b64decode(val) def binencode(val): """b64encode wrapper.""" return str(b64encode(val), 'utf-8') def find_importer(obj): """Find importer of an object.""" oname = obj.__name__ for modname, module in sys.modules.items(): if modname.startswith('__main__'): continue t = getattr(module, oname, None) if t is obj: return modname return None def import_from(modulepath, objectname): """Wrapper for __import__ for nested objects.""" path = modulepath.split('.') top = path.pop(0) parent = __import__(top) while len(path) > 0: parent = getattr(parent, path.pop(0)) return getattr(parent, objectname) def encode4js(obj): """Prepare an object for JSON encoding. 
It has special handling for many Python types, including: - pandas DataFrames and Series - NumPy ndarrays - complex numbers """ if isinstance(obj, DataFrame): return dict(__class__='PDataFrame', value=obj.to_json()) elif isinstance(obj, Series): return dict(__class__='PSeries', value=obj.to_json()) elif isinstance(obj, np.ndarray): if 'complex' in obj.dtype.name: val = [(obj.real).tolist(), (obj.imag).tolist()] elif obj.dtype.name == 'object': val = [encode4js(item) for item in obj['value']] else: val = obj.flatten().tolist() return dict(__class__='NDArray', __shape__=obj.shape, __dtype__=obj.dtype.name, value=val) elif isinstance(obj, (np.float, np.int)): return float(obj) elif isinstance(obj, str): try: return str(obj) except UnicodeError: return obj elif isinstance(obj, np.complex): return dict(__class__='Complex', value=(obj.real, obj.imag)) elif isinstance(obj, (tuple, list)): ctype = 'List' if isinstance(obj, tuple): ctype = 'Tuple' val = [encode4js(item) for item in obj] return dict(__class__=ctype, value=val) elif isinstance(obj, dict): out = dict(__class__='Dict') for key, val in obj.items(): out[encode4js(key)] = encode4js(val) return out elif callable(obj): val, importer = None, None pyvers = "%d.%d" % (sys.version_info.major, sys.version_info.minor) if HAS_DILL: val = binencode(dill.dumps(obj)) else: val = None importer = find_importer(obj) return dict(__class__='Callable', __name__=obj.__name__, pyversion=pyvers, value=val, importer=importer) return obj def decode4js(obj): """Return decoded Python object from encoded object.""" if not isinstance(obj, dict): return obj out = obj classname = obj.pop('__class__', None) if classname is None: return obj if classname == 'Complex': out = obj['value'][0] + 1j*obj['value'][1] elif classname in ('List', 'Tuple'): out = [] for item in obj['value']: out.append(decode4js(item)) if classname == 'Tuple': out = tuple(out) elif classname == 'NDArray': if obj['__dtype__'].startswith('complex'): re = np.fromiter(obj['value'][0], dtype='double') im = np.fromiter(obj['value'][1], dtype='double') out = re + 1j*im elif obj['__dtype__'].startswith('object'): val = [decode4js(v) for v in obj['value']] out = np.array(val, dtype=obj['__dtype__']) else: out = np.fromiter(obj['value'], dtype=obj['__dtype__']) out.shape = obj['__shape__'] elif classname == 'PDataFrame' and read_json is not None: out = read_json(obj['value']) elif classname == 'PSeries' and read_json is not None: out = read_json(obj['value'], typ='series') elif classname == 'Callable': out = val = obj['__name__'] pyvers = "%d.%d" % (sys.version_info.major, sys.version_info.minor) if pyvers == obj['pyversion'] and HAS_DILL: out = dill.loads(bindecode(obj['value'])) elif obj['importer'] is not None: out = import_from(obj['importer'], val) elif classname in ('Dict', 'dict'): out = {} for key, val in obj.items(): out[key] = decode4js(val) return out lmfit-py-1.0.0/lmfit/lineshapes.py000066400000000000000000000320451357751001700171030ustar00rootroot00000000000000"""Basic model line shapes and distribution functions.""" from numpy import (arctan, cos, exp, finfo, float64, isnan, log, pi, sin, sqrt, where) from numpy.testing import assert_allclose from scipy.special import erf, erfc from scipy.special import gamma as gamfcn from scipy.special import gammaln, wofz log2 = log(2) s2pi = sqrt(2*pi) spi = sqrt(pi) s2 = sqrt(2.0) tiny = finfo(float64).eps functions = ('gaussian', 'lorentzian', 'voigt', 'pvoigt', 'moffat', 'pearson7', 'breit_wigner', 'damped_oscillator', 'dho', 'logistic', 'lognormal', 
'students_t', 'expgaussian', 'donaich', 'skewed_gaussian', 'skewed_voigt', 'step', 'rectangle', 'erf', 'erfc', 'wofz', 'gamma', 'gammaln', 'exponential', 'powerlaw', 'linear', 'parabolic', 'sine', 'expsine', 'split_lorentzian') def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0): """Return a 1-dimensional Gaussian function. gaussian(x, amplitude, center, sigma) = (amplitude/(s2pi*sigma)) * exp(-(1.0*x-center)**2 / (2*sigma**2)) """ return ((amplitude/(max(tiny, s2pi*sigma))) * exp(-(1.0*x-center)**2 / max(tiny, (2*sigma**2)))) def lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0): """Return a 1-dimensional Lorentzian function. lorentzian(x, amplitude, center, sigma) = (amplitude/(1 + ((1.0*x-center)/sigma)**2)) / (pi*sigma) """ return ((amplitude/(1 + ((1.0*x-center)/max(tiny, sigma))**2)) / max(tiny, (pi*sigma))) def split_lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0, sigma_r=1.0): """Return a 1-dimensional piecewise Lorentzian function. Split means that width of the function is different between left and right slope of the function. The peak height is calculated from the condition that the integral from ``-.inf`` to ``+.inf`` is equal to ``amplitude``. split_lorentzian(x, amplitude, center, sigma, sigma_r) = [2*amplitude / (pi* (sigma + sigma_r)] * { sigma**2 * (x=center) / [sigma_r**2+ (x - center)**2] } """ s = max(tiny, sigma) r = max(tiny, sigma_r) s2 = s*s r2 = r*r xc2 = (x-center)**2 amp = 2*amplitude/(pi*(s+r)) return amp*(s2*(x < center)/(s2+xc2) + r2*(x >= center)/(r2+xc2)) def voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None): """Return a 1-dimensional Voigt function. voigt(x, amplitude, center, sigma, gamma) = amplitude*wofz(z).real / (sigma*s2pi) see https://en.wikipedia.org/wiki/Voigt_profile """ if gamma is None: gamma = sigma z = (x-center + 1j*gamma) / max(tiny, (sigma*s2)) return amplitude*wofz(z).real / max(tiny, (sigma*s2pi)) def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5): """Return a 1-dimensional pseudo-Voigt function. pvoigt(x, amplitude, center, sigma, fraction) = amplitude*(1-fraction)*gaussian(x, center, sigma_g) + amplitude*fraction*lorentzian(x, center, sigma) where sigma_g (the sigma for the Gaussian component) is sigma_g = sigma / sqrt(2*log(2)) ~= sigma / 1.17741 so that the Gaussian and Lorentzian components have the same FWHM of 2*sigma. """ sigma_g = sigma / sqrt(2*log2) return ((1-fraction)*gaussian(x, amplitude, center, sigma_g) + fraction*lorentzian(x, amplitude, center, sigma)) def moffat(x, amplitude=1, center=0., sigma=1, beta=1.): """Return a 1-dimensional Moffat function. moffat(x, amplitude, center, sigma, beta) = amplitude / (((x - center)/sigma)**2 + 1)**beta """ return amplitude / (((x - center)/max(tiny, sigma))**2 + 1)**beta def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0): """Return a Pearson7 lineshape. Using the wikipedia definition: pearson7(x, center, sigma, expon) = amplitude*(1+arg**2)**(-expon)/(sigma*beta(expon-0.5, 0.5)) where arg = (x-center)/sigma and beta() is the beta function. """ arg = (x-center)/max(tiny, sigma) scale = amplitude * gamfcn(expon)/(gamfcn(0.5)*gamfcn(expon-0.5)) return scale*(1+arg**2)**(-expon)/max(tiny, sigma) def breit_wigner(x, amplitude=1.0, center=0.0, sigma=1.0, q=1.0): """Return a Breit-Wigner-Fano lineshape. 
breit_wigner(x, amplitude, center, sigma, q) = amplitude*(q*sigma/2 + x - center)**2 / ( (sigma/2)**2 + (x - center)**2 ) """ gam = sigma/2.0 return amplitude*(q*gam + x - center)**2 / (gam*gam + (x-center)**2) def damped_oscillator(x, amplitude=1.0, center=1., sigma=0.1): """Return the amplitude for a damped harmonic oscillator. damped_oscillator(x, amplitude, center, sigma) = amplitude/sqrt( (1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2)) """ center = max(tiny, abs(center)) return amplitude/sqrt((1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2) def dho(x, amplitude=1., center=0., sigma=1., gamma=1.0): """Return a Damped Harmonic Oscillator. Similar to version from PAN dho(x, amplitude, center, sigma, gamma) = amplitude*sigma*pi * (lm - lp) / (1.0 - exp(-x/gamma)) where lm(x, center, sigma) = 1.0 / ((x-center)**2 + sigma**2) lp(x, center, sigma) = 1.0 / ((x+center)**2 + sigma**2) """ bose = (1.0 - exp(-x/max(tiny, gamma))) if isinstance(bose, (int, float)): bose = max(tiny, bose) else: bose[where(isnan(bose))] = tiny bose[where(bose <= tiny)] = tiny lm = 1.0/((x-center)**2 + sigma**2) lp = 1.0/((x+center)**2 + sigma**2) return amplitude*sigma/pi*(lm - lp)/bose def logistic(x, amplitude=1., center=0., sigma=1.): """Return a Logistic lineshape (yet another sigmoidal curve). logistic(x, amplitude, center, sigma) = = amplitude*(1. - 1. / (1 + exp((x-center)/sigma))) """ return amplitude*(1. - 1./(1. + exp((x-center)/sigma))) def lognormal(x, amplitude=1.0, center=0., sigma=1): """Return a log-normal function. lognormal(x, amplitude, center, sigma) = (amplitude/(x*sigma*s2pi)) * exp(-(ln(x) - center)**2/ (2* sigma**2)) """ if isinstance(x, (int, float)): x = max(tiny, x) else: x[where(x <= tiny)] = tiny return ((amplitude/(x*max(tiny, sigma*s2pi))) * exp(-(log(x)-center)**2 / max(tiny, (2*sigma**2)))) def students_t(x, amplitude=1.0, center=0.0, sigma=1.0): """Return Student's t distribution. students_t(x, amplitude, center, sigma) = gamma((sigma+1)/2) (1 + (x-center)**2/sigma)^(-(sigma+1)/2) ------------------------- sqrt(sigma*pi)gamma(sigma/2) """ s1 = (sigma+1)/2.0 denom = max(tiny, (sqrt(sigma*pi)*gamfcn(max(tiny, sigma/2)))) return amplitude*(1 + (x-center)**2/max(tiny, sigma))**(-s1) * gamfcn(s1) / denom def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0): """Return a exponentially modified Gaussian. expgaussian(x, amplitude, center, sigma, gamma=) = (gamma/2) exp[center*gamma + (gamma*sigma)**2/2 - gamma*x] * erfc[(center + gamma*sigma**2 - x)/(sqrt(2)*sigma)] https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution """ gss = gamma*sigma*sigma arg1 = gamma*(center + gss/2.0 - x) arg2 = (center + gss - x)/max(tiny, (s2*sigma)) return amplitude*(gamma/2) * exp(arg1) * erfc(arg2) def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0): """Return a Doniach Sunjic asymmetric lineshape, used for photo-emission. donaich(x, amplitude, center, sigma, gamma) = amplitude / sigma^(1-gamma) * cos(pi*gamma/2 + (1-gamma) arctan((x-center)/sigma) / (sigma**2 + (x-center)**2)**[(1-gamma)/2] see http://www.casaxps.com/help_manual/line_shapes.htm """ arg = (x-center)/max(tiny, sigma) gm1 = (1.0 - gamma) scale = amplitude/max(tiny, (sigma**gm1)) return scale*cos(pi*gamma/2 + gm1*arctan(arg))/(1 + arg**2)**(gm1/2) def skewed_gaussian(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=0.0): """Return a Gaussian lineshape, skewed with error function. 
Equal to: gaussian(x, center, sigma)*(1+erf(beta*(x-center))) with beta = gamma/(sigma*sqrt(2)) with gamma < 0: tail to low value of centroid gamma > 0: tail to high value of centroid see https://en.wikipedia.org/wiki/Skew_normal_distribution """ asym = 1 + erf(gamma*(x-center)/max(tiny, (s2*sigma))) return asym * gaussian(x, amplitude, center, sigma) def skewed_voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None, skew=0.0): """Return a Voigt lineshape, skewed with error function. useful for ad-hoc Compton scatter profile with beta = skew/(sigma*sqrt(2)) = voigt(x, center, sigma, gamma)*(1+erf(beta*(x-center))) skew < 0: tail to low value of centroid skew > 0: tail to high value of centroid see https://en.wikipedia.org/wiki/Skew_normal_distribution """ beta = skew/max(tiny, (s2*sigma)) asym = 1 + erf(beta*(x-center)) return asym * voigt(x, amplitude, center, sigma, gamma=gamma) def sine(x, amplitude=1.0, frequency=1.0, shift=0.0): """Return a sinusoidal function. sine(x, amplitude, frequency, shift): = amplitude * sin(x*frequency + shift) """ return amplitude*sin(x*frequency + shift) def expsine(x, amplitude=1.0, frequency=1.0, shift=0.0, decay=0.0): """Return an exponentially decaying sinusoidal function. expsine(x, amplitude, frequency, shift, decay): = amplitude * sin(x*frequency + shift) * exp(-x*decay) """ return amplitude*sin(x*frequency + shift) * exp(-x*decay) def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'): """Return a step function. starts at 0.0, ends at amplitude, with half-max at center, and rising with form: 'linear' (default) = amplitude * min(1, max(0, arg)) 'atan', 'arctan' = amplitude * (0.5 + atan(arg)/pi) 'erf' = amplitude * (1 + erf(arg))/2.0 'logistic' = amplitude * [1 - 1/(1 + exp(arg))] where arg = (x - center)/sigma """ out = (x - center)/max(tiny, sigma) if form == 'erf': out = 0.5*(1 + erf(out)) elif form.startswith('logi'): out = (1. - 1./(1. + exp(out))) elif form in ('atan', 'arctan'): out = 0.5 + arctan(out)/pi else: out[where(out < 0)] = 0.0 out[where(out > 1)] = 1.0 return amplitude*out def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0, center2=1.0, sigma2=1.0, form='linear'): """Return a rectangle function: step up, step down. (see step function) starts at 0.0, rises to amplitude (at center1 with width sigma1) then drops to 0.0 (at center2 with width sigma2) with form: 'linear' (default) = ramp_up + ramp_down 'atan', 'arctan' = amplitude*(atan(arg1) + atan(arg2))/pi 'erf' = amplitude*(erf(arg1) + erf(arg2))/2. 'logisitic' = amplitude*[1 - 1/(1 + exp(arg1)) - 1/(1+exp(arg2))] where arg1 = (x - center1)/sigma1 and arg2 = -(x - center2)/sigma2 """ arg1 = (x - center1)/max(tiny, sigma1) arg2 = (center2 - x)/max(tiny, sigma2) if form == 'erf': out = 0.5*(erf(arg1) + erf(arg2)) elif form.startswith('logi'): out = (1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2))) elif form in ('atan', 'arctan'): out = (arctan(arg1) + arctan(arg2))/pi else: arg1[where(arg1 < 0)] = 0.0 arg1[where(arg1 > 1)] = 1.0 arg2[where(arg2 > 0)] = 0.0 arg2[where(arg2 < -1)] = -1.0 out = arg1 + arg2 return amplitude*out def _erf(x): """Return the error function. erf = 2/sqrt(pi)*integral(exp(-t**2), t=[0, z]) """ return erf(x) def _erfc(x): """Return the complementary error function. erfc = 1 - erf(x) """ return erfc(x) def _wofz(x): """Return the fadeeva function for complex argument. 
wofz = exp(-x**2)*erfc(-i*x) """ return wofz(x) def _gamma(x): """Return the gamma function.""" return gamfcn(x) def _gammaln(x): """Return the log of absolute value of gamma function.""" return gammaln(x) def exponential(x, amplitude=1, decay=1): """Return an exponential function. x -> amplitude * exp(-x/decay) """ return amplitude * exp(-x/decay) def powerlaw(x, amplitude=1, exponent=1.0): """Return the powerlaw function. x -> amplitude * x**exponent """ return amplitude * x**exponent def linear(x, slope=1.0, intercept=0.0): """Return a linear function. x -> slope * x + intercept """ return slope * x + intercept def parabolic(x, a=0.0, b=0.0, c=0.0): """Return a parabolic function. x -> a * x**2 + b * x + c """ return a * x**2 + b * x + c def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03, err_msg='', verbose=True): """Check whether all actual and desired parameter values are close.""" for param_name, value in desired.items(): assert_allclose(actual[param_name], value, rtol, atol, err_msg, verbose) lmfit-py-1.0.0/lmfit/minimizer.py000066400000000000000000003010401357751001700167450ustar00rootroot00000000000000"""Simple minimizer is a wrapper around scipy.leastsq, allowing a user to build a fitting model as a function of general purpose Fit Parameters that can be fixed or varied, bounded, and written as a simple expression of other Fit Parameters. The user sets up a model in terms of instance of Parameters and writes a function-to-be-minimized (residual function) in terms of these Parameters. Original copyright: Copyright (c) 2011 Matthew Newville, The University of Chicago See LICENSE for more complete authorship information and license. """ from collections import namedtuple from copy import deepcopy import multiprocessing import numbers import warnings import numpy as np from numpy import ndarray, ones_like, sqrt from numpy.dual import inv from numpy.linalg import LinAlgError from scipy.optimize import basinhopping as scipy_basinhopping from scipy.optimize import brute as scipy_brute from scipy.optimize import differential_evolution from scipy.optimize import dual_annealing as scipy_dual_annealing from scipy.optimize import least_squares from scipy.optimize import leastsq as scipy_leastsq from scipy.optimize import minimize as scipy_minimize from scipy.optimize import shgo as scipy_shgo from scipy.sparse import issparse from scipy.sparse.linalg import LinearOperator from scipy.stats import cauchy as cauchy_dist from scipy.stats import norm as norm_dist from scipy.version import version as scipy_version import uncertainties from ._ampgo import ampgo from .parameter import Parameter, Parameters from .printfuncs import fitreport_html_table # check for EMCEE try: import emcee from emcee.autocorr import AutocorrError HAS_EMCEE = int(emcee.__version__[0]) >= 3 except ImportError: HAS_EMCEE = False # check for pandas try: import pandas as pd from pandas import isnull HAS_PANDAS = True except ImportError: HAS_PANDAS = False isnull = np.isnan # check for numdifftools try: import numdifftools as ndt HAS_NUMDIFFTOOLS = True except ImportError: HAS_NUMDIFFTOOLS = False # check for dill try: import dill # noqa: F401 HAS_DILL = True except ImportError: HAS_DILL = False # define the namedtuple here so pickle will work with the MinimizerResult Candidate = namedtuple('Candidate', ['params', 'score']) def asteval_with_uncertainties(*vals, **kwargs): """Calculate object value, given values for variables. 
This is used by the uncertainties package to calculate the uncertainty in an object even with a complicated expression. """ _obj = kwargs.get('_obj', None) _pars = kwargs.get('_pars', None) _names = kwargs.get('_names', None) _asteval = _pars._asteval if (_obj is None or _pars is None or _names is None or _asteval is None or _obj._expr_ast is None): return 0 for val, name in zip(vals, _names): _asteval.symtable[name] = val return _asteval.eval(_obj._expr_ast) wrap_ueval = uncertainties.wrap(asteval_with_uncertainties) def eval_stderr(obj, uvars, _names, _pars): """Evaluate uncertainty and set .stderr for a parameter `obj`. Given the uncertain values `uvars` (a list of uncertainties.ufloats), a list of parameter names that matches uvars, and a dict of param objects, keyed by name. This uses the uncertainties package wrapped function to evaluate the uncertainty for an arbitrary expression (in obj._expr_ast) of parameters. """ if not isinstance(obj, Parameter) or getattr(obj, '_expr_ast', None) is None: return uval = wrap_ueval(*uvars, _obj=obj, _names=_names, _pars=_pars) try: obj.stderr = uval.std_dev except Exception: obj.stderr = 0 class MinimizerException(Exception): """General Purpose Exception.""" def __init__(self, msg): Exception.__init__(self) self.msg = msg def __str__(self): return "{}".format(self.msg) class AbortFitException(MinimizerException): """Raised when a fit is aborted by the user.""" pass SCALAR_METHODS = {'nelder': 'Nelder-Mead', 'powell': 'Powell', 'cg': 'CG', 'bfgs': 'BFGS', 'newton': 'Newton-CG', 'lbfgsb': 'L-BFGS-B', 'l-bfgsb': 'L-BFGS-B', 'tnc': 'TNC', 'cobyla': 'COBYLA', 'slsqp': 'SLSQP', 'dogleg': 'dogleg', 'trust-ncg': 'trust-ncg', 'differential_evolution': 'differential_evolution', 'trust-constr': 'trust-constr', 'trust-exact': 'trust-exact', 'trust-krylov': 'trust-krylov'} def reduce_chisquare(r): """Reduce residual array to scalar (chi-square). Calculate the chi-square value from the residual array `r`: (r*r).sum() Parameters ---------- r : numpy.ndarray Residual array. Returns ------- float Chi-square calculated from the residual array """ return (r*r).sum() def reduce_negentropy(r): """Reduce residual array to scalar (negentropy). Reduce residual array `r` to scalar using negative entropy and the normal (Gaussian) probability distribution of `r` as pdf: (norm.pdf(r)*norm.logpdf(r)).sum() since pdf(r) = exp(-r*r/2)/sqrt(2*pi), this is ((r*r/2 - log(sqrt(2*pi))) * exp(-r*r/2)).sum() Parameters ---------- r : numpy.ndarray Residual array. Returns ------- float Negative entropy value calculated from the residual array """ return (norm_dist.pdf(r)*norm_dist.logpdf(r)).sum() def reduce_cauchylogpdf(r): """Reduce residual array to scalar (cauchylogpdf). Reduce residual array `r` to scalar using negative log-likelihood and a Cauchy (Lorentzian) distribution of `r`: -scipy.stats.cauchy.logpdf(r) (where the Cauchy pdf = 1/(pi*(1+r*r))). This gives greater suppression of outliers compared to normal sum-of-squares. Parameters ---------- r : numpy.ndarray Residual array. Returns ------- float Negative entropy value calculated from the residual array """ return -cauchy_dist.logpdf(r).sum() class MinimizerResult: r"""The results of a minimization. Minimization results include data such as status and error messages, fit statistics, and the updated (i.e., best-fit) parameters themselves in the :attr:`params` attribute. 
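    As an illustrative sketch only (the objective function ``residual`` and
    the data arrays ``x`` and ``data`` are hypothetical, not defined in this
    module), a result is typically created and inspected like::

        mini = Minimizer(residual, params, fcn_args=(x, data))
        result = mini.minimize(method='leastsq')
        print(result.success, result.chisqr, result.redchi)
        for name, par in result.params.items():
            print(name, par.value, par.stderr)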
The list of (possible) `MinimizerResult` attributes is given below: Attributes ---------- params : :class:`~lmfit.parameter.Parameters` The best-fit parameters resulting from the fit. status : int Termination status of the optimizer. Its value depends on the underlying solver. Refer to `message` for details. var_names : list Ordered list of variable parameter names used in optimization, and useful for understanding the values in :attr:`init_vals` and :attr:`covar`. covar : numpy.ndarray Covariance matrix from minimization, with rows and columns corresponding to :attr:`var_names`. init_vals : list List of initial values for variable parameters using :attr:`var_names`. init_values : dict Dictionary of initial values for variable parameters. nfev : int Number of function evaluations. success : bool True if the fit succeeded, otherwise False. errorbars : bool True if uncertainties were estimated, otherwise False. message : str Message about fit success. ier : int Integer error value from :scipydoc:`optimize.leastsq` (`leastsq` only). lmdif_message : str Message from :scipydoc:`optimize.leastsq` (`leastsq` only). nvarys : int Number of variables in fit: :math:`N_{\rm varys}`. ndata : int Number of data points: :math:`N`. nfree : int Degrees of freedom in fit: :math:`N - N_{\rm varys}`. residual : numpy.ndarray Residual array :math:`{\rm Resid_i}`. Return value of the objective function when using the best-fit values of the parameters. chisqr : float Chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`. redchi : float Reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm varys})}`. aic : float Akaike Information Criterion statistic: :math:`N \ln(\chi^2/N) + 2 N_{\rm varys}`. bic : float Bayesian Information Criterion statistic: :math:`N \ln(\chi^2/N) + \ln(N) N_{\rm varys}`. flatchain : pandas.DataFrame A flatchain view of the sampling chain from the `emcee` method. Methods ------- show_candidates Pretty_print() representation of candidates from the `brute` method. """ def __init__(self, **kws): for key, val in kws.items(): setattr(self, key, val) @property def flatchain(self): """Show flatchain view of the sampling chain from `emcee` method.""" if hasattr(self, 'chain'): if HAS_PANDAS: if len(self.chain.shape) == 4: return pd.DataFrame(self.chain[0, ...].reshape((-1, self.nvarys)), columns=self.var_names) elif len(self.chain.shape) == 3: return pd.DataFrame(self.chain.reshape((-1, self.nvarys)), columns=self.var_names) else: raise NotImplementedError('Please install Pandas to see the ' 'flattened chain') else: return None def show_candidates(self, candidate_nmb='all'): """Show pretty_print() representation of candidates from `brute` method. Showing all stored candidates (default) or the specified candidate-# from the `brute` method. Parameters ---------- candidate_nmb : int or 'all' The candidate-number to show using the :meth:`pretty_print` method. """ if hasattr(self, 'candidates'): if candidate_nmb == 'all': for i, candidate in enumerate(self.candidates): print("\nCandidate #{}, chisqr = " "{:.3f}".format(i+1, candidate.score)) candidate.params.pretty_print() elif (candidate_nmb < 1 or candidate_nmb > len(self.candidates)): raise ValueError("'candidate_nmb' should be between 1 and {}." 
.format(len(self.candidates))) else: candidate = self.candidates[candidate_nmb-1] print("\nCandidate #{}, chisqr = " "{:.3f}".format(candidate_nmb, candidate.score)) candidate.params.pretty_print() def _calculate_statistics(self): """Calculate the fitting statistics.""" self.nvarys = len(self.init_vals) if isinstance(self.residual, ndarray): self.chisqr = (self.residual**2).sum() self.ndata = len(self.residual) self.nfree = self.ndata - self.nvarys else: self.chisqr = self.residual self.ndata = 1 self.nfree = 1 self.redchi = self.chisqr / max(1, self.nfree) # this is -2*loglikelihood _neg2_log_likel = self.ndata * np.log(self.chisqr / self.ndata) self.aic = _neg2_log_likel + 2 * self.nvarys self.bic = _neg2_log_likel + np.log(self.ndata) * self.nvarys def _repr_html_(self, show_correl=True, min_correl=0.1): """Returns a HTML representation of parameters data.""" return fitreport_html_table(self, show_correl=show_correl, min_correl=min_correl) class Minimizer: """A general minimizer for curve fitting and optimization.""" _err_nonparam = ("params must be a minimizer.Parameters() instance or list " "of Parameters()") _err_maxfev = ("Too many function calls (max set to %i)! Use:" " minimize(func, params, ..., maxfev=NNN)" "or set leastsq_kws['maxfev'] to increase this maximum.") def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None, iter_cb=None, scale_covar=True, nan_policy='raise', reduce_fcn=None, calc_covar=True, **kws): """ Parameters ---------- userfcn : callable Objective function that returns the residual (difference between model and data) to be minimized in a least-squares sense. This function must have the signature:: userfcn(params, *fcn_args, **fcn_kws) params : :class:`~lmfit.parameter.Parameters` Contains the Parameters for the model. fcn_args : tuple, optional Positional arguments to pass to `userfcn`. fcn_kws : dict, optional Keyword arguments to pass to `userfcn`. iter_cb : callable, optional Function to be called at each fit iteration. This function should have the signature:: iter_cb(params, iter, resid, *fcn_args, **fcn_kws) where `params` will have the current parameter values, `iter` the iteration number, `resid` the current residual array, and `*fcn_args` and `**fcn_kws` are passed to the objective function. scale_covar : bool, optional Whether to automatically scale the covariance matrix (default is True). nan_policy : str, optional Specifies action if `userfcn` (or a Jacobian) returns NaN values. One of: - 'raise' : a `ValueError` is raised - 'propagate' : the values returned from `userfcn` are un-altered - 'omit' : non-finite values are filtered reduce_fcn : str or callable, optional Function to convert a residual array to a scalar value for the scalar minimizers. Optional values are (where `r` is the residual array): - None : sum of squares of residual [default] = (r*r).sum() - 'negentropy' : neg entropy, using normal distribution = rho*log(rho).sum()`, where rho = exp(-r*r/2)/(sqrt(2*pi)) - 'neglogcauchy': neg log likelihood, using Cauchy distribution = -log(1/(pi*(1+r*r))).sum() - callable : must take one argument (`r`) and return a float. calc_covar : bool, optional Whether to calculate the covariance matrix (default is True) for solvers other than `leastsq` and `least_squares`. Requires the `numdifftools` package to be installed. **kws : dict, optional Options to pass to the minimizer being used. Notes ----- The objective function should return the value to be minimized. 
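        As a schematic sketch only (the parameter names ``slope`` and
        ``intercept`` and the arrays ``x``, ``data`` and ``eps_data`` are
        hypothetical), such an objective function could look like::

            def residual(params, x, data, eps_data):
                slope = params['slope'].value
                intercept = params['intercept'].value
                return (data - (slope*x + intercept)) / eps_data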
For the Levenberg-Marquardt algorithm from :meth:`leastsq` or :meth:`least_squares`, this returned value must be an array, with a length greater than or equal to the number of fitting variables in the model. For the other methods, the return value can either be a scalar or an array. If an array is returned, the sum of squares of the array will be sent to the underlying fitting method, effectively doing a least-squares optimization of the return values. If the objective function returns non-finite values then a `ValueError` will be raised because the underlying solvers cannot deal with them. A common use for the `fcn_args` and `fcn_kws` would be to pass in other data needed to calculate the residual, including such things as the data array, dependent variable, uncertainties in the data, and other data structures for the model calculation. """ self.userfcn = userfcn self.userargs = fcn_args if self.userargs is None: self.userargs = [] self.userkws = fcn_kws if self.userkws is None: self.userkws = {} self.kws = kws self.iter_cb = iter_cb self.calc_covar = calc_covar self.scale_covar = scale_covar self.nfev = 0 self.nfree = 0 self.ndata = 0 self.ier = 0 self._abort = False self.success = True self.errorbars = False self.message = None self.lmdif_message = None self.chisqr = None self.redchi = None self.covar = None self.residual = None self.reduce_fcn = reduce_fcn self.params = params self.jacfcn = None self.nan_policy = nan_policy @property def values(self): """Return Parameter values in a simple dictionary.""" return {name: p.value for name, p in self.result.params.items()} def __residual(self, fvars, apply_bounds_transformation=True): """Residual function used for least-squares fit. With the new, candidate values of `fvars` (the fitting variables), this evaluates all parameters, including setting bounds and evaluating constraints, and then passes those to the user-supplied function to calculate the residual. Parameters ---------- fvars : numpy.ndarray Array of new parameter values suggested by the minimizer. apply_bounds_transformation : bool, optional Whether to apply lmfits parameter transformation to constrain parameters (default is True). This is needed for solvers without inbuilt support for bounds. Returns ------- residual : numpy.ndarray The evaluated function values for given `fvars`. """ params = self.result.params if fvars.shape == (): fvars = fvars.reshape((1,)) if apply_bounds_transformation: for name, val in zip(self.result.var_names, fvars): params[name].value = params[name].from_internal(val) else: for name, val in zip(self.result.var_names, fvars): params[name].value = val params.update_constraints() self.result.nfev += 1 out = self.userfcn(params, *self.userargs, **self.userkws) if callable(self.iter_cb): abort = self.iter_cb(params, self.result.nfev, out, *self.userargs, **self.userkws) self._abort = self._abort or abort if self._abort: self.result.residual = out self.result.aborted = True self.result.message = "Fit aborted by user callback. Could not estimate error-bars." self.result.success = False raise AbortFitException("fit aborted by user.") else: return _nan_policy(np.asarray(out).ravel(), nan_policy=self.nan_policy) def __jacobian(self, fvars): """Return analytical jacobian to be used with Levenberg-Marquardt. 
modified 02-01-2012 by Glenn Jones, Aberystwyth University modified 06-29-2015 M Newville to apply gradient scaling for bounded variables (thanks to JJ Helmus, N Mayorov) """ pars = self.result.params grad_scale = ones_like(fvars) for ivar, name in enumerate(self.result.var_names): val = fvars[ivar] pars[name].value = pars[name].from_internal(val) grad_scale[ivar] = pars[name].scale_gradient(val) pars.update_constraints() # compute the jacobian for "internal" unbounded variables, # then rescale for bounded "external" variables. jac = self.jacfcn(pars, *self.userargs, **self.userkws) jac = _nan_policy(jac, nan_policy=self.nan_policy) if self.col_deriv: jac = (jac.transpose()*grad_scale).transpose() else: jac *= grad_scale return jac def penalty(self, fvars): """Penalty function for scalar minimizers. Parameters ---------- fvars : numpy.ndarray Array of values for the variable parameters. Returns ------- r : float The evaluated user-supplied objective function. If the objective function is an array of size greater than 1, use the scalar returned by `self.reduce_fcn`. This defaults to sum-of-squares, but can be replaced by other options. """ if self.result.method in ['brute', 'shgo', 'dual_annealing']: apply_bounds_transformation = False else: apply_bounds_transformation = True r = self.__residual(fvars, apply_bounds_transformation) if isinstance(r, ndarray) and r.size > 1: r = self.reduce_fcn(r) if isinstance(r, ndarray) and r.size > 1: r = r.sum() return r def prepare_fit(self, params=None): """Prepare parameters for fitting. Prepares and initializes model and Parameters for subsequent fitting. This routine prepares the conversion of :class:`Parameters` into fit variables, organizes parameter bounds, and parses, "compiles" and checks constrain expressions. The method also creates and returns a new instance of a :class:`MinimizerResult` object that contains the copy of the Parameters that will actually be varied in the fit. Parameters ---------- params : :class:`~lmfit.parameter.Parameters`, optional Contains the Parameters for the model; if None, then the Parameters used to initialize the Minimizer object are used. Returns ------- :class:`MinimizerResult` Notes ----- This method is called directly by the fitting methods, and it is generally not necessary to call this function explicitly. .. versionchanged:: 0.9.0 Return value changed to :class:`MinimizerResult`. """ # determine which parameters are actually variables # and which are defined expressions. self.result = MinimizerResult() result = self.result if params is not None: self.params = params if isinstance(self.params, Parameters): result.params = deepcopy(self.params) elif isinstance(self.params, (list, tuple)): result.params = Parameters() for par in self.params: if not isinstance(par, Parameter): raise MinimizerException(self._err_nonparam) else: result.params[par.name] = par elif self.params is None: raise MinimizerException(self._err_nonparam) # determine which parameters are actually variables # and which are defined expressions. result.var_names = [] # note that this *does* belong to self... 
result.init_vals = [] result.params.update_constraints() result.nfev = 0 result.errorbars = False result.aborted = False for name, par in self.result.params.items(): par.stderr = None par.correl = None if par.expr is not None: par.vary = False if par.vary: result.var_names.append(name) result.init_vals.append(par.setup_bounds()) par.init_value = par.value if par.name is None: par.name = name result.nvarys = len(result.var_names) result.init_values = {n: v for n, v in zip(result.var_names, result.init_vals)} # set up reduce function for scalar minimizers # 1. user supplied callable # 2. string starting with 'neglogc' or 'negent' # 3. sum of squares if not callable(self.reduce_fcn): if isinstance(self.reduce_fcn, str): if self.reduce_fcn.lower().startswith('neglogc'): self.reduce_fcn = reduce_cauchylogpdf elif self.reduce_fcn.lower().startswith('negent'): self.reduce_fcn = reduce_negentropy if self.reduce_fcn is None: self.reduce_fcn = reduce_chisquare return result def unprepare_fit(self): """Clean fit state, so that subsequent fits need to call prepare_fit(). removes AST compilations of constraint expressions. """ pass def _calculate_covariance_matrix(self, fvars): """Calculate the covariance matrix. The `numdiftoools` package is used to estimate the Hessian matrix, and the covariance matrix is calculated as: .. math:: cov_x = inverse(Hessian) * 2.0 Parameters ---------- fvars : numpy.ndarray Array of the optimal internal, freely variable parameters. Returns ------- cov_x : numpy.ndarray or None Covariance matrix if successful, otherwise None. """ warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd") nfev = deepcopy(self.result.nfev) try: Hfun = ndt.Hessian(self.penalty) hessian_ndt = Hfun(fvars) cov_x = inv(hessian_ndt) * 2.0 except (LinAlgError, ValueError): return None finally: self.result.nfev = nfev return cov_x def _int2ext_cov_x(self, cov_int, fvars): """Transform covariance matrix to external parameter space. It makes use of the gradient scaling according to the MINUIT recipe: cov_ext = np.dot(grad.T, grad) * cov_int Parameters ---------- cov_int : numpy.ndarray Covariance matrix in the internal parameter space. fvars : numpy.ndarray Array of the optimal internal, freely variable, parameter values. Returns ------- cov_ext : numpy.ndarray Covariance matrix, transformed to external parameter space. 
""" g = [self.result.params[name].scale_gradient(fvars[i]) for i, name in enumerate(self.result.var_names)] grad2d = np.atleast_2d(g) grad = np.dot(grad2d.T, grad2d) cov_ext = cov_int * grad return cov_ext def _calculate_uncertainties_correlations(self): """Calculate parameter uncertainties and correlations.""" self.result.errorbars = True if self.scale_covar: self.result.covar *= self.result.redchi vbest = np.atleast_1d([self.result.params[name].value for name in self.result.var_names]) has_expr = False for par in self.result.params.values(): par.stderr, par.correl = 0, None has_expr = has_expr or par.expr is not None for ivar, name in enumerate(self.result.var_names): par = self.result.params[name] par.stderr = sqrt(self.result.covar[ivar, ivar]) par.correl = {} try: self.result.errorbars = self.result.errorbars and (par.stderr > 0.0) for jvar, varn2 in enumerate(self.result.var_names): if jvar != ivar: par.correl[varn2] = (self.result.covar[ivar, jvar] / (par.stderr * sqrt(self.result.covar[jvar, jvar]))) except ZeroDivisionError: self.result.errorbars = False if has_expr: try: uvars = uncertainties.correlated_values(vbest, self.result.covar) except (LinAlgError, ValueError): uvars = None # for uncertainties on constrained parameters, use the calculated # "correlated_values", evaluate the uncertainties on the constrained # parameters and reset the Parameters to best-fit value if uvars is not None: for par in self.result.params.values(): eval_stderr(par, uvars, self.result.var_names, self.result.params) # restore nominal values for v, nam in zip(uvars, self.result.var_names): self.result.params[nam].value = v.nominal_value def scalar_minimize(self, method='Nelder-Mead', params=None, **kws): """Scalar minimization using :scipydoc:`optimize.minimize`. Perform fit with any of the scalar minimization algorithms supported by :scipydoc:`optimize.minimize`. Default argument values are: +-------------------------+-----------------+-----------------------------------------------------+ | :meth:`scalar_minimize` | Default Value | Description | | arg | | | +=========================+=================+=====================================================+ | method | ``Nelder-Mead`` | fitting method | +-------------------------+-----------------+-----------------------------------------------------+ | tol | 1.e-7 | fitting and parameter tolerance | +-------------------------+-----------------+-----------------------------------------------------+ | hess | None | Hessian of objective function | +-------------------------+-----------------+-----------------------------------------------------+ Parameters ---------- method : str, optional Name of the fitting method to use. One of: - 'Nelder-Mead' (default) - 'L-BFGS-B' - 'Powell' - 'CG' - 'Newton-CG' - 'COBYLA' - 'BFGS' - 'TNC' - 'trust-ncg' - 'trust-exact' - 'trust-krylov' - 'trust-constr' - 'dogleg' - 'SLSQP' - 'differential_evolution' params : :class:`~lmfit.parameter.Parameters`, optional Parameters to use as starting point. **kws : dict, optional Minimizer options pass to :scipydoc:`optimize.minimize`. Returns ------- :class:`MinimizerResult` Object containing the optimized parameter and several goodness-of-fit statistics. .. versionchanged:: 0.9.0 Return value changed to :class:`MinimizerResult`. Notes ----- If the objective function returns a NumPy array instead of the expected scalar, the sum of squares of the array will be used. 
Note that bounds and constraints can be set on Parameters for any of these methods, so are not supported separately for those designed to use bounds. However, if you use the differential_evolution method you must specify finite (min, max) for each varying Parameter. """ result = self.prepare_fit(params=params) result.method = method variables = result.init_vals params = result.params fmin_kws = dict(method=method, options={'maxiter': 1000 * (len(variables) + 1)}) fmin_kws.update(self.kws) fmin_kws.update(kws) # hess supported only in some methods if 'hess' in fmin_kws and method not in ('Newton-CG', 'dogleg', 'trust-constr', 'trust-ncg', 'trust-krylov', 'trust-exact'): fmin_kws.pop('hess') # jac supported only in some methods (and Dfun could be used...) if 'jac' not in fmin_kws and fmin_kws.get('Dfun', None) is not None: self.jacfcn = fmin_kws.pop('jac') fmin_kws['jac'] = self.__jacobian if 'jac' in fmin_kws and method not in ('CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP', 'dogleg', 'trust-ncg', 'trust-krylov', 'trust-exact'): self.jacfcn = None fmin_kws.pop('jac') # workers / updating keywords only supported in differential_evolution for kwd in ('workers', 'updating'): if kwd in fmin_kws and method != 'differential_evolution': fmin_kws.pop(kwd) if method == 'differential_evolution': for par in params.values(): if (par.vary and not (np.isfinite(par.min) and np.isfinite(par.max))): raise ValueError('differential_evolution requires finite ' 'bound for all varying parameters') _bounds = [(-np.pi / 2., np.pi / 2.)] * len(variables) kwargs = dict(args=(), strategy='best1bin', maxiter=None, popsize=15, tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None, callback=None, disp=False, polish=True, init='latinhypercube', atol=0, updating='immediate', workers=1) for k, v in fmin_kws.items(): if k in kwargs: kwargs[k] = v try: ret = differential_evolution(self.penalty, _bounds, **kwargs) except AbortFitException: pass else: try: ret = scipy_minimize(self.penalty, variables, **fmin_kws) except AbortFitException: pass if not result.aborted: if isinstance(ret, dict): for attr, value in ret.items(): setattr(result, attr, value) else: for attr in dir(ret): if not attr.startswith('_'): setattr(result, attr, getattr(ret, attr)) result.x = np.atleast_1d(result.x) result.residual = self.__residual(result.x) result.nfev -= 1 result._calculate_statistics() # calculate the cov_x and estimate uncertanties/correlations if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and len(result.residual) > len(result.var_names)): _covar_ndt = self._calculate_covariance_matrix(result.x) if _covar_ndt is not None: result.covar = self._int2ext_cov_x(_covar_ndt, result.x) self._calculate_uncertainties_correlations() return result def _lnprob(self, theta, userfcn, params, var_names, bounds, userargs=(), userkws=None, float_behavior='posterior', is_weighted=True, nan_policy='raise'): """Calculate the log-posterior probability. See the `Minimizer.emcee` method for more details. Parameters ---------- theta : sequence Float parameter values (only those being varied). userfcn : callable User objective function. params : :class:`~lmfit.parameters.Parameters` The entire set of Parameters. var_names : list The names of the parameters that are varying. bounds : numpy.ndarray Lower and upper bounds of parameters. Has shape (nvarys, 2). userargs : tuple, optional Extra positional arguments required for user objective function. userkws : dict, optional Extra keyword arguments required for user objective function. 
float_behavior : str, optional Specifies meaning of objective when it returns a float. Use 'posterior' if objective function returnins a log-posterior probability (default) or 'chi2' if it returns a chi2 value. is_weighted : bool, optional If `userfcn` returns a vector of residuals then `is_weighted` (default is True) specifies if the residuals have been weighted by data uncertainties. nan_policy : str, optional Specifies action if `userfcn` returns NaN values. Use 'raise' (default) to raise a `ValueError`, 'propagate' to use values as-is, or 'omit' to filter out the non-finite values. Returns ------- lnprob : float Log posterior probability. """ # the comparison has to be done on theta and bounds. DO NOT inject theta # values into Parameters, then compare Parameters values to the bounds. # Parameters values are clipped to stay within bounds. if np.any(theta > bounds[:, 1]) or np.any(theta < bounds[:, 0]): return -np.inf for name, val in zip(var_names, theta): params[name].value = val userkwargs = {} if userkws is not None: userkwargs = userkws # update the constraints params.update_constraints() # now calculate the log-likelihood out = userfcn(params, *userargs, **userkwargs) self.result.nfev += 1 if callable(self.iter_cb): abort = self.iter_cb(params, self.result.nfev, out, *userargs, **userkwargs) self._abort = self._abort or abort if self._abort: self.result.residual = out self._lastpos = theta raise AbortFitException("fit aborted by user.") else: out = _nan_policy(np.asarray(out).ravel(), nan_policy=self.nan_policy) lnprob = np.asarray(out).ravel() if len(lnprob) == 0: lnprob = np.array([-1.e100]) if lnprob.size > 1: # objective function returns a vector of residuals if '__lnsigma' in params and not is_weighted: # marginalise over a constant data uncertainty __lnsigma = params['__lnsigma'].value c = np.log(2 * np.pi) + 2 * __lnsigma lnprob = -0.5 * np.sum((lnprob / np.exp(__lnsigma)) ** 2 + c) else: lnprob = -0.5 * (lnprob * lnprob).sum() else: # objective function returns a single value. # use float_behaviour to figure out if the value is posterior or chi2 if float_behavior == 'posterior': pass elif float_behavior == 'chi2': lnprob *= -0.5 else: raise ValueError("float_behaviour must be either 'posterior' " "or 'chi2' " + float_behavior) return lnprob def emcee(self, params=None, steps=1000, nwalkers=100, burn=0, thin=1, ntemps=1, pos=None, reuse_sampler=False, workers=1, float_behavior='posterior', is_weighted=True, seed=None, progress=True): """Bayesian sampling of the posterior distribution using the `emcee` Markov Chain Monte Carlo package. The method assumes that the prior is Uniform. You need to have `emcee` version 3 installed to use this method. Parameters ---------- params : :class:`~lmfit.parameter.Parameters`, optional Parameters to use as starting point. If this is not specified then the Parameters used to initialize the Minimizer object are used. steps : int, optional How many samples you would like to draw from the posterior distribution for each of the walkers? nwalkers : int, optional Should be set so :math:`nwalkers >> nvarys`, where `nvarys` are the number of parameters being varied during the fit. 'Walkers are the members of the ensemble. They are almost like separate Metropolis-Hastings chains but, of course, the proposal distribution for a given walker depends on the positions of all the other walkers in the ensemble.' - from the `emcee` webpage. burn : int, optional Discard this many samples from the start of the sampling regime. 
thin : int, optional Only accept 1 in every `thin` samples. ntemps : int, deprecated ntemps has no effect. pos : numpy.ndarray, optional Specify the initial positions for the sampler, an ndarray of shape `(nwalkers, nvarys)`. You can also initialise using a previous chain of the same `nwalkers` and `nvarys`. Note that `nvarys` may be one larger than you expect it to be if your `userfcn` returns an array and `is_weighted` is `False`. reuse_sampler : bool, optional Set to `True` if you have already run `emcee` with the `Minimizer` instance and want to continue to draw from its ``sampler`` (and so retain the chain history). If `False`, a new sampler is created. The keywords `nwalkers`, `pos`, and `params` will be ignored when this is set, as they will be set by the existing sampler. **Important**: the Parameters used to create the sampler must not change in-between calls to `emcee`. Alteration of Parameters would include changed ``min``, ``max``, ``vary`` and ``expr`` attributes. This may happen, for example, if you use an altered Parameters object and call the `minimize` method in-between calls to `emcee`. workers : Pool-like or int, optional For parallelization of sampling. It can be any Pool-like object with a map method that follows the same calling sequence as the built-in `map` function. If int is given as the argument, then a multiprocessing-based pool is spawned internally with the corresponding number of parallel processes. 'mpi4py'-based parallelization and 'joblib'-based parallelization pools can also be used here. **Note**: because of multiprocessing overhead it may only be worth parallelising if the objective function is expensive to calculate, or if there are a large number of objective evaluations per step (`nwalkers * nvarys`). float_behavior : str, optional Meaning of float (scalar) output of objective function. Use 'posterior' if it returns a log-posterior probability or 'chi2' if it returns :math:`\\chi^2`. See Notes for further details. is_weighted : bool, optional Has your objective function been weighted by measurement uncertainties? If `is_weighted is True` then your objective function is assumed to return residuals that have been divided by the true measurement uncertainty `(data - model) / sigma`. If `is_weighted is False` then the objective function is assumed to return unweighted residuals, `data - model`. In this case `emcee` will employ a positive measurement uncertainty during the sampling. This measurement uncertainty will be present in the output params and output chain with the name `__lnsigma`. A side effect of this is that you cannot use this parameter name yourself. **Important** this parameter only has any effect if your objective function returns an array. If your objective function returns a float, then this parameter is ignored. See Notes for more details. seed : int or `numpy.random.RandomState`, optional If `seed` is an int, a new `numpy.random.RandomState` instance is used, seeded with `seed`. If `seed` is already a `numpy.random.RandomState` instance, then that `numpy.random.RandomState` instance is used. Specify `seed` for repeatable minimizations. progress : bool, optional Print a progress bar to the console while running. Returns ------- :class:`MinimizerResult` MinimizerResult object containing updated params, statistics, etc. The updated params represent the median of the samples, while the uncertainties are half the difference of the 15.87 and 84.13 percentiles. 
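            As an illustrative sketch only (the `Minimizer` instance ``mini``
            is assumed to exist already), a sampling run and a look at the
            resulting chain could be::

                res = mini.emcee(steps=1000, nwalkers=100, burn=300, thin=20,
                                 progress=False)
                print(res.flatchain.quantile([0.1587, 0.5, 0.8413]))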
The `MinimizerResult` contains a few additional attributes: ``chain`` contain the samples and has shape `((steps - burn) // thin, nwalkers, nvarys)`. ``flatchain`` is a `pandas.DataFrame` of the flattened chain, that can be accessed with `result.flatchain[parname]`. ``lnprob`` contains the log probability for each sample in ``chain``. The sample with the highest probability corresponds to the maximum likelihood estimate. ``acor`` is an array containing the autocorrelation time for each parameter if the autocorrelation time can be computed from the chain. Finally, ``acceptance_fraction`` (an array of the fraction of steps accepted for each walker). Notes ----- This method samples the posterior distribution of the parameters using Markov Chain Monte Carlo. It calculates the log-posterior probability of the model parameters, `F`, given the data, `D`, :math:`\\ln p(F_{true} | D)`. This 'posterior probability' is given by: .. math:: \\ln p(F_{true} | D) \\propto \\ln p(D | F_{true}) + \\ln p(F_{true}) where :math:`\\ln p(D | F_{true})` is the 'log-likelihood' and :math:`\\ln p(F_{true})` is the 'log-prior'. The default log-prior encodes prior information known about the model that the log-prior probability is `-numpy.inf` (impossible) if any of the parameters is outside its limits, and is zero if all the parameters are inside their bounds (uniform prior). The log-likelihood function is [1]_: .. math:: \\ln p(D|F_{true}) = -\\frac{1}{2}\\sum_n \\left[\\frac{(g_n(F_{true}) - D_n)^2}{s_n^2}+\\ln (2\\pi s_n^2)\\right] The first term represents the residual (:math:`g` being the generative model, :math:`D_n` the data and :math:`s_n` the measurement uncertainty). This gives :math:`\\chi^2` when summed over all data points. The objective function may also return the log-posterior probability, :math:`\\ln p(F_{true} | D)`. Since the default log-prior term is zero, the objective function can also just return the log-likelihood, unless you wish to create a non-uniform prior. If the objective function returns a float value, this is assumed by default to be the log-posterior probability, (`float_behavior` default is 'posterior'. If your objective function returns :math:`\\chi^2`, then you should use a value of `float_behavior='chi2'`. By default objective functions may return an ndarray of (possibly weighted) residuals. In this case, use `is_weighted` to select whether these are correctly weighted by measurement uncertainty. Note that this ignores the second term above, so that to calculate a correct log-posterior probability value your objective function should return a float value. With `is_weighted=False` the data uncertainty, `s_n`, will be treated as a nuisance parameter to be marginalized out. This uses strictly positive uncertainty (homoscedasticity) for each data point, :math:`s_n = \\exp(\\rm{\\_\\_lnsigma})`. `__lnsigma` will be present in `MinimizerResult.params`, as well as `Minimizer.chain` and `nvarys` will be increased by one. References ---------- .. 
[1] https://emcee.readthedocs.io """ if not HAS_EMCEE: raise NotImplementedError('emcee version 3 is required.') if ntemps > 1: msg = ("'ntemps' has no effect anymore, since the PTSampler was " "removed from emcee version 3.") raise DeprecationWarning(msg) tparams = params # if you're reusing the sampler then nwalkers have to be # determined from the previous sampling if reuse_sampler: if not hasattr(self, 'sampler') or not hasattr(self, '_lastpos'): raise ValueError("You wanted to use an existing sampler, but " "it hasn't been created yet") if len(self._lastpos.shape) == 2: nwalkers = self._lastpos.shape[0] elif len(self._lastpos.shape) == 3: nwalkers = self._lastpos.shape[1] tparams = None result = self.prepare_fit(params=tparams) params = result.params # check if the userfcn returns a vector of residuals out = self.userfcn(params, *self.userargs, **self.userkws) out = np.asarray(out).ravel() if out.size > 1 and is_weighted is False: # we need to marginalise over a constant data uncertainty if '__lnsigma' not in params: # __lnsigma should already be in params if is_weighted was # previously set to True. params.add('__lnsigma', value=0.01, min=-np.inf, max=np.inf, vary=True) # have to re-prepare the fit result = self.prepare_fit(params) params = result.params result.method = 'emcee' # Removing internal parameter scaling. We could possibly keep it, # but I don't know how this affects the emcee sampling. bounds = [] var_arr = np.zeros(len(result.var_names)) i = 0 for par in params: param = params[par] if param.expr is not None: param.vary = False if param.vary: var_arr[i] = param.value i += 1 else: # don't want to append bounds if they're not being varied. continue param.from_internal = lambda val: val lb, ub = param.min, param.max if lb is None or lb is np.nan: lb = -np.inf if ub is None or ub is np.nan: ub = np.inf bounds.append((lb, ub)) bounds = np.array(bounds) self.nvarys = len(result.var_names) # set up multiprocessing options for the samplers auto_pool = None sampler_kwargs = {} if isinstance(workers, int) and workers > 1 and HAS_DILL: auto_pool = multiprocessing.Pool(workers) sampler_kwargs['pool'] = auto_pool elif hasattr(workers, 'map'): sampler_kwargs['pool'] = workers # function arguments for the log-probability functions # these values are sent to the log-probability functions by the sampler. lnprob_args = (self.userfcn, params, result.var_names, bounds) lnprob_kwargs = {'is_weighted': is_weighted, 'float_behavior': float_behavior, 'userargs': self.userargs, 'userkws': self.userkws, 'nan_policy': self.nan_policy} sampler_kwargs['args'] = lnprob_args sampler_kwargs['kwargs'] = lnprob_kwargs # set up the random number generator rng = _make_random_gen(seed) # now initialise the samplers if reuse_sampler: if auto_pool is not None: self.sampler.pool = auto_pool p0 = self._lastpos if p0.shape[-1] != self.nvarys: raise ValueError("You cannot reuse the sampler if the number " "of varying parameters has changed") else: p0 = 1 + rng.randn(nwalkers, self.nvarys) * 1.e-4 p0 *= var_arr sampler_kwargs['pool'] = auto_pool self.sampler = emcee.EnsembleSampler(nwalkers, self.nvarys, self._lnprob, **sampler_kwargs) # user supplies an initialisation position for the chain # If you try to run the sampler with p0 of a wrong size then you'll get # a ValueError. Note, you can't initialise with a position if you are # reusing the sampler. 
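        # Illustrative note (not from the original source): a user-supplied
        # `pos` must broadcast to shape (nwalkers, nvarys); for example, the
        # final positions of a previous run, `pos=previous_result.chain[-1]`,
        # can be passed to continue sampling from where that run stopped.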
if pos is not None and not reuse_sampler: tpos = np.asfarray(pos) if p0.shape == tpos.shape: pass # trying to initialise with a previous chain elif tpos.shape[-1] == self.nvarys: tpos = tpos[-1] else: raise ValueError('pos should have shape (nwalkers, nvarys)') p0 = tpos # if you specified a seed then you also need to seed the sampler if seed is not None: self.sampler.random_state = rng.get_state() # now do a production run, sampling all the time try: output = self.sampler.run_mcmc(p0, steps, progress=progress) self._lastpos = output.coords except AbortFitException: result.aborted = True result.message = "Fit aborted by user callback. Could not estimate error-bars." result.success = False result.nfev = self.result.nfev output = None # discard the burn samples and thin chain = self.sampler.get_chain(thin=thin, discard=burn)[..., :, :] lnprobability = self.sampler.get_log_prob(thin=thin, discard=burn)[..., :] flatchain = chain.reshape((-1, self.nvarys)) if not result.aborted: quantiles = np.percentile(flatchain, [15.87, 50, 84.13], axis=0) for i, var_name in enumerate(result.var_names): std_l, median, std_u = quantiles[:, i] params[var_name].value = median params[var_name].stderr = 0.5 * (std_u - std_l) params[var_name].correl = {} params.update_constraints() # work out correlation coefficients corrcoefs = np.corrcoef(flatchain.T) for i, var_name in enumerate(result.var_names): for j, var_name2 in enumerate(result.var_names): if i != j: result.params[var_name].correl[var_name2] = corrcoefs[i, j] result.chain = np.copy(chain) result.lnprob = np.copy(lnprobability) result.errorbars = True result.nvarys = len(result.var_names) result.nfev = nwalkers*steps try: result.acor = self.sampler.get_autocorr_time() except AutocorrError as e: print(str(e)) pass result.acceptance_fraction = self.sampler.acceptance_fraction # Calculate the residual with the "best fit" parameters out = self.userfcn(params, *self.userargs, **self.userkws) result.residual = _nan_policy(out, nan_policy=self.nan_policy, handle_inf=False) # If uncertainty was automatically estimated, weight the residual properly if (not is_weighted) and (result.residual.size > 1): if '__lnsigma' in params: result.residual = result.residual/np.exp(params['__lnsigma'].value) # Calculate statistics for the two standard cases: if isinstance(result.residual, ndarray) or (float_behavior == 'chi2'): result._calculate_statistics() # Handle special case unique to emcee: # This should eventually be moved into result._calculate_statistics. elif float_behavior == 'posterior': result.ndata = 1 result.nfree = 1 # assuming prior prob = 1, this is true _neg2_log_likel = -2*result.residual # assumes that residual is properly weighted, avoid overflowing np.exp() result.chisqr = np.exp(min(650, _neg2_log_likel)) result.redchi = result.chisqr / result.nfree result.aic = _neg2_log_likel + 2 * result.nvarys result.bic = _neg2_log_likel + np.log(result.ndata) * result.nvarys if auto_pool is not None: auto_pool.terminate() return result def least_squares(self, params=None, **kws): """Least-squares minimization using :scipydoc:`optimize.least_squares`. This method wraps :scipydoc:`optimize.least_squares`, which has inbuilt support for bounds and robust loss functions. By default it uses the Trust Region Reflective algorithm with a linear loss function (i.e., the standard least-squares problem). Parameters ---------- params : :class:`~lmfit.parameter.Parameters`, optional Parameters to use as starting point. 
**kws : dict, optional Minimizer options to pass to :scipydoc:`optimize.least_squares`. Returns ------- :class:`MinimizerResult` Object containing the optimized parameter and several goodness-of-fit statistics. .. versionchanged:: 0.9.0 Return value changed to :class:`MinimizerResult`. """ result = self.prepare_fit(params) result.method = 'least_squares' replace_none = lambda x, sign: sign*np.inf if x is None else x start_vals, lower_bounds, upper_bounds = [], [], [] for vname in result.var_names: par = self.params[vname] start_vals.append(par.value) lower_bounds.append(replace_none(par.min, -1)) upper_bounds.append(replace_none(par.max, 1)) try: ret = least_squares(self.__residual, start_vals, bounds=(lower_bounds, upper_bounds), kwargs=dict(apply_bounds_transformation=False), **kws) result.residual = ret.fun except AbortFitException: pass # note: upstream least_squares is actually returning # "last evaluation", not "best result", but we do this # here for consistency, and assuming it will be fixed. if not result.aborted: result.residual = self.__residual(ret.x, False) result.nfev -= 1 result._calculate_statistics() if not result.aborted: for attr in ret: setattr(result, attr, ret[attr]) result.x = np.atleast_1d(result.x) # calculate the cov_x and estimate uncertainties/correlations try: if issparse(ret.jac): hess = (ret.jac.T * ret.jac).toarray() elif isinstance(ret.jac, LinearOperator): identity = np.eye(ret.jac.shape[1], dtype=ret.jac.dtype) # TODO: Remove try-except when scipy < 1.4.0 support dropped try: # For scipy >= 1.4.0 (with Linear Operator transpose) # https://github.com/scipy/scipy/pull/9064 hess = (ret.jac.T * ret.jac) * identity except AttributeError: # For scipy < 1.4.0 (without Linear Operator transpose) jac = ret.jac * identity hess = np.matmul(jac.T, jac) else: hess = np.matmul(ret.jac.T, ret.jac) result.covar = np.linalg.inv(hess) self._calculate_uncertainties_correlations() except LinAlgError: pass return result def leastsq(self, params=None, **kws): """Use Levenberg-Marquardt minimization to perform a fit. It assumes that the input Parameters have been initialized, and a function to minimize has been properly set up. When possible, this calculates the estimated uncertainties and variable correlations from the covariance matrix. This method calls :scipydoc:`optimize.leastsq`. By default, numerical derivatives are used, and the following arguments are set: +------------------+----------------+------------------------------------------------------------+ | :meth:`leastsq` | Default Value | Description | | arg | | | +==================+================+============================================================+ | xtol | 1.e-7 | Relative error in the approximate solution | +------------------+----------------+------------------------------------------------------------+ | ftol | 1.e-7 | Relative error in the desired sum of squares | +------------------+----------------+------------------------------------------------------------+ | maxfev | 2000*(nvar+1) | Maximum number of function calls (nvar= # of variables) | +------------------+----------------+------------------------------------------------------------+ | Dfun | None | Function to call for Jacobian calculation | +------------------+----------------+------------------------------------------------------------+ Parameters ---------- params : :class:`~lmfit.parameter.Parameters`, optional Parameters to use as starting point. **kws : dict, optional Minimizer options to pass to :scipydoc:`optimize.leastsq`. 
Returns ------- :class:`MinimizerResult` Object containing the optimized parameter and several goodness-of-fit statistics. .. versionchanged:: 0.9.0 Return value changed to :class:`MinimizerResult`. """ result = self.prepare_fit(params=params) result.method = 'leastsq' result.nfev -= 2 # correct for "pre-fit" initialization/checks variables = result.init_vals nvars = len(variables) lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7, col_deriv=False, gtol=1.e-7, maxfev=2000*(nvars+1), Dfun=None) lskws.update(self.kws) lskws.update(kws) self.col_deriv = False if lskws['Dfun'] is not None: self.jacfcn = lskws['Dfun'] self.col_deriv = lskws['col_deriv'] lskws['Dfun'] = self.__jacobian # suppress runtime warnings during fit and error analysis orig_warn_settings = np.geterr() np.seterr(all='ignore') try: lsout = scipy_leastsq(self.__residual, variables, **lskws) except AbortFitException: pass if not result.aborted: _best, _cov, infodict, errmsg, ier = lsout result.residual = self.__residual(_best) result.nfev -= 1 result._calculate_statistics() if result.aborted: return result result.ier = ier result.lmdif_message = errmsg result.success = ier in [1, 2, 3, 4] if ier in {1, 2, 3}: result.message = 'Fit succeeded.' elif ier == 0: result.message = ('Invalid Input Parameters. I.e. more variables ' 'than data points given, tolerance < 0.0, or ' 'no data provided.') elif ier == 4: result.message = 'One or more variable did not affect the fit.' elif ier == 5: result.message = self._err_maxfev % lskws['maxfev'] else: result.message = 'Tolerance seems to be too small.' # self.errorbars = error bars were successfully estimated result.errorbars = (_cov is not None) if result.errorbars: # transform the covariance matrix to "external" parameter space result.covar = self._int2ext_cov_x(_cov, _best) # calculate parameter uncertainties and correlations self._calculate_uncertainties_correlations() else: result.message = '%s Could not estimate error-bars.' % result.message np.seterr(**orig_warn_settings) return result def basinhopping(self, params=None, **kws): """Use the `basinhopping` algorithm to find the global minimum of a function. This method calls :scipydoc:`optimize.basinhopping` using the default arguments. The default minimizer is `BFGS`, but since lmfit supports parameter bounds for all minimizers, the user can choose any of the solvers present in :scipydoc:`optimize.minimize`. Parameters ---------- params : :class:`~lmfit.parameter.Parameters` object, optional Contains the Parameters for the model. If None, then the Parameters used to initialize the Minimizer object are used. Returns ------- :class:`MinimizerResult` Object containing the optimization results from the basinhopping algorithm. .. 
versionadded:: 0.9.10 """ result = self.prepare_fit(params=params) result.method = 'basinhopping' basinhopping_kws = dict(niter=100, T=1.0, stepsize=0.5, minimizer_kwargs={}, take_step=None, accept_test=None, callback=None, interval=50, disp=False, niter_success=None, seed=None) basinhopping_kws.update(self.kws) basinhopping_kws.update(kws) x0 = result.init_vals try: ret = scipy_basinhopping(self.penalty, x0, **basinhopping_kws) except AbortFitException: pass if not result.aborted: result.message = ret.message result.residual = self.__residual(ret.x) result.nfev -= 1 result._calculate_statistics() # calculate the cov_x and estimate uncertanties/correlations if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and len(result.residual) > len(result.var_names)): _covar_ndt = self._calculate_covariance_matrix(ret.x) if _covar_ndt is not None: result.covar = self._int2ext_cov_x(_covar_ndt, ret.x) self._calculate_uncertainties_correlations() return result def brute(self, params=None, Ns=20, keep=50, workers=1): """Use the `brute` method to find the global minimum of a function. The following parameters are passed to :scipydoc:`optimize.brute` and cannot be changed: +-------------------+-------+----------------------------------------+ | :meth:`brute` arg | Value | Description | +===================+=======+========================================+ | full_output | 1 | Return the evaluation grid and | | | | the objective function's values on it. | +-------------------+-------+----------------------------------------+ | finish | None | No "polishing" function is to be used | | | | after the grid search. | +-------------------+-------+----------------------------------------+ | disp | False | Do not print convergence messages | | | | (when finish is not None). | +-------------------+-------+----------------------------------------+ It assumes that the input Parameters have been initialized, and a function to minimize has been properly set up. Parameters ---------- params : :class:`~lmfit.parameter.Parameters`, optional Contains the Parameters for the model. If None, then the Parameters used to initialize the Minimizer object are used. Ns : int, optional Number of grid points along the axes, if not otherwise specified (see Notes). keep : int, optional Number of best candidates from the brute force method that are stored in the :attr:`candidates` attribute. If 'all', then all grid points from :scipydoc:`optimize.brute` are stored as candidates. workers : int or map-like callable, optional For parallel evaluation of the grid, added in SciPy v1.3 (see :scipydoc:`optimize.brute` for more details). Returns ------- :class:`MinimizerResult` Object containing the parameters from the brute force method. The return values (`x0`, `fval`, `grid`, `Jout`) from :scipydoc:`optimize.brute` are stored as `brute_` attributes. The `MinimizerResult` also contains the `candidates` attribute and `show_candidates()` method. The `candidates` attribute contains the parameters and chisqr from the brute force method as a namedtuple, ('Candidate', ['params', 'score']), sorted on the (lowest) chisqr value. To access the values for a particular candidate one can use `result.candidate[#].params` or `result.candidate[#].score`, where a lower # represents a better candidate. The `show_candidates(#)` uses the :meth:`pretty_print` method to show a specific candidate-# or all candidates when no number is specified. .. 
versionadded:: 0.9.6 Notes ----- The :meth:`brute` method evalutes the function at each point of a multidimensional grid of points. The grid points are generated from the parameter ranges using `Ns` and (optional) `brute_step`. The implementation in :scipydoc:`optimize.brute` requires finite bounds and the `range` is specified as a two-tuple `(min, max)` or slice-object `(min, max, brute_step)`. A slice-object is used directly, whereas a two-tuple is converted to a slice object that interpolates `Ns` points from `min` to `max`, inclusive. In addition, the :meth:`brute` method in lmfit, handles three other scenarios given below with their respective slice-object: - lower bound (:attr:`min`) and :attr:`brute_step` are specified: range = (`min`, `min` + `Ns` * `brute_step`, `brute_step`). - upper bound (:attr:`max`) and :attr:`brute_step` are specified: range = (`max` - `Ns` * `brute_step`, `max`, `brute_step`). - numerical value (:attr:`value`) and :attr:`brute_step` are specified: range = (`value` - (`Ns`//2) * `brute_step`, `value` + (`Ns`//2) * `brute_step`, `brute_step`). """ result = self.prepare_fit(params=params) result.method = 'brute' brute_kws = dict(full_output=1, finish=None, disp=False) # keyword 'workers' is introduced in SciPy v1.3 # FIXME: remove this check after updating the requirement >= 1.3 major, minor, micro = scipy_version.split('.', 2) if int(major) == 1 and int(minor) >= 3: brute_kws.update({'workers': workers}) varying = np.asarray([par.vary for par in self.params.values()]) replace_none = lambda x, sign: sign*np.inf if x is None else x lower_bounds = np.asarray([replace_none(i.min, -1) for i in self.params.values()])[varying] upper_bounds = np.asarray([replace_none(i.max, 1) for i in self.params.values()])[varying] value = np.asarray([i.value for i in self.params.values()])[varying] stepsize = np.asarray([i.brute_step for i in self.params.values()])[varying] ranges = [] for i, step in enumerate(stepsize): if np.all(np.isfinite([lower_bounds[i], upper_bounds[i]])): # lower AND upper bounds are specified (brute_step optional) par_range = ((lower_bounds[i], upper_bounds[i], step) if step else (lower_bounds[i], upper_bounds[i])) elif np.isfinite(lower_bounds[i]) and step: # lower bound AND brute_step are specified par_range = (lower_bounds[i], lower_bounds[i] + Ns*step, step) elif np.isfinite(upper_bounds[i]) and step: # upper bound AND brute_step are specified par_range = (upper_bounds[i] - Ns*step, upper_bounds[i], step) elif np.isfinite(value[i]) and step: # no bounds, but an initial value is specified par_range = (value[i] - (Ns//2)*step, value[i] + (Ns//2)*step, step) else: raise ValueError('Not enough information provided for the brute ' 'force method. 
Please specify bounds or at ' 'least an initial value and brute_step for ' 'parameter "{}".'.format(result.var_names[i])) ranges.append(par_range) try: ret = scipy_brute(self.penalty, tuple(ranges), Ns=Ns, **brute_kws) except AbortFitException: pass if not result.aborted: result.brute_x0 = ret[0] result.brute_fval = ret[1] result.brute_grid = ret[2] result.brute_Jout = ret[3] # sort the results of brute and populate .candidates attribute grid_score = ret[3].ravel() # chisqr grid_points = [par.ravel() for par in ret[2]] if len(result.var_names) == 1: grid_result = np.array([res for res in zip(zip(grid_points), grid_score)], dtype=[('par', 'O'), ('score', 'float64')]) else: grid_result = np.array([res for res in zip(zip(*grid_points), grid_score)], dtype=[('par', 'O'), ('score', 'float64')]) grid_result_sorted = grid_result[grid_result.argsort(order='score')] result.candidates = [] if keep == 'all': keep_candidates = len(grid_result_sorted) else: keep_candidates = min(len(grid_result_sorted), keep) for data in grid_result_sorted[:keep_candidates]: pars = deepcopy(self.params) for i, par in enumerate(result.var_names): pars[par].value = data[0][i] result.candidates.append(Candidate(params=pars, score=data[1])) result.params = result.candidates[0].params result.residual = self.__residual(result.brute_x0, apply_bounds_transformation=False) result.nfev = len(result.brute_Jout.ravel()) result._calculate_statistics() return result def ampgo(self, params=None, **kws): """Find the global minimum of a multivariate function using AMPGO. AMPGO stands for 'Adaptive Memory Programming for Global Optimization' and is an efficient algorithm to find the global minimum. Parameters ---------- params : :class:`~lmfit.parameter.Parameters`, optional Contains the Parameters for the model. If None, then the Parameters used to initialize the Minimizer object are used. **kws : dict, optional Minimizer options to pass to the ampgo algorithm, the options are listed below:: local: str (default is 'L-BFGS-B') Name of the local minimization method. Valid options are: - 'L-BFGS-B' - 'Nelder-Mead' - 'Powell' - 'TNC' - 'SLSQP' local_opts: dict (default is None) Options to pass to the local minimizer. maxfunevals: int (default is None) Maximum number of function evaluations. If None, the optimization will stop after `totaliter` number of iterations. totaliter: int (default is 20) Maximum number of global iterations. maxiter: int (default is 5) Maximum number of `Tabu Tunneling` iterations during each global iteration. glbtol: float (default is 1e-5) Tolerance whether or not to accept a solution after a tunneling phase. eps1: float (default is 0.02) Constant used to define an aspiration value for the objective function during the Tunneling phase. eps2: float (default is 0.1) Perturbation factor used to move away from the latest local minimum at the start of a Tunneling phase. tabulistsize: int (default is 5) Size of the (circular) tabu search list. tabustrategy: str (default is 'farthest') Strategy to use when the size of the tabu list exceeds `tabulistsize`. It can be 'oldest' to drop the oldest point from the tabu list or 'farthest' to drop the element farthest from the last local minimum found. disp: bool (default is False) Set to True to print convergence messages. Returns ------- :class:`MinimizerResult` Object containing the parameters from the ampgo method, with fit parameters, statistics and such. The return values (`x0`, `fval`, `eval`, `msg`, `tunnel`) are stored as `ampgo_` attributes. .. 
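For example, as an illustrative sketch (assuming an objective function `residual` and a `Parameters` object `params` have already been defined):

>>> from lmfit import Minimizer
>>> fitter = Minimizer(residual, params)
>>> result = fitter.ampgo(totaliter=10, disp=True)
>>> print(result.ampgo_msg)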
versionadded:: 0.9.10 Notes ---- The Python implementation was written by Andrea Gavana in 2014 (http://infinity77.net/global_optimization/index.html). The details of the AMPGO algorithm are described in the paper "Adaptive Memory Programming for Constrained Global Optimization" located here: http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf """ result = self.prepare_fit(params=params) ampgo_kws = dict(local='L-BFGS-B', local_opts=None, maxfunevals=None, totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, eps2=0.1, tabulistsize=5, tabustrategy='farthest', disp=False) ampgo_kws.update(self.kws) ampgo_kws.update(kws) values = result.init_vals result.method = "ampgo, with {} as local solver".format(ampgo_kws['local']) try: ret = ampgo(self.penalty, values, **ampgo_kws) except AbortFitException: pass if not result.aborted: result.ampgo_x0 = ret[0] result.ampgo_fval = ret[1] result.ampgo_eval = ret[2] result.ampgo_msg = ret[3] result.ampgo_tunnel = ret[4] for i, par in enumerate(result.var_names): result.params[par].value = result.ampgo_x0[i] result.residual = self.__residual(result.ampgo_x0) result.nfev -= 1 result._calculate_statistics() # calculate the cov_x and estimate uncertanties/correlations if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and len(result.residual) > len(result.var_names)): _covar_ndt = self._calculate_covariance_matrix(result.ampgo_x0) if _covar_ndt is not None: result.covar = self._int2ext_cov_x(_covar_ndt, result.ampgo_x0) self._calculate_uncertainties_correlations() return result def shgo(self, params=None, **kws): """Use the `SHGO` algorithm to find the global minimum. SHGO stands for "simplicial homology global optimization" and calls :scipydoc:`optimize.shgo` using its default arguments. Parameters ---------- params : :class:`~lmfit.parameter.Parameters`, optional Contains the Parameters for the model. If None, then the Parameters used to initialize the Minimizer object are used. **kws : dict, optional Minimizer options to pass to the SHGO algorithm. Returns ------- :class:`MinimizerResult` Object containing the parameters from the SHGO method. The return values specific to :scipydoc:`optimize.shgo` (`x`, `xl`, `fun`, `funl`, `nfev`, `nit`, `nlfev`, `nlhev`, and `nljev`) are stored as `shgo_` attributes. .. 
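For example, as an illustrative sketch (assuming `residual` and `params` are already defined and every varying Parameter has finite bounds):

>>> fitter = Minimizer(residual, params)
>>> result = fitter.shgo(sampling_method='sobol', n=64)
>>> print(result.shgo_fun, result.shgo_nfev)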
versionadded:: 0.9.14 """ result = self.prepare_fit(params=params) result.method = 'shgo' shgo_kws = dict(constraints=None, n=100, iters=1, callback=None, minimizer_kwargs=None, options=None, sampling_method='simplicial') shgo_kws.update(self.kws) shgo_kws.update(kws) varying = np.asarray([par.vary for par in self.params.values()]) bounds = np.asarray([(par.min, par.max) for par in self.params.values()])[varying] try: ret = scipy_shgo(self.penalty, bounds, **shgo_kws) except AbortFitException: pass if not result.aborted: for attr, value in ret.items(): if attr in ['success', 'message']: setattr(result, attr, value) else: setattr(result, 'shgo_{}'.format(attr), value) result.residual = self.__residual(result.shgo_x, False) result.nfev -= 1 result._calculate_statistics() # calculate the cov_x and estimate uncertanties/correlations if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and len(result.residual) > len(result.var_names)): result.covar = self._calculate_covariance_matrix(result.shgo_x) if result.covar is not None: self._calculate_uncertainties_correlations() return result def dual_annealing(self, params=None, **kws): """Use the `dual_annealing` algorithm to find the global minimum. This method calls :scipydoc:`optimize.dual_annealing` using its default arguments. Parameters ---------- params : :class:`~lmfit.parameter.Parameters`, optional Contains the Parameters for the model. If None, then the Parameters used to initialize the Minimizer object are used. **kws : dict, optional Minimizer options to pass to the dual_annealing algorithm. Returns ------- :class:`MinimizerResult` Object containing the parameters from the dual_annealing method. The return values specific to :scipydoc:`optimize.dual_annealing` (`x`, `fun`, `nfev`, `nhev`, `njev`, and `nit`) are stored as `da_` attributes. .. versionadded:: 0.9.14 """ result = self.prepare_fit(params=params) result.method = 'dual_annealing' da_kws = dict(maxiter=1000, local_search_options={}, initial_temp=5230.0, restart_temp_ratio=2e-05, visit=2.62, accept=-5.0, maxfun=10000000.0, seed=None, no_local_search=False, callback=None, x0=None) da_kws.update(self.kws) da_kws.update(kws) varying = np.asarray([par.vary for par in self.params.values()]) bounds = np.asarray([(par.min, par.max) for par in self.params.values()])[varying] if not np.all(np.isfinite(bounds)): raise ValueError('dual_annealing requires finite bounds for all' ' varying parameters') try: ret = scipy_dual_annealing(self.penalty, bounds, **da_kws) except AbortFitException: pass if not result.aborted: for attr, value in ret.items(): if attr in ['success', 'message']: setattr(result, attr, value) else: setattr(result, 'da_{}'.format(attr), value) result.residual = self.__residual(result.da_x, False) result.nfev -= 1 result._calculate_statistics() # calculate the cov_x and estimate uncertanties/correlations if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and len(result.residual) > len(result.var_names)): result.covar = self._calculate_covariance_matrix(result.da_x) if result.covar is not None: self._calculate_uncertainties_correlations() return result def minimize(self, method='leastsq', params=None, **kws): """Perform the minimization. Parameters ---------- method : str, optional Name of the fitting method to use. 
Valid values are: - `'leastsq'`: Levenberg-Marquardt (default) - `'least_squares'`: Least-Squares minimization, using Trust Region Reflective method - `'differential_evolution'`: differential evolution - `'brute'`: brute force method - `'basinhopping'`: basinhopping - `'ampgo'`: Adaptive Memory Programming for Global Optimization - '`nelder`': Nelder-Mead - `'lbfgsb'`: L-BFGS-B - `'powell'`: Powell - `'cg'`: Conjugate-Gradient - `'newton'`: Newton-CG - `'cobyla'`: Cobyla - `'bfgs'`: BFGS - `'tnc'`: Truncated Newton - `'trust-ncg'`: Newton-CG trust-region - `'trust-exact'`: nearly exact trust-region - `'trust-krylov'`: Newton GLTR trust-region - `'trust-constr'`: trust-region for constrained optimization - `'dogleg'`: Dog-leg trust-region - `'slsqp'`: Sequential Linear Squares Programming - `'emcee'`: Maximum likelihood via Monte-Carlo Markov Chain - `'shgo'`: Simplicial Homology Global Optimization - `'dual_annealing'`: Dual Annealing optimization In most cases, these methods wrap and use the method with the same name from `scipy.optimize`, or use `scipy.optimize.minimize` with the same `method` argument. Thus '`leastsq`' will use `scipy.optimize.leastsq`, while '`powell`' will use `scipy.optimize.minimizer(..., method='powell')` For more details on the fitting methods please refer to the `SciPy docs `__. params : :class:`~lmfit.parameter.Parameters`, optional Parameters of the model to use as starting values. **kws : optional Additional arguments are passed to the underlying minimization method. Returns ------- :class:`MinimizerResult` Object containing the optimized parameter and several goodness-of-fit statistics. .. versionchanged:: 0.9.0 Return value changed to :class:`MinimizerResult`. """ function = self.leastsq kwargs = {'params': params} kwargs.update(self.kws) kwargs.update(kws) user_method = method.lower() if user_method.startswith('leasts'): function = self.leastsq elif user_method.startswith('least_s'): function = self.least_squares elif user_method == 'brute': function = self.brute elif user_method == 'basinhopping': function = self.basinhopping elif user_method == 'ampgo': function = self.ampgo elif user_method == 'emcee': function = self.emcee elif user_method == 'shgo': function = self.shgo elif user_method == 'dual_annealing': function = self.dual_annealing else: function = self.scalar_minimize for key, val in SCALAR_METHODS.items(): if (key.lower().startswith(user_method) or val.lower().startswith(user_method)): kwargs['method'] = val return function(**kwargs) def _make_random_gen(seed): """Turn seed into a numpy.random.RandomState instance. If seed is None, return the RandomState singleton used by numpy.random. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError. """ if seed is None or seed is np.random: return np.random.mtrand._rand if isinstance(seed, (numbers.Integral, np.integer)): return np.random.RandomState(seed) if isinstance(seed, np.random.RandomState): return seed raise ValueError('%r cannot be used to seed a numpy.random.RandomState' ' instance' % seed) def _nan_policy(arr, nan_policy='raise', handle_inf=True): """Specify behaviour when an array contains numpy.nan or numpy.inf. Parameters ---------- arr : array_like Input array to consider. nan_policy : str, optional One of: 'raise' - raise a `ValueError` if `arr` contains NaN (default) 'propagate' - propagate NaN 'omit' - filter NaN from input array handle_inf : bool, optional As well as NaN consider +/- inf. 
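For example, an illustrative sketch of the 'omit' behaviour of this private helper:

>>> import numpy as np
>>> arr = np.array([0.0, 1.5, np.nan, 3.0, np.inf])
>>> filtered = _nan_policy(arr, nan_policy='omit')
>>> len(filtered)     # the NaN and inf entries have been removed
3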
Returns ------- filtered_array : array_like Note ---- This function is copied, then modified, from scipy/stats/stats.py/_contains_nan """ if nan_policy not in ('propagate', 'omit', 'raise'): raise ValueError("nan_policy must be 'propagate', 'omit', or 'raise'.") if handle_inf: handler_func = lambda x: ~np.isfinite(x) else: handler_func = isnull if nan_policy == 'omit': # mask locates any values to remove mask = ~handler_func(arr) if not np.all(mask): # there are some NaNs/infs/missing values return arr[mask] if nan_policy == 'raise': try: # Calling np.sum to avoid creating a huge array into memory # e.g. np.isnan(a).any() with np.errstate(invalid='ignore'): contains_nan = handler_func(np.sum(arr)) except TypeError: # If the check cannot be properly performed we fallback to omiting # nan values and raising a warning. This can happen when attempting to # sum things that are not numbers (e.g. as in the function `mode`). contains_nan = False warnings.warn("The input array could not be checked for NaNs. " "NaNs will be ignored.", RuntimeWarning) if contains_nan: msg = ('NaN values detected in your input data or the output of ' 'your objective/model function - fitting algorithms cannot ' 'handle this! Please read https://lmfit.github.io/lmfit-py/faq.html#i-get-errors-from-nan-in-my-fit-what-can-i-do ' 'for more information.') raise ValueError(msg) return arr def minimize(fcn, params, method='leastsq', args=None, kws=None, iter_cb=None, scale_covar=True, nan_policy='raise', reduce_fcn=None, calc_covar=True, **fit_kws): """Perform a fit of a set of parameters by minimizing an objective (or cost) function using one of the several available methods. The minimize function takes an objective function to be minimized, a dictionary (:class:`~lmfit.parameter.Parameters`) containing the model parameters, and several optional arguments. Parameters ---------- fcn : callable Objective function to be minimized. When method is `leastsq` or `least_squares`, the objective function should return an array of residuals (difference between model and data) to be minimized in a least-squares sense. With the scalar methods the objective function can either return the residuals array or a single scalar value. The function must have the signature: `fcn(params, *args, **kws)` params : :class:`~lmfit.parameter.Parameters` Contains the Parameters for the model. method : str, optional Name of the fitting method to use. Valid values are: - `'leastsq'`: Levenberg-Marquardt (default) - `'least_squares'`: Least-Squares minimization, using Trust Region Reflective method - `'differential_evolution'`: differential evolution - `'brute'`: brute force method - `'basinhopping'`: basinhopping - `'ampgo'`: Adaptive Memory Programming for Global Optimization - '`nelder`': Nelder-Mead - `'lbfgsb'`: L-BFGS-B - `'powell'`: Powell - `'cg'`: Conjugate-Gradient - `'newton'`: Newton-CG - `'cobyla'`: Cobyla - `'bfgs'`: BFGS - `'tnc'`: Truncated Newton - `'trust-ncg'`: Newton-CG trust-region - `'trust-exact'`: nearly exact trust-region - `'trust-krylov'`: Newton GLTR trust-region - `'trust-constr'`: trust-region for constrained optimization - `'dogleg'`: Dog-leg trust-region - `'slsqp'`: Sequential Linear Squares Programming - `'emcee'`: Maximum likelihood via Monte-Carlo Markov Chain - `'shgo'`: Simplicial Homology Global Optimization - `'dual_annealing'`: Dual Annealing optimization In most cases, these methods wrap and use the method of the same name from `scipy.optimize`, or use `scipy.optimize.minimize` with the same `method` argument. 
Thus '`leastsq`' will use `scipy.optimize.leastsq`, while '`powell`' will use `scipy.optimize.minimizer(..., method='powell')` For more details on the fitting methods please refer to the `SciPy docs `__. args : tuple, optional Positional arguments to pass to `fcn`. kws : dict, optional Keyword arguments to pass to `fcn`. iter_cb : callable, optional Function to be called at each fit iteration. This function should have the signature `iter_cb(params, iter, resid, *args, **kws)`, where `params` will have the current parameter values, `iter` the iteration number, `resid` the current residual array, and `*args` and `**kws` as passed to the objective function. scale_covar : bool, optional Whether to automatically scale the covariance matrix (default is True). nan_policy : str, optional Specifies action if `userfcn` (or a Jacobian) returns NaN values. One of: - 'raise' : a `ValueError` is raised - 'propagate' : the values returned from `userfcn` are un-altered - 'omit' : non-finite values are filtered reduce_fcn : str or callable, optional Function to convert a residual array to a scalar value for the scalar minimizers. See notes in `Minimizer`. calc_covar : bool, optional Whether to calculate the covariance matrix (default is True) for solvers other than `leastsq` and `least_squares`. Requires the `numdifftools` package to be installed. **fit_kws : dict, optional Options to pass to the minimizer being used. Returns ------- :class:`MinimizerResult` Object containing the optimized parameter and several goodness-of-fit statistics. .. versionchanged:: 0.9.0 Return value changed to :class:`MinimizerResult`. Notes ----- The objective function should return the value to be minimized. For the Levenberg-Marquardt algorithm from leastsq(), this returned value must be an array, with a length greater than or equal to the number of fitting variables in the model. For the other methods, the return value can either be a scalar or an array. If an array is returned, the sum of squares of the array will be sent to the underlying fitting method, effectively doing a least-squares optimization of the return values. A common use for `args` and `kws` would be to pass in other data needed to calculate the residual, including such things as the data array, dependent variable, uncertainties in the data, and other data structures for the model calculation. On output, `params` will be unchanged. The best-fit values and, where appropriate, estimated uncertainties and correlations, will all be contained in the returned :class:`MinimizerResult`. See :ref:`fit-results-label` for further details. This function is simply a wrapper around :class:`Minimizer` and is equivalent to:: fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws, iter_cb=iter_cb, scale_covar=scale_covar, nan_policy=nan_policy, reduce_fcn=reduce_fcn, calc_covar=calc_covar, **fit_kws) fitter.minimize(method=method) """ fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws, iter_cb=iter_cb, scale_covar=scale_covar, nan_policy=nan_policy, reduce_fcn=reduce_fcn, calc_covar=calc_covar, **fit_kws) return fitter.minimize(method=method) lmfit-py-1.0.0/lmfit/model.py000066400000000000000000002223651357751001700160560ustar00rootroot00000000000000"""Implementation of the Model interface.""" from collections import OrderedDict from copy import deepcopy from functools import wraps import inspect import json import operator import sys import warnings import numpy as np from scipy.special import erf from scipy.stats import t from . 
import Minimizer, Parameter, Parameters, lineshapes from .confidence import conf_interval from .jsonutils import HAS_DILL, decode4js, encode4js from .minimizer import MinimizerResult from .printfuncs import ci_report, fit_report, fitreport_html_table # Use pandas.isnull for aligning missing data if pandas is available. # otherwise use numpy.isnan try: from pandas import isnull, Series except ImportError: isnull = np.isnan Series = type(NotImplemented) def _align(var, mask, data): """Align missing data, if pandas is available.""" if isinstance(data, Series) and isinstance(var, Series): return var.reindex_like(data).dropna() elif mask is not None: return var[mask] return var try: import matplotlib # noqa: F401 _HAS_MATPLOTLIB = True except Exception: _HAS_MATPLOTLIB = False def _ensureMatplotlib(function): if _HAS_MATPLOTLIB: @wraps(function) def wrapper(*args, **kws): return function(*args, **kws) return wrapper def no_op(*args, **kwargs): print('matplotlib module is required for plotting the results') return no_op def get_reducer(option): """Factory function to build a parser for complex numbers. Parameters ---------- option : str Should be one of `['real', 'imag', 'abs', 'angle']`. Implements the numpy function of the same name. Returns ------- callable See docstring for `reducer` below. """ if option not in ['real', 'imag', 'abs', 'angle']: raise ValueError("Invalid parameter name ('%s') for function 'propagate_err'." % option) def reducer(array): """Convert a complex array to a real array. Several conversion methods are available and it does nothing to a purely real array. Parameters ---------- array : array-like Input array. If complex, will be converted to real array via one of the following numpy functions: `real`, `imag`, `abs`, or `angle`. Returns ------- numpy.array Returned array will be purely real. """ if any(np.iscomplex(array)): parsed_array = getattr(np, option)(array) else: parsed_array = array return parsed_array return reducer def propagate_err(z, dz, option): """Perform error propagation on a vector of complex uncertainties to get values for magnitude (abs) and phase (angle) uncertainty. Parameters ---------- z : array-like Array of complex or real numbers. dz : array-like Array of uncertainties corresponding to z. Must satisfy `numpy.shape(dz) == np.shape(z)`. option : str Should be one of `['real', 'imag', 'abs', 'angle']`. Returns ------- numpy.array Returned array will be purely real. Notes ----- Uncertainties are 1/weights. If the weights provided are real, they are assumed to apply equally to the real and imaginary parts. If the weights are complex, the real part of the weights are applied to the real part of the residual and the imaginary part is treated correspondingly. In the case where `option == 'angle'` and `numpy.abs(z) == 0` for any value of `z` the phase angle uncertainty becomes the entire circle and so a value of pi is returned. In the case where `option == 'abs'` and `numpy.abs(z) == 0` for any value of `z` the mangnitude uncertainty is approximated by `numpy.abs(dz)` for that value. """ if option not in ['real', 'imag', 'abs', 'angle']: raise ValueError("Invalid parameter name ('%s') for function 'propagate_err'." % option) # Check the main vector for complex. Do nothing if real. 
if any(np.iscomplex(z)): # if uncertainties are real, apply them equally to # real and imaginary parts if all(np.isreal(dz)): dz = dz+1j*dz if option == 'real': err = np.real(dz) elif option == 'imag': err = np.imag(dz) elif option in ['abs', 'angle']: rz = np.real(z) iz = np.imag(z) rdz = np.real(dz) idz = np.imag(dz) # Don't spit out warnings for divide by zero. Will fix these later. with np.errstate(divide='ignore', invalid='ignore'): if option == 'abs': # Standard error propagation for abs = sqrt(re**2 + im**2) err = np.true_divide(np.sqrt((iz*idz)**2+(rz*rdz)**2), np.abs(z)) # For abs = 0, error is +/- abs(rdz + j idz) err[err == np.inf] = np.abs(dz)[err == np.inf] if option == 'angle': # Standard error propagation for angle = arctan(im/re) err = np.true_divide(np.sqrt((rz*idz)**2+(iz*rdz)**2), np.abs(z)**2) # For abs = 0, error is +/- pi (i.e. the whole circle) err[err == np.inf] = np.pi else: err = dz return err class Model: """Model class.""" _forbidden_args = ('data', 'weights', 'params') _invalid_ivar = "Invalid independent variable name ('%s') for function %s" _invalid_par = "Invalid parameter name ('%s') for function %s" _invalid_hint = "unknown parameter hint '%s' for param '%s'" _hint_names = ('value', 'vary', 'min', 'max', 'expr') def __init__(self, func, independent_vars=None, param_names=None, nan_policy='raise', prefix='', name=None, **kws): """Create a model from a user-supplied model function. The model function will normally take an independent variable (generally, the first argument) and a series of arguments that are meant to be parameters for the model. It will return an array of data to model some data as for a curve-fitting problem. Parameters ---------- func : callable Function to be wrapped. independent_vars : list of str, optional Arguments to func that are independent variables (default is None). param_names : list of str, optional Names of arguments to func that are to be made into parameters (default is None). nan_policy : str, optional How to handle NaN and missing values in data. Must be one of 'raise' (default), 'propagate', or 'omit'. See Note below. prefix : str, optional Prefix used for the model. name : str, optional Name for the model. When None (default) the name is the same as the model function (`func`). **kws : dict, optional Additional keyword arguments to pass to model function. Notes ----- 1. Parameter names are inferred from the function arguments, and a residual function is automatically constructed. 2. The model function must return an array that will be the same size as the data being modeled. 3. nan_policy sets what to do when a NaN or missing value is seen in the data. Should be one of: - 'raise' : Raise a ValueError (default) - 'propagate' : do nothing - 'omit' : drop missing data Examples -------- The model function will normally take an independent variable (generally, the first argument) and a series of arguments that are meant to be parameters for the model. Thus, a simple peak using a Gaussian defined as: >>> import numpy as np >>> def gaussian(x, amp, cen, wid): ... 
return amp * np.exp(-(x-cen)**2 / wid) can be turned into a Model with: >>> gmodel = Model(gaussian) this will automatically discover the names of the independent variables and parameters: >>> print(gmodel.param_names, gmodel.independent_vars) ['amp', 'cen', 'wid'], ['x'] """ self.func = func self._prefix = prefix self._param_root_names = param_names # will not include prefixes self.independent_vars = independent_vars self._func_allargs = [] self._func_haskeywords = False self.nan_policy = nan_policy self.opts = kws self.param_hints = OrderedDict() # the following has been changed from OrderedSet for the time being self._param_names = [] self._parse_params() if self.independent_vars is None: self.independent_vars = [] if name is None and hasattr(self.func, '__name__'): name = self.func.__name__ self._name = name def _reprstring(self, long=False): out = self._name opts = [] if len(self._prefix) > 0: opts.append("prefix='%s'" % (self._prefix)) if long: for k, v in self.opts.items(): opts.append("%s='%s'" % (k, v)) if len(opts) > 0: out = "%s, %s" % (out, ', '.join(opts)) return "Model(%s)" % out def _get_state(self): """Save a Model for serialization. Note: like the standard-ish '__getstate__' method but not really useful with Pickle. """ funcdef = None if HAS_DILL: funcdef = self.func state = (self.func.__name__, funcdef, self._name, self._prefix, self.independent_vars, self._param_root_names, self.param_hints, self.nan_policy, self.opts) return (state, None, None) def _set_state(self, state, funcdefs=None): """Restore Model from serialization. Note: like the standard-ish '__setstate__' method but not really useful with Pickle. Parameters ---------- state : Serialized state from `_get_state`. funcdefs : dict, optional Dictionary of function definitions to use to construct Model. """ return _buildmodel(state, funcdefs=funcdefs) def dumps(self, **kws): """Dump serialization of Model as a JSON string. Parameters ---------- **kws : optional Keyword arguments that are passed to `json.dumps()`. Returns ------- str JSON string representation of Model. See Also -------- loads(), json.dumps() """ return json.dumps(encode4js(self._get_state()), **kws) def dump(self, fp, **kws): """Dump serialization of Model to a file. Parameters ---------- fp : file-like object an open and ``.write()``-supporting file-like object. **kws : optional Keyword arguments that are passed to `json.dumps()`. Returns ------- None or int Return value from `fp.write()`. None for Python 2.7 and the number of characters written in Python 3. See Also -------- dumps(), load(), json.dump() """ return fp.write(self.dumps(**kws)) def loads(self, s, funcdefs=None, **kws): """Load Model from a JSON string. Parameters ---------- s : str Input JSON string containing serialized Model funcdefs : dict, optional Dictionary of function definitions to use to construct Model. **kws : optional Keyword arguments that are passed to `json.loads()`. Returns ------- :class:`Model` Model created from JSON string. See Also -------- dump(), dumps(), load(), json.loads() """ tmp = decode4js(json.loads(s, **kws)) return self._set_state(tmp, funcdefs=funcdefs) def load(self, fp, funcdefs=None, **kws): """Load JSON representation of Model from a file-like object. Parameters ---------- fp : file-like object An open and ``.read()``-supporting file-like object. funcdefs : dict, optional Dictionary of function definitions to use to construct Model. **kws : optional Keyword arguments that are passed to `loads()`. 
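For example, an illustrative sketch of a dump/load round trip (assuming the `gaussian` model function from the Model docstring above is defined):

>>> gmodel = Model(gaussian)
>>> with open('gauss_model.json', 'w') as fh:
...     gmodel.dump(fh)
>>> with open('gauss_model.json') as fh:
...     restored = Model(lambda x: x).load(fh, funcdefs={'gaussian': gaussian})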
Returns ------- :class:`Model` Model created from `fp`. See Also -------- dump(), loads(), json.load() """ return self.loads(fp.read(), funcdefs=funcdefs, **kws) @property def name(self): """Return Model name.""" return self._reprstring(long=False) @name.setter def name(self, value): self._name = value @property def prefix(self): """Return Model prefix.""" return self._prefix @prefix.setter def prefix(self, value): """Change Model prefix.""" self._prefix = value self._set_paramhints_prefix() self._param_names = [] self._parse_params() def _set_paramhints_prefix(self): """Reset parameter hints for prefix: intended to be overwritten.""" pass @property def param_names(self): """Return the parameters of the Model.""" return self._param_names def __repr__(self): """Return representation of Model.""" return "" % (self.name) def copy(self, **kwargs): """DOES NOT WORK.""" raise NotImplementedError("Model.copy does not work. Make a new Model") def _parse_params(self): """Build parameters from function arguments.""" if self.func is None: return # need to fetch the following from the function signature: # pos_args: list of positional argument names # kw_args: dict of keyword arguments with default values # keywords_: name of **kws argument or None # 1. limited support for asteval functions as the model functions: if hasattr(self.func, 'argnames') and hasattr(self.func, 'kwargs'): pos_args = self.func.argnames[:] keywords_ = None kw_args = {} for name, defval in self.func.kwargs: kw_args[name] = defval # 2. modern, best-practice approach: use inspect.signature elif sys.version_info > (3, 4): pos_args = [] kw_args = {} keywords_ = None sig = inspect.signature(self.func) for fnam, fpar in sig.parameters.items(): if fpar.kind == fpar.VAR_KEYWORD: keywords_ = fnam elif fpar.kind == fpar.POSITIONAL_OR_KEYWORD: if fpar.default == fpar.empty: pos_args.append(fnam) else: kw_args[fnam] = fpar.default elif fpar.kind == fpar.VAR_POSITIONAL: raise ValueError("varargs '*%s' is not supported" % fnam) # 3. 
Py2 compatible approach else: argspec = inspect.getargspec(self.func) keywords_ = argspec.keywords pos_args = argspec.args kw_args = {} if argspec.defaults is not None: for val in reversed(argspec.defaults): kw_args[pos_args.pop()] = val # inspection done self._func_haskeywords = keywords_ is not None self._func_allargs = pos_args + list(kw_args.keys()) allargs = self._func_allargs if len(allargs) == 0 and keywords_ is not None: return # default independent_var = 1st argument if self.independent_vars is None: self.independent_vars = [pos_args[0]] # default param names: all positional args # except independent variables self.def_vals = {} might_be_param = [] if self._param_root_names is None: self._param_root_names = pos_args[:] for key, val in kw_args.items(): if (not isinstance(val, bool) and isinstance(val, (float, int))): self._param_root_names.append(key) self.def_vals[key] = val elif val is None: might_be_param.append(key) for p in self.independent_vars: if p in self._param_root_names: self._param_root_names.remove(p) new_opts = {} for opt, val in self.opts.items(): if (opt in self._param_root_names or opt in might_be_param and isinstance(val, Parameter)): self.set_param_hint(opt, value=val.value, min=val.min, max=val.max, expr=val.expr) elif opt in self._func_allargs: new_opts[opt] = val self.opts = new_opts names = [] if self._prefix is None: self._prefix = '' for pname in self._param_root_names: names.append("%s%s" % (self._prefix, pname)) # check variables names for validity # The implicit magic in fit() requires us to disallow some fname = self.func.__name__ for arg in self.independent_vars: if arg not in allargs or arg in self._forbidden_args: raise ValueError(self._invalid_ivar % (arg, fname)) for arg in names: if (self._strip_prefix(arg) not in allargs or arg in self._forbidden_args): raise ValueError(self._invalid_par % (arg, fname)) # the following as been changed from OrderedSet for the time being. self._param_names = names[:] def set_param_hint(self, name, **kwargs): """Set *hints* to use when creating parameters with `make_params()` for the named parameter. This is especially convenient for setting initial values. The `name` can include the models `prefix` or not. The hint given can also include optional bounds and constraints ``(value, vary, min, max, expr)``, which will be used by make_params() when building default parameters. Parameters ---------- name : str Parameter name. **kwargs : optional Arbitrary keyword arguments, needs to be a Parameter attribute. Can be any of the following: - value : float, optional Numerical Parameter value. - vary : bool, optional Whether the Parameter is varied during a fit (default is True). - min : float, optional Lower bound for value (default is `-numpy.inf`, no lower bound). - max : float, optional Upper bound for value (default is `numpy.inf`, no upper bound). - expr : str, optional Mathematical expression used to constrain the value during the fit. Example -------- >>> model = GaussianModel() >>> model.set_param_hint('sigma', min=0) """ npref = len(self._prefix) if npref > 0 and name.startswith(self._prefix): name = name[npref:] if name not in self.param_hints: self.param_hints[name] = OrderedDict() for key, val in kwargs.items(): if key in self._hint_names: self.param_hints[name][key] = val else: warnings.warn(self._invalid_hint % (key, name)) def print_param_hints(self, colwidth=8): """Print a nicely aligned text-table of parameter hints. 
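For example, an illustrative sketch (assuming the built-in GaussianModel from `lmfit.models`):

>>> from lmfit.models import GaussianModel
>>> model = GaussianModel()
>>> model.set_param_hint('sigma', max=10)
>>> model.print_param_hints(colwidth=10)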
Parameters ---------- colwidth : int, optional Width of each column, except for first and last columns. """ name_len = max(len(s) for s in self.param_hints) print('{:{name_len}} {:>{n}} {:>{n}} {:>{n}} {:>{n}} {:{n}}' .format('Name', 'Value', 'Min', 'Max', 'Vary', 'Expr', name_len=name_len, n=colwidth)) line = ('{name:<{name_len}} {value:{n}g} {min:{n}g} {max:{n}g} ' '{vary!s:>{n}} {expr}') for name, values in sorted(self.param_hints.items()): pvalues = dict(name=name, value=np.nan, min=-np.inf, max=np.inf, vary=True, expr='') pvalues.update(**values) print(line.format(name_len=name_len, n=colwidth, **pvalues)) def make_params(self, verbose=False, **kwargs): """Create a Parameters object for a Model. Parameters ---------- verbose : bool, optional Whether to print out messages (default is False). **kwargs : optional Parameter names and initial values. Returns --------- params : Parameters Notes ----- 1. The parameters may or may not have decent initial values for each parameter. 2. This applies any default values or parameter hints that may have been set. """ params = Parameters() # make sure that all named parameters are in params for name in self.param_names: if name in params: par = params[name] else: par = Parameter(name=name) par._delay_asteval = True basename = name[len(self._prefix):] # apply defaults from model function definition if basename in self.def_vals: par.value = self.def_vals[basename] if par.value in (None, -np.inf, np.inf, np.nan): for key, val in self.def_vals.items(): if key in name.lower(): par.value = val # apply defaults from parameter hints if basename in self.param_hints: hint = self.param_hints[basename] for item in self._hint_names: if item in hint: setattr(par, item, hint[item]) # apply values passed in through kw args if basename in kwargs: # kw parameter names with no prefix par.value = kwargs[basename] if name in kwargs: # kw parameter names with prefix par.value = kwargs[name] params.add(par) if verbose: print(' - Adding parameter "%s"' % name) # next build parameters defined in param_hints # note that composites may define their own additional # convenience parameters here for basename, hint in self.param_hints.items(): name = "%s%s" % (self._prefix, basename) if name in params: par = params[name] else: par = Parameter(name=name) params.add(par) if verbose: print(' - Adding parameter for hint "%s"' % name) par._delay_asteval = True for item in self._hint_names: if item in hint: setattr(par, item, hint[item]) if basename in kwargs: par.value = kwargs[basename] # Add the new parameter to self._param_names if name not in self._param_names: self._param_names.append(name) for p in params.values(): p._delay_asteval = False return params def guess(self, data, **kws): """Guess starting values for the parameters of a Model. This is not implemented for all models, but is available for many of the built-in models. Parameters ---------- data : array_like Array of data to use to guess parameter values. **kws : optional Additional keyword arguments, passed to model function. Returns ------- params : Parameters Notes ----- Should be implemented for each model subclass to run self.make_params(), update starting values and return a Parameters object. Raises ------ NotImplementedError """ cname = self.__class__.__name__ msg = 'guess() not implemented for %s' % cname raise NotImplementedError(msg) def _residual(self, params, data, weights, **kwargs): """Return the residual. Default residual: (data-model)*weights. 
If the model returns complex values, the residual is computed by treating the real and imaginary parts separately. In this case, if the weights provided are real, they are assumed to apply equally to the real and imaginary parts. If the weights are complex, the real part of the weights are applied to the real part of the residual and the imaginary part is treated correspondingly. Since the underlying scipy.optimize routines expect numpy.float arrays, the only complex type supported is np.complex. The "ravels" throughout are necessary to support pandas.Series. """ model = self.eval(params, **kwargs) if self.nan_policy == 'raise' and not np.all(np.isfinite(model)): msg = ('The model function generated NaN values and the fit ' 'aborted! Please check your model function and/or set ' 'boundaries on parameters where applicable. In cases like ' 'this, using "nan_policy=\'omit\'" will probably not work.') raise ValueError(msg) diff = model - data if diff.dtype == np.complex: # data/model are complex diff = diff.ravel().view(np.float) if weights is not None: if weights.dtype == np.complex: # weights are complex weights = weights.ravel().view(np.float) else: # real weights but complex data weights = (weights + 1j * weights).ravel().view(np.float) if weights is not None: diff *= weights return np.asarray(diff).ravel() # for compatibility with pandas.Series def _strip_prefix(self, name): npref = len(self._prefix) if npref > 0 and name.startswith(self._prefix): name = name[npref:] return name def make_funcargs(self, params=None, kwargs=None, strip=True): """Convert parameter values and keywords to function arguments.""" if params is None: params = {} if kwargs is None: kwargs = {} out = {} out.update(self.opts) for name, par in params.items(): if strip: name = self._strip_prefix(name) if name in self._func_allargs or self._func_haskeywords: out[name] = par.value # kwargs handled slightly differently -- may set param value too! for name, val in kwargs.items(): if strip: name = self._strip_prefix(name) if name in self._func_allargs or self._func_haskeywords: out[name] = val if name in params: params[name].value = val return out def _make_all_args(self, params=None, **kwargs): """Generate **all** function args for all functions.""" args = {} for key, val in self.make_funcargs(params, kwargs).items(): args["%s%s" % (self._prefix, key)] = val return args def eval(self, params=None, **kwargs): """Evaluate the model with supplied parameters and keyword arguments. Parameters ----------- params : Parameters, optional Parameters to use in Model. **kwargs : optional Additional keyword arguments to pass to model function. Returns ------- numpy.ndarray Value of model given the parameters and other arguments. Notes ----- 1. if `params` is None, the values for all parameters are expected to be provided as keyword arguments. If `params` is given, and a keyword argument for a parameter value is also given, the keyword argument will be used. 2. all non-parameter arguments for the model function, **including all the independent variables** will need to be passed in using keyword arguments. """ return self.func(**self.make_funcargs(params, kwargs)) @property def components(self): """Return components for composite model.""" return [self] def eval_components(self, params=None, **kwargs): """Evaluate the model with the supplied parameters. Parameters ----------- params : Parameters, optional Parameters to use in Model. **kwargs : optional Additional keyword arguments to pass to model function. 
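For example, an illustrative sketch (assuming an array `x` and a fitted Parameters object `params` for a two-peak composite model):

>>> from lmfit.models import GaussianModel
>>> model = GaussianModel(prefix='g1_') + GaussianModel(prefix='g2_')
>>> comps = model.eval_components(params=params, x=x)
>>> print(list(comps.keys()))
['g1_', 'g2_']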
Returns ------- OrderedDict Keys are prefixes for component model, values are value of each component. """ key = self._prefix if len(key) < 1: key = self._name return {key: self.eval(params=params, **kwargs)} def fit(self, data, params=None, weights=None, method='leastsq', iter_cb=None, scale_covar=True, verbose=False, fit_kws=None, nan_policy=None, calc_covar=True, **kwargs): """Fit the model to the data using the supplied Parameters. Parameters ---------- data : array_like Array of data to be fit. params : Parameters, optional Parameters to use in fit (default is None). weights : array_like of same size as `data`, optional Weights to use for the calculation of the fit residual (default is None). method : str, optional Name of fitting method to use (default is `'leastsq'`). iter_cb : callable, optional Callback function to call at each iteration (default is None). scale_covar : bool, optional Whether to automatically scale the covariance matrix when calculating uncertainties (default is True). verbose: bool, optional Whether to print a message when a new parameter is added because of a hint (default is True). nan_policy : str, optional, one of 'raise' (default), 'propagate', or 'omit'. What to do when encountering NaNs when fitting Model. fit_kws: dict, optional Options to pass to the minimizer being used. calc_covar : bool, optional Whether to calculate the covariance matrix (default is True) for solvers other than `leastsq` and `least_squares`. Requires the `numdifftools` package to be installed. **kwargs: optional Arguments to pass to the model function, possibly overriding params. Returns ------- ModelResult Examples -------- Take `t` to be the independent variable and data to be the curve we will fit. Use keyword arguments to set initial guesses: >>> result = my_model.fit(data, tau=5, N=3, t=t) Or, for more control, pass a Parameters object. >>> result = my_model.fit(data, params, t=t) Keyword arguments override Parameters. >>> result = my_model.fit(data, params, tau=5, t=t) Notes ----- 1. if `params` is None, the values for all parameters are expected to be provided as keyword arguments. If `params` is given, and a keyword argument for a parameter value is also given, the keyword argument will be used. 2. all non-parameter arguments for the model function, **including all the independent variables** will need to be passed in using keyword arguments. 3. Parameters (however passed in), are copied on input, so the original Parameter objects are unchanged, and the updated values are in the returned `ModelResult`. """ if params is None: params = self.make_params(verbose=verbose) else: params = deepcopy(params) # If any kwargs match parameter names, override params. param_kwargs = set(kwargs.keys()) & set(self.param_names) for name in param_kwargs: p = kwargs[name] if isinstance(p, Parameter): p.name = name # allows N=Parameter(value=5) with implicit name params[name] = deepcopy(p) else: params[name].set(value=p) del kwargs[name] # All remaining kwargs should correspond to independent variables. for name in kwargs: if name not in self.independent_vars: warnings.warn("The keyword argument %s does not " % name + "match any arguments of the model function. " + "It will be ignored.", UserWarning) # If any parameter is not initialized raise a more helpful error. 
missing_param = any([p not in params.keys() for p in self.param_names]) blank_param = any([(p.value is None and p.expr is None) for p in params.values()]) if missing_param or blank_param: msg = ('Assign each parameter an initial value by passing ' 'Parameters or keyword arguments to fit.\n') missing = [p for p in self.param_names if p not in params.keys()] blank = [name for name, p in params.items() if p.value is None and p.expr is None] msg += 'Missing parameters: %s\n' % str(missing) msg += 'Non initialized parameters: %s' % str(blank) raise ValueError(msg) # Do not alter anything that implements the array interface (np.array, pd.Series) # but convert other iterables (e.g., Python lists) to numpy arrays. if not hasattr(data, '__array__'): data = np.asfarray(data) for var in self.independent_vars: var_data = kwargs[var] if isinstance(var_data, (list, tuple)): kwargs[var] = np.asfarray(var_data) # Handle null/missing values. if nan_policy is not None: self.nan_policy = nan_policy mask = None if self.nan_policy == 'omit': mask = ~isnull(data) if mask is not None: data = data[mask] if weights is not None: weights = _align(weights, mask, data) # If independent_vars and data are alignable (pandas), align them, # and apply the mask from above if there is one. for var in self.independent_vars: if not np.isscalar(kwargs[var]): # print("Model fit align ind dep ", var, mask.sum()) kwargs[var] = _align(kwargs[var], mask, data) if fit_kws is None: fit_kws = {} output = ModelResult(self, params, method=method, iter_cb=iter_cb, scale_covar=scale_covar, fcn_kws=kwargs, nan_policy=self.nan_policy, calc_covar=calc_covar, **fit_kws) output.fit(data=data, weights=weights) output.components = self.components return output def __add__(self, other): """+""" return CompositeModel(self, other, operator.add) def __sub__(self, other): """-""" return CompositeModel(self, other, operator.sub) def __mul__(self, other): """*""" return CompositeModel(self, other, operator.mul) def __div__(self, other): """/""" return CompositeModel(self, other, operator.truediv) def __truediv__(self, other): """/""" return CompositeModel(self, other, operator.truediv) class CompositeModel(Model): """Combine two models (`left` and `right`) with a binary operator (`op`) into a CompositeModel. Normally, one does not have to explicitly create a `CompositeModel`, but can use normal Python operators `+`, '-', `*`, and `/` to combine components as in:: >>> mod = Model(fcn1) + Model(fcn2) * Model(fcn3) """ _names_collide = ("\nTwo models have parameters named '{clash}'. " "Use distinct names.") _bad_arg = "CompositeModel: argument {arg} is not a Model" _bad_op = "CompositeModel: operator {op} is not callable" _known_ops = {operator.add: '+', operator.sub: '-', operator.mul: '*', operator.truediv: '/'} def __init__(self, left, right, op, **kws): """ Parameters ---------- left : Model Left-hand model. right : Model Right-hand model. op : callable binary operator Operator to combine `left` and `right` models. **kws : optional Additional keywords are passed to `Model` when creating this new model. Notes ----- 1. The two models must use the same independent variable. 
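For example, an illustrative sketch of building a composite explicitly (assuming model functions `fcn1` and `fcn2` with distinct parameter names are defined):

>>> import operator
>>> mod = CompositeModel(Model(fcn1), Model(fcn2), operator.add)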
""" if not isinstance(left, Model): raise ValueError(self._bad_arg.format(arg=left)) if not isinstance(right, Model): raise ValueError(self._bad_arg.format(arg=right)) if not callable(op): raise ValueError(self._bad_op.format(op=op)) self.left = left self.right = right self.op = op name_collisions = set(left.param_names) & set(right.param_names) if len(name_collisions) > 0: msg = '' for collision in name_collisions: msg += self._names_collide.format(clash=collision) raise NameError(msg) # we assume that all the sub-models have the same independent vars if 'independent_vars' not in kws: kws['independent_vars'] = self.left.independent_vars if 'nan_policy' not in kws: kws['nan_policy'] = self.left.nan_policy def _tmp(self, *args, **kws): pass Model.__init__(self, _tmp, **kws) for side in (left, right): prefix = side.prefix for basename, hint in side.param_hints.items(): self.param_hints["%s%s" % (prefix, basename)] = hint def _parse_params(self): self._func_haskeywords = (self.left._func_haskeywords or self.right._func_haskeywords) self._func_allargs = (self.left._func_allargs + self.right._func_allargs) self.def_vals = deepcopy(self.right.def_vals) self.def_vals.update(self.left.def_vals) self.opts = deepcopy(self.right.opts) self.opts.update(self.left.opts) def _reprstring(self, long=False): return "(%s %s %s)" % (self.left._reprstring(long=long), self._known_ops.get(self.op, self.op), self.right._reprstring(long=long)) def eval(self, params=None, **kwargs): """Evaluate model function for composite model.""" return self.op(self.left.eval(params=params, **kwargs), self.right.eval(params=params, **kwargs)) def eval_components(self, **kwargs): """Return OrderedDict of name, results for each component.""" out = OrderedDict(self.left.eval_components(**kwargs)) out.update(self.right.eval_components(**kwargs)) return out @property def param_names(self): """Return parameter names for composite model.""" return self.left.param_names + self.right.param_names @property def components(self): """Return components for composite model.""" return self.left.components + self.right.components def _get_state(self): return (self.left._get_state(), self.right._get_state(), self.op.__name__) def _set_state(self, state, funcdefs=None): return _buildmodel(state, funcdefs=funcdefs) def _make_all_args(self, params=None, **kwargs): """Generate **all** function arguments for all functions.""" out = self.right._make_all_args(params=params, **kwargs) out.update(self.left._make_all_args(params=params, **kwargs)) return out def save_model(model, fname): """Save a Model to a file. Parameters ---------- model : model instance Model to be saved. fname : str Name of file for saved Model. """ with open(fname, 'w') as fout: model.dump(fout) def load_model(fname, funcdefs=None): """Load a saved Model from a file. Parameters ---------- fname : str Name of file containing saved Model. funcdefs : dict, optional Dictionary of custom function names and definitions. Returns ------- Model """ m = Model(lambda x: x) with open(fname) as fh: model = m.load(fh, funcdefs=funcdefs) return model def _buildmodel(state, funcdefs=None): """Build model from saved state. Intended for internal use only. 
""" if len(state) != 3: raise ValueError("Cannot restore Model") known_funcs = {} for fname in lineshapes.functions: fcn = getattr(lineshapes, fname, None) if callable(fcn): known_funcs[fname] = fcn if funcdefs is not None: known_funcs.update(funcdefs) left, right, op = state if op is None and right is None: (fname, fcndef, name, prefix, ivars, pnames, phints, nan_policy, opts) = left if not callable(fcndef) and fname in known_funcs: fcndef = known_funcs[fname] if fcndef is None: raise ValueError("Cannot restore Model: model function not found") model = Model(fcndef, name=name, prefix=prefix, independent_vars=ivars, param_names=pnames, nan_policy=nan_policy, **opts) for name, hint in phints.items(): model.set_param_hint(name, **hint) return model else: lmodel = _buildmodel(left, funcdefs=funcdefs) rmodel = _buildmodel(right, funcdefs=funcdefs) return CompositeModel(lmodel, rmodel, getattr(operator, op)) def save_modelresult(modelresult, fname): """Save a ModelResult to a file. Parameters ---------- modelresult : ModelResult instance ModelResult to be saved. fname : str Name of file for saved ModelResult. """ with open(fname, 'w') as fout: modelresult.dump(fout) def load_modelresult(fname, funcdefs=None): """Load a saved ModelResult from a file. Parameters ---------- fname : str Name of file containing saved ModelResult. funcdefs : dict, optional Dictionary of custom function names and definitions. Returns ------- ModelResult """ params = Parameters() modres = ModelResult(Model(lambda x: x, None), params) with open(fname) as fh: mresult = modres.load(fh, funcdefs=funcdefs) return mresult class ModelResult(Minimizer): """Result from the Model fit. This has many attributes and methods for viewing and working with the results of a fit using Model. It inherits from Minimizer, so that it can be used to modify and re-run the fit for the Model. """ def __init__(self, model, params, data=None, weights=None, method='leastsq', fcn_args=None, fcn_kws=None, iter_cb=None, scale_covar=True, nan_policy='raise', calc_covar=True, **fit_kws): """ Parameters ---------- model : Model Model to use. params : Parameters Parameters with initial values for model. data : array_like, optional Data to be modeled. weights : array_like, optional Weights to multiply (data-model) for fit residual. method : str, optional Name of minimization method to use (default is `'leastsq'`). fcn_args : sequence, optional Positional arguments to send to model function. fcn_dict : dict, optional Keyword arguments to send to model function. iter_cb : callable, optional Function to call on each iteration of fit. scale_covar : bool, optional Whether to scale covariance matrix for uncertainty evaluation. nan_policy : str, optional, one of 'raise' (default), 'propagate', or 'omit'. What to do when encountering NaNs when fitting Model. calc_covar : bool, optional Whether to calculate the covariance matrix (default is True) for solvers other than `leastsq` and `least_squares`. Requires the `numdifftools` package to be installed. **fit_kws : optional Keyword arguments to send to minimization routine. 
""" self.model = model self.data = data self.weights = weights self.method = method self.ci_out = None self.user_options = None self.init_params = deepcopy(params) Minimizer.__init__(self, model._residual, params, fcn_args=fcn_args, fcn_kws=fcn_kws, iter_cb=iter_cb, nan_policy=nan_policy, scale_covar=scale_covar, calc_covar=calc_covar, **fit_kws) def fit(self, data=None, params=None, weights=None, method=None, nan_policy=None, **kwargs): """Re-perform fit for a Model, given data and params. Parameters ---------- data : array_like, optional Data to be modeled. params : Parameters, optional Parameters with initial values for model. weights : array_like, optional Weights to multiply (data-model) for fit residual. method : str, optional Name of minimization method to use (default is `'leastsq'`). nan_policy : str, optional, one of 'raise' (default), 'propagate', or 'omit'. What to do when encountering NaNs when fitting Model. **kwargs : optional Keyword arguments to send to minimization routine. """ if data is not None: self.data = data if params is not None: self.init_params = params if weights is not None: self.weights = weights if method is not None: self.method = method if nan_policy is not None: self.nan_policy = nan_policy self.ci_out = None self.userargs = (self.data, self.weights) self.userkws.update(kwargs) self.init_fit = self.model.eval(params=self.params, **self.userkws) _ret = self.minimize(method=self.method) for attr in dir(_ret): if not attr.startswith('_'): try: setattr(self, attr, getattr(_ret, attr)) except AttributeError: pass self.init_values = self.model._make_all_args(self.init_params) self.best_values = self.model._make_all_args(_ret.params) self.best_fit = self.model.eval(params=_ret.params, **self.userkws) def eval(self, params=None, **kwargs): """Evaluate model function. Parameters ---------- params : Parameters, optional Parameters to use. **kwargs : optional Options to send to Model.eval() Returns ------- out : numpy.ndarray Array for evaluated model. """ userkws = self.userkws.copy() userkws.update(kwargs) if params is None: params = self.params return self.model.eval(params=params, **userkws) def eval_components(self, params=None, **kwargs): """Evaluate each component of a composite model function. Parameters ---------- params : Parameters, optional Parameters, defaults to ModelResult.params **kwargs : optional Keyword arguments to pass to model function. Returns ------- OrderedDict Keys are prefixes of component models, and values are the estimated model value for each component of the model. """ userkws = self.userkws.copy() userkws.update(kwargs) if params is None: params = self.params return self.model.eval_components(params=params, **userkws) def eval_uncertainty(self, params=None, sigma=1, **kwargs): """Evaluate the uncertainty of the *model function*. This can be used to give confidence bands for the model from the uncertainties in the best-fit parameters. Parameters ---------- params : Parameters, optional Parameters, defaults to ModelResult.params. sigma : float, optional Confidence level, i.e. how many sigma (default is 1). **kwargs : optional Values of options, independent variables, etcetera. Returns ------- numpy.ndarray Uncertainty at each value of the model. Example ------- >>> out = model.fit(data, params, x=x) >>> dely = out.eval_uncertainty(x=x) >>> plt.plot(x, data) >>> plt.plot(x, out.best_fit) >>> plt.fill_between(x, out.best_fit-dely, ... out.best_fit+dely, color='#888888') Notes ----- 1. 
This is based on the excellent and clear example from https://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html#confidence-and-prediction-intervals, which references the original work of: J. Wolberg, Data Analysis Using the Method of Least Squares, 2006, Springer 2. The value of sigma is number of `sigma` values, and is converted to a probability. Values of 1, 2, or 3 give probabilities of 0.6827, 0.9545, and 0.9973, respectively. If the sigma value is < 1, it is interpreted as the probability itself. That is, `sigma=1` and `sigma=0.6827` will give the same results, within precision errors. """ userkws = self.userkws.copy() userkws.update(kwargs) if params is None: params = self.params nvarys = self.nvarys # ensure fjac and df2 are correct size if independent var updated by kwargs ndata = self.model.eval(self.params, **userkws).size covar = self.covar fjac = np.zeros((nvarys, ndata)) df2 = np.zeros(ndata) # find derivative by hand! pars = self.params.copy() for i in range(nvarys): pname = self.var_names[i] val0 = pars[pname].value dval = pars[pname].stderr/3.0 pars[pname].value = val0 + dval res1 = self.model.eval(pars, **userkws) pars[pname].value = val0 - dval res2 = self.model.eval(pars, **userkws) pars[pname].value = val0 fjac[i] = (res1 - res2) / (2*dval) for i in range(nvarys): for j in range(nvarys): df2 += fjac[i]*fjac[j]*covar[i, j] if sigma < 1.0: prob = sigma else: prob = erf(sigma/np.sqrt(2)) return np.sqrt(df2) * t.ppf((prob+1)/2.0, self.ndata-nvarys) def conf_interval(self, **kwargs): """Calculate the confidence intervals for the variable parameters. Confidence intervals are calculated using the :func:`confidence.conf_interval()` function and keyword arguments (`**kwargs`) are passed to that function. The result is stored in the :attr:`ci_out` attribute so that it can be accessed without recalculating them. """ if self.ci_out is None: self.ci_out = conf_interval(self, self, **kwargs) return self.ci_out def ci_report(self, with_offset=True, ndigits=5, **kwargs): """Return a nicely formatted text report of the confidence intervals. Parameters ---------- with_offset : bool, optional Whether to subtract best value from all other values (default is True). ndigits : int, optional Number of significant digits to show (default is 5). **kwargs: optional Keyword arguments that are passed to the `conf_interval` function. Returns ------- str Text of formatted report on confidence intervals. """ return ci_report(self.conf_interval(**kwargs), with_offset=with_offset, ndigits=ndigits) def fit_report(self, modelpars=None, show_correl=True, min_correl=0.1, sort_pars=False): """Return a printable fit report. The report contains fit statistics and best-fit values with uncertainties and correlations. Parameters ---------- modelpars : Parameters, optional Known Model Parameters. show_correl : bool, optional Whether to show list of sorted correlations (default is True). min_correl : float, optional Smallest correlation in absolute value to show (default is 0.1). sort_pars : callable, optional Whether to show parameter names sorted in alphanumerical order (default is False). If False, then the parameters will be listed in the order as they were added to the Parameters dictionary. If callable, then this (one argument) function is used to extract a comparison key from each list element. Returns ------- text : str Multi-line text of fit report. 
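# Illustrative sketch: confidence bands from eval_uncertainty() and explicit
# confidence intervals from conf_interval()/ci_report().  Assumes `result` is
# a ModelResult from a fit with independent variable `x`, as in the sketch
# after the save/load helpers above.
dely = result.eval_uncertainty(sigma=2)          # 2-sigma band (~95.45%)
dely_p = result.eval_uncertainty(sigma=0.9545)   # same band, given as a probability
ci = result.conf_interval()                      # cached in result.ci_out
print(result.ci_report(ndigits=4))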
See Also -------- :func:`fit_report()` """ report = fit_report(self, modelpars=modelpars, show_correl=show_correl, min_correl=min_correl, sort_pars=sort_pars) modname = self.model._reprstring(long=True) return '[[Model]]\n %s\n%s' % (modname, report) def _repr_html_(self, show_correl=True, min_correl=0.1): """Return an HTML representation of the fit results.""" report = fitreport_html_table(self, show_correl=show_correl, min_correl=min_correl) modname = self.model._reprstring(long=True) return "<h2> Model</h2>
    %s %s" % (modname, report) def dumps(self, **kws): """Represent ModelResult as a JSON string. Parameters ---------- **kws : optional Keyword arguments that are passed to `json.dumps()`. Returns ------- str JSON string representation of ModelResult. See Also -------- loads(), json.dumps() """ out = {'__class__': 'lmfit.ModelResult', '__version__': '1', 'model': encode4js(self.model._get_state())} pasteval = self.params._asteval out['params'] = [p.__getstate__() for p in self.params.values()] out['unique_symbols'] = {key: encode4js(pasteval.symtable[key]) for key in pasteval.user_defined_symbols()} for attr in ('aborted', 'aic', 'best_values', 'bic', 'chisqr', 'ci_out', 'col_deriv', 'covar', 'errorbars', 'flatchain', 'ier', 'init_values', 'lmdif_message', 'message', 'method', 'nan_policy', 'ndata', 'nfev', 'nfree', 'nvarys', 'redchi', 'scale_covar', 'calc_covar', 'success', 'userargs', 'userkws', 'values', 'var_names', 'weights', 'user_options'): val = getattr(self, attr) if isinstance(val, np.bool_): val = bool(val) out[attr] = encode4js(val) return json.dumps(out, **kws) def dump(self, fp, **kws): """Dump serialization of ModelResult to a file. Parameters ---------- fp : file-like object An open and ``.write()``-supporting file-like object. **kws : optional Keyword arguments that are passed to `json.dumps()`. Returns ------- None or int Return value from `fp.write()`. None for Python 2.7 and the number of characters written in Python 3. See Also -------- dumps(), load(), json.dump() """ return fp.write(self.dumps(**kws)) def loads(self, s, funcdefs=None, **kws): """Load ModelResult from a JSON string. Parameters ---------- s : str String representation of ModelResult, as from `dumps()`. funcdefs : dict, optional Dictionary of custom function names and definitions. **kws : optional Keyword arguments that are passed to `json.dumps()`. Returns ------- :class:`ModelResult` ModelResult instance from JSON string representation. See Also -------- load(), dumps(), json.dumps() """ modres = json.loads(s, **kws) if 'modelresult' not in modres['__class__'].lower(): raise AttributeError('ModelResult.loads() needs saved ModelResult') modres = decode4js(modres) if 'model' not in modres or 'params' not in modres: raise AttributeError('ModelResult.loads() needs valid ModelResult') # model self.model = _buildmodel(decode4js(modres['model']), funcdefs=funcdefs) # params self.params = Parameters() state = {'unique_symbols': modres['unique_symbols'], 'params': []} for parstate in modres['params']: _par = Parameter(name='') _par.__setstate__(parstate) state['params'].append(_par) self.params.__setstate__(state) for attr in ('aborted', 'aic', 'best_fit', 'best_values', 'bic', 'chisqr', 'ci_out', 'col_deriv', 'covar', 'data', 'errorbars', 'fjac', 'flatchain', 'ier', 'init_fit', 'init_values', 'kws', 'lmdif_message', 'message', 'method', 'nan_policy', 'ndata', 'nfev', 'nfree', 'nvarys', 'redchi', 'residual', 'scale_covar', 'calc_covar', 'success', 'userargs', 'userkws', 'var_names', 'weights', 'user_options'): setattr(self, attr, decode4js(modres.get(attr, None))) self.best_fit = self.model.eval(self.params, **self.userkws) if len(self.userargs) == 2: self.data = self.userargs[0] self.weights = self.userargs[1] self.init_params = self.model.make_params(**self.init_values) self.result = MinimizerResult() self.result.params = self.params self.init_vals = list(self.init_values.items()) return self def load(self, fp, funcdefs=None, **kws): """Load JSON representation of ModelResult from a file-like object. 
Parameters ---------- fp : file-like object An open and ``.read()``-supporting file-like object. funcdefs : dict, optional Dictionary of function definitions to use to construct Model. **kws : optional Keyword arguments that are passed to `loads()`. Returns ------- :class:`ModelResult` ModelResult created from `fp`. See Also -------- dump(), loads(), json.load() """ return self.loads(fp.read(), funcdefs=funcdefs, **kws) @_ensureMatplotlib def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--', xlabel=None, ylabel=None, yerr=None, numpoints=None, data_kws=None, fit_kws=None, init_kws=None, ax_kws=None, show_init=False, parse_complex='abs'): """Plot the fit results using matplotlib, if available. The plot will include the data points, the initial fit curve (optional, with `show_init=True`), and the best-fit curve. If the fit model included weights or if `yerr` is specified, errorbars will also be plotted. Parameters ---------- ax : matplotlib.axes.Axes, optional The axes to plot on. The default in None, which means use the current pyplot axis or create one if there is none. datafmt : str, optional Matplotlib format string for data points. fitfmt : str, optional Matplotlib format string for fitted curve. initfmt : str, optional Matplotlib format string for initial conditions for the fit. xlabel : str, optional Matplotlib format string for labeling the x-axis. ylabel : str, optional Matplotlib format string for labeling the y-axis. yerr : numpy.ndarray, optional Array of uncertainties for data array. numpoints : int, optional If provided, the final and initial fit curves are evaluated not only at data points, but refined to contain `numpoints` points in total. data_kws : dict, optional Keyword arguments passed on to the plot function for data points. fit_kws : dict, optional Keyword arguments passed on to the plot function for fitted curve. init_kws : dict, optional Keyword arguments passed on to the plot function for the initial conditions of the fit. ax_kws : dict, optional Keyword arguments for a new axis, if there is one being created. show_init : bool, optional Whether to show the initial conditions for the fit (default is False). parse_complex : str, optional How to reduce complex data for plotting. Options are one of `['real', 'imag', 'abs', 'angle']`, which correspond to the numpy functions of the same name (default is 'abs'). Returns ------- matplotlib.axes.Axes Notes ----- For details about plot format strings and keyword arguments see documentation of matplotlib.axes.Axes.plot. If `yerr` is specified or if the fit model included weights, then matplotlib.axes.Axes.errorbar is used to plot the data. If `yerr` is not specified and the fit includes weights, `yerr` set to 1/self.weights If model returns complex data, `yerr` is treated the same way that weights are in this case. If `ax` is None then `matplotlib.pyplot.gca(**ax_kws)` is called. See Also -------- ModelResult.plot_residuals : Plot the fit residuals using matplotlib. ModelResult.plot : Plot the fit results and residuals using matplotlib. 
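# Illustrative sketch: plotting data, initial guess, and best fit on a denser
# x grid with plot_fit().  Assumes `result` is an existing ModelResult and
# matplotlib is installed.
import matplotlib.pyplot as plt

ax = result.plot_fit(show_init=True, numpoints=500, datafmt='.')
ax.set_xlabel('x')
plt.show()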
""" from matplotlib import pyplot as plt if data_kws is None: data_kws = {} if fit_kws is None: fit_kws = {} if init_kws is None: init_kws = {} if ax_kws is None: ax_kws = {} # The function reduce_complex will convert complex vectors into real vectors reduce_complex = get_reducer(parse_complex) if len(self.model.independent_vars) == 1: independent_var = self.model.independent_vars[0] else: print('Fit can only be plotted if the model function has one ' 'independent variable.') return False if not isinstance(ax, plt.Axes): ax = plt.gca(**ax_kws) x_array = self.userkws[independent_var] # make a dense array for x-axis if data is not dense if numpoints is not None and len(self.data) < numpoints: x_array_dense = np.linspace(min(x_array), max(x_array), numpoints) else: x_array_dense = x_array if show_init: ax.plot( x_array_dense, reduce_complex(self.model.eval( self.init_params, **{independent_var: x_array_dense})), initfmt, label='init', **init_kws) if yerr is None and self.weights is not None: yerr = 1.0/self.weights if yerr is not None: ax.errorbar(x_array, reduce_complex(self.data), yerr=propagate_err(self.data, yerr, parse_complex), fmt=datafmt, label='data', **data_kws) else: ax.plot(x_array, reduce_complex(self.data), datafmt, label='data', **data_kws) ax.plot( x_array_dense, reduce_complex(self.model.eval(self.params, **{independent_var: x_array_dense})), fitfmt, label='best-fit', **fit_kws) ax.set_title(self.model.name) if xlabel is None: ax.set_xlabel(independent_var) else: ax.set_xlabel(xlabel) if ylabel is None: ax.set_ylabel('y') else: ax.set_ylabel(ylabel) ax.legend(loc='best') return ax @_ensureMatplotlib def plot_residuals(self, ax=None, datafmt='o', yerr=None, data_kws=None, fit_kws=None, ax_kws=None, parse_complex='abs'): """Plot the fit residuals using matplotlib, if available. If `yerr` is supplied or if the model included weights, errorbars will also be plotted. Parameters ---------- ax : matplotlib.axes.Axes, optional The axes to plot on. The default in None, which means use the current pyplot axis or create one if there is none. datafmt : str, optional Matplotlib format string for data points. yerr : numpy.ndarray, optional Array of uncertainties for data array. data_kws : dict, optional Keyword arguments passed on to the plot function for data points. fit_kws : dict, optional Keyword arguments passed on to the plot function for fitted curve. ax_kws : dict, optional Keyword arguments for a new axis, if there is one being created. parse_complex : str, optional How to reduce complex data for plotting. Options are one of `['real', 'imag', 'abs', 'angle']`, which correspond to the numpy functions of the same name (default is 'abs'). Returns ------- matplotlib.axes.Axes Notes ----- For details about plot format strings and keyword arguments see documentation of matplotlib.axes.Axes.plot. If `yerr` is specified or if the fit model included weights, then matplotlib.axes.Axes.errorbar is used to plot the data. If `yerr` is not specified and the fit includes weights, `yerr` set to 1/self.weights If model returns complex data, `yerr` is treated the same way that weights are in this case. If `ax` is None then `matplotlib.pyplot.gca(**ax_kws)` is called. See Also -------- ModelResult.plot_fit : Plot the fit results using matplotlib. ModelResult.plot : Plot the fit results and residuals using matplotlib. 
""" from matplotlib import pyplot as plt if data_kws is None: data_kws = {} if fit_kws is None: fit_kws = {} if ax_kws is None: ax_kws = {} # The function reduce_complex will convert complex vectors into real vectors reduce_complex = get_reducer(parse_complex) if len(self.model.independent_vars) == 1: independent_var = self.model.independent_vars[0] else: print('Fit can only be plotted if the model function has one ' 'independent variable.') return False if not isinstance(ax, plt.Axes): ax = plt.gca(**ax_kws) x_array = self.userkws[independent_var] ax.axhline(0, **fit_kws) if yerr is None and self.weights is not None: yerr = 1.0/self.weights if yerr is not None: ax.errorbar(x_array, reduce_complex(self.eval()) - reduce_complex(self.data), yerr=propagate_err(self.data, yerr, parse_complex), fmt=datafmt, label='residuals', **data_kws) else: ax.plot(x_array, reduce_complex(self.eval()) - reduce_complex(self.data), datafmt, label='residuals', **data_kws) ax.set_title(self.model.name) ax.set_ylabel('residuals') ax.legend(loc='best') return ax @_ensureMatplotlib def plot(self, datafmt='o', fitfmt='-', initfmt='--', xlabel=None, ylabel=None, yerr=None, numpoints=None, fig=None, data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None, ax_fit_kws=None, fig_kws=None, show_init=False, parse_complex='abs'): """Plot the fit results and residuals using matplotlib, if available. The method will produce a matplotlib figure with both results of the fit and the residuals plotted. If the fit model included weights, errorbars will also be plotted. To show the initial conditions for the fit, pass the argument `show_init=True`. Parameters ---------- datafmt : str, optional Matplotlib format string for data points. fitfmt : str, optional Matplotlib format string for fitted curve. initfmt : str, optional Matplotlib format string for initial conditions for the fit. xlabel : str, optional Matplotlib format string for labeling the x-axis. ylabel : str, optional Matplotlib format string for labeling the y-axis. yerr : numpy.ndarray, optional Array of uncertainties for data array. numpoints : int, optional If provided, the final and initial fit curves are evaluated not only at data points, but refined to contain `numpoints` points in total. fig : matplotlib.figure.Figure, optional The figure to plot on. The default is None, which means use the current pyplot figure or create one if there is none. data_kws : dict, optional Keyword arguments passed on to the plot function for data points. fit_kws : dict, optional Keyword arguments passed on to the plot function for fitted curve. init_kws : dict, optional Keyword arguments passed on to the plot function for the initial conditions of the fit. ax_res_kws : dict, optional Keyword arguments for the axes for the residuals plot. ax_fit_kws : dict, optional Keyword arguments for the axes for the fit plot. fig_kws : dict, optional Keyword arguments for a new figure, if there is one being created. show_init : bool, optional Whether to show the initial conditions for the fit (default is False). parse_complex : str, optional How to reduce complex data for plotting. Options are one of `['real', 'imag', 'abs', 'angle']`, which correspond to the numpy functions of the same name (default is 'abs'). Returns ------- A tuple with matplotlib's Figure and GridSpec objects. Notes ----- The method combines ModelResult.plot_fit and ModelResult.plot_residuals. If `yerr` is specified or if the fit model included weights, then matplotlib.axes.Axes.errorbar is used to plot the data. 
If `yerr` is not specified and the fit includes weights, `yerr` set to 1/self.weights If model returns complex data, `yerr` is treated the same way that weights are in this case. If `fig` is None then `matplotlib.pyplot.figure(**fig_kws)` is called, otherwise `fig_kws` is ignored. See Also -------- ModelResult.plot_fit : Plot the fit results using matplotlib. ModelResult.plot_residuals : Plot the fit residuals using matplotlib. """ from matplotlib import pyplot as plt if data_kws is None: data_kws = {} if fit_kws is None: fit_kws = {} if init_kws is None: init_kws = {} if ax_res_kws is None: ax_res_kws = {} if ax_fit_kws is None: ax_fit_kws = {} # make a square figure with side equal to the default figure's x-size figxsize = plt.rcParams['figure.figsize'][0] fig_kws_ = dict(figsize=(figxsize, figxsize)) if fig_kws is not None: fig_kws_.update(fig_kws) if len(self.model.independent_vars) != 1: print('Fit can only be plotted if the model function has one ' 'independent variable.') return False if not isinstance(fig, plt.Figure): fig = plt.figure(**fig_kws_) gs = plt.GridSpec(nrows=2, ncols=1, height_ratios=[1, 4]) ax_res = fig.add_subplot(gs[0], **ax_res_kws) ax_fit = fig.add_subplot(gs[1], sharex=ax_res, **ax_fit_kws) self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt, yerr=yerr, initfmt=initfmt, xlabel=xlabel, ylabel=ylabel, numpoints=numpoints, data_kws=data_kws, fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws, show_init=show_init, parse_complex=parse_complex) self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr, data_kws=data_kws, fit_kws=fit_kws, ax_kws=ax_res_kws, parse_complex=parse_complex) plt.setp(ax_res.get_xticklabels(), visible=False) ax_fit.set_title('') return fig, gs lmfit-py-1.0.0/lmfit/models.py000066400000000000000000001451051357751001700162350ustar00rootroot00000000000000"""Module containing built-in fitting models.""" import time from asteval import Interpreter, get_ast_names import numpy as np from . 
import lineshapes from .lineshapes import (breit_wigner, damped_oscillator, dho, donaich, expgaussian, exponential, gaussian, linear, lognormal, lorentzian, moffat, parabolic, pearson7, powerlaw, pvoigt, rectangle, skewed_gaussian, skewed_voigt, split_lorentzian, step, students_t, voigt) from .model import Model tiny = np.finfo(np.float).eps class DimensionalError(Exception): """Raise exception when number of independent variables is not one.""" pass def _validate_1d(independent_vars): if len(independent_vars) != 1: raise DimensionalError( "This model requires exactly one independent variable.") def index_of(arr, val): """Return index of array nearest to a value.""" if val < min(arr): return 0 return np.abs(arr-val).argmin() def fwhm_expr(model): """Return constraint expression for fwhm.""" fmt = "{factor:.7f}*{prefix:s}sigma" return fmt.format(factor=model.fwhm_factor, prefix=model.prefix) def height_expr(model): """Return constraint expression for maximum peak height.""" fmt = "{factor:.7f}*{prefix:s}amplitude/max({}, {prefix:s}sigma)" return fmt.format(tiny, factor=model.height_factor, prefix=model.prefix) def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0): """Estimate amp, cen, sigma for a peak, create params.""" if x is None: return 1.0, 0.0, 1.0 maxy, miny = max(y), min(y) maxx, minx = max(x), min(x) imaxy = index_of(y, maxy) cen = x[imaxy] amp = (maxy - miny)*3.0 sig = (maxx-minx)/6.0 halfmax_vals = np.where(y > (maxy+miny)/2.0)[0] if negative: imaxy = index_of(y, miny) amp = -(maxy - miny)*3.0 halfmax_vals = np.where(y < (maxy+miny)/2.0)[0] if len(halfmax_vals) > 2: sig = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/2.0 cen = x[halfmax_vals].mean() amp = amp*sig*ampscale sig = sig*sigscale pars = model.make_params(amplitude=amp, center=cen, sigma=sig) pars['%ssigma' % model.prefix].set(min=0.0) return pars def update_param_vals(pars, prefix, **kwargs): """Update parameter values with keyword arguments.""" for key, val in kwargs.items(): pname = "%s%s" % (prefix, key) if pname in pars: pars[pname].value = val pars.update_constraints() return pars COMMON_INIT_DOC = """ Parameters ---------- independent_vars : ['x'] Arguments to func that are independent variables. prefix : str, optional String to prepend to parameter names, needed to add two Models that have parameter names in common. nan_policy : str, optional How to handle NaN and missing values in data. Must be one of: 'raise' (default), 'propagate', or 'omit'. See Notes below. **kwargs : optional Keyword arguments to pass to :class:`Model`. Notes ----- 1. nan_policy sets what to do when a NaN or missing value is seen in the data. Should be one of: - 'raise' : Raise a ValueError (default) - 'propagate' : do nothing - 'omit' : drop missing data """ COMMON_GUESS_DOC = """Guess starting values for the parameters of a model. Parameters ---------- data : array_like Array of data to use to guess parameter values. **kws : optional Additional keyword arguments, passed to model function. Returns ------- params : Parameters """ COMMON_DOC = COMMON_INIT_DOC class ConstantModel(Model): """Constant model, with a single Parameter: ``c``. Note that this is 'constant' in the sense of having no dependence on the independent variable ``x``, not in the sense of being non-varying. To be clear, ``c`` will be a Parameter that will be varied in the fit (by default, of course). 
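# Illustrative sketch: the built-in guess() methods construct Parameters with
# guess_from_peak()/make_params() and then pass any extra keyword arguments
# through update_param_vals(), so keywords can override individual starting
# values.  Assumes `x` and `y` are 1-D data arrays as in the earlier sketch.
from lmfit.models import GaussianModel

peak = GaussianModel()
params = peak.guess(y, x=x)               # automatic amplitude/center/sigma
params2 = peak.guess(y, x=x, center=0.5)  # same, but force the center guess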
""" def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) def constant(x, c=0.0): return c super().__init__(constant, **kwargs) def guess(self, data, **kwargs): """Estimate initial model parameter values from data.""" pars = self.make_params() pars['%sc' % self.prefix].set(value=data.mean()) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class ComplexConstantModel(Model): """Complex constant model, with wo Parameters: ``re``, and ``im``. Note that ``re`` and ``im`` are 'constant' in the sense of having no dependence on the independent variable ``x``, not in the sense of being non-varying. To be clear, ``re`` and ``im`` will be Parameters that will be varied in the fit (by default, of course). """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', name=None, **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) def constant(x, re=0., im=0.): return re + 1j*im super().__init__(constant, **kwargs) def guess(self, data, **kwargs): """Estimate initial model parameter values from data.""" pars = self.make_params() pars['%sre' % self.prefix].set(value=data.real.mean()) pars['%sim' % self.prefix].set(value=data.imag.mean()) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class LinearModel(Model): """Linear model, with two Parameters ``intercept`` and ``slope``. Defined as: .. math:: f(x; m, b) = m x + b with ``slope`` for :math:`m` and ``intercept`` for :math:`b`. """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(linear, **kwargs) def guess(self, data, x=None, **kwargs): """Estimate initial model parameter values from data.""" sval, oval = 0., 0. if x is not None: sval, oval = np.polyfit(x, data, 1) pars = self.make_params(intercept=oval, slope=sval) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class QuadraticModel(Model): """A quadratic model, with three Parameters ``a``, ``b``, and ``c``. Defined as: .. math:: f(x; a, b, c) = a x^2 + b x + c """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(parabolic, **kwargs) def guess(self, data, x=None, **kwargs): """Estimate initial model parameter values from data.""" a, b, c = 0., 0., 0. if x is not None: a, b, c = np.polyfit(x, data, 2) pars = self.make_params(a=a, b=b, c=c) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC ParabolicModel = QuadraticModel class PolynomialModel(Model): r"""A polynomial model with up to 7 Parameters, specfied by ``degree``. .. math:: f(x; c_0, c_1, \ldots, c_7) = \sum_{i=0, 7} c_i x^i with parameters ``c0``, ``c1``, ..., ``c7``. The supplied ``degree`` will specify how many of these are actual variable parameters. This uses :numpydoc:`polyval` for its calculation of the polynomial. """ MAX_DEGREE = 7 DEGREE_ERR = "degree must be an integer less than %d." 
def __init__(self, degree, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) if not isinstance(degree, int) or degree > self.MAX_DEGREE: raise TypeError(self.DEGREE_ERR % self.MAX_DEGREE) self.poly_degree = degree pnames = ['c%i' % (i) for i in range(degree + 1)] kwargs['param_names'] = pnames def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0): return np.polyval([c7, c6, c5, c4, c3, c2, c1, c0], x) super().__init__(polynomial, **kwargs) def guess(self, data, x=None, **kwargs): """Estimate initial model parameter values from data.""" pars = self.make_params() if x is not None: out = np.polyfit(x, data, self.poly_degree) for i, coef in enumerate(out[::-1]): pars['%sc%i' % (self.prefix, i)].set(value=coef) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class GaussianModel(Model): r"""A model based on a Gaussian or normal distribution lineshape (see https://en.wikipedia.org/wiki/Normal_distribution), with three Parameters: ``amplitude``, ``center``, and ``sigma``. In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. math:: f(x; A, \mu, \sigma) = \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to :math:`\mu`, and ``sigma`` to :math:`\sigma`. The full width at half maximum is :math:`2\sigma\sqrt{2\ln{2}}`, approximately :math:`2.3548\sigma`. """ fwhm_factor = 2*np.sqrt(2*np.log(2)) height_factor = 1./np.sqrt(2*np.pi) def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(gaussian, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0) self.set_param_hint('fwhm', expr=fwhm_expr(self)) self.set_param_hint('height', expr=height_expr(self)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class LorentzianModel(Model): r"""A model based on a Lorentzian or Cauchy-Lorentz distribution function (see https://en.wikipedia.org/wiki/Cauchy_distribution), with three Parameters: ``amplitude``, ``center``, and ``sigma``. In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. math:: f(x; A, \mu, \sigma) = \frac{A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big] where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to :math:`\mu`, and ``sigma`` to :math:`\sigma`. The full width at half maximum is :math:`2\sigma`. 
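# Illustrative sketch: GaussianModel adds `fwhm` and `height` as constrained
# (expression-based) parameters, so they are reported alongside amplitude,
# center, and sigma.  Assumes `x` and `y` are as above.
from lmfit.models import GaussianModel

gauss = GaussianModel()
out = gauss.fit(y, gauss.guess(y, x=x), x=x)
print(out.params['fwhm'].value)     # 2*sqrt(2*ln 2) * sigma
print(out.params['height'].value)   # amplitude / (sigma*sqrt(2*pi))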
""" fwhm_factor = 2.0 height_factor = 1./np.pi def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(lorentzian, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0) self.set_param_hint('fwhm', expr=fwhm_expr(self)) self.set_param_hint('height', expr=height_expr(self)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative, ampscale=1.25) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class SplitLorentzianModel(Model): r"""A model based on a Lorentzian or Cauchy-Lorentz distribution function (see https://en.wikipedia.org/wiki/Cauchy_distribution), with four parameters: ``amplitude``, ``center``, ``sigma``, and ``sigma_r``. In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. 'Split' means that the width of the distribution is different between left and right slopes. .. math:: f(x; A, \mu, \sigma, \sigma_r) = \frac{2 A}{\pi (\sigma+\sigma_r)} \big[\frac{\sigma^2}{(x - \mu)^2 + \sigma^2} * H(\mu-x) + \frac{\sigma_r^2}{(x - \mu)^2 + \sigma_r^2} * H(x-\mu)\big] where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to :math:`\mu`, ``sigma`` to :math:`\sigma`, ``sigma_l`` to :math:`\sigma_l`, and :math:`H(x)` is a Heaviside step function: .. math:: H(x) = 0 | x < 0, 1 | x \geq 0 The full width at half maximum is :math:`\sigma_l+\sigma_r`. Just as with the Lorentzian model, integral of this function from ``-.inf`` to ``+.inf`` equals to ``amplitude``. """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(split_lorentzian, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): fwhm_expr = '{pre:s}sigma+{pre:s}sigma_r' height_expr = '2*{pre:s}amplitude/{0:.7f}/max({1:.7f}, ({pre:s}sigma+{pre:s}sigma_r))' self.set_param_hint('sigma', min=0) self.set_param_hint('sigma_r', min=0) self.set_param_hint('fwhm', expr=fwhm_expr.format(pre=self.prefix)) self.set_param_hint('height', expr=height_expr.format(np.pi, tiny, pre=self.prefix)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative, ampscale=1.25) sigma = pars['%ssigma' % self.prefix] pars['%ssigma_r' % self.prefix].set(value=sigma.value, min=sigma.min, max=sigma.max) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class VoigtModel(Model): r"""A model based on a Voigt distribution function (see https://en.wikipedia.org/wiki/Voigt_profile), with four Parameters: ``amplitude``, ``center``, ``sigma``, and ``gamma``. By default, ``gamma`` is constrained to have a value equal to ``sigma``, though it can be varied independently. In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. The definition for the Voigt function used here is .. math:: f(x; A, \mu, \sigma, \gamma) = \frac{A \textrm{Re}[w(z)]}{\sigma\sqrt{2 \pi}} where .. 
math:: :nowrap: \begin{eqnarray*} z &=& \frac{x-\mu +i\gamma}{\sigma\sqrt{2}} \\ w(z) &=& e^{-z^2}{\operatorname{erfc}}(-iz) \end{eqnarray*} and :func:`erfc` is the complementary error function. As above, ``amplitude`` corresponds to :math:`A`, ``center`` to :math:`\mu`, and ``sigma`` to :math:`\sigma`. The parameter ``gamma`` corresponds to :math:`\gamma`. If ``gamma`` is kept at the default value (constrained to ``sigma``), the full width at half maximum is approximately :math:`3.6013\sigma`. """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(voigt, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0) self.set_param_hint('gamma', expr='%ssigma' % self.prefix) fexpr = ("1.0692*{pre:s}gamma+" + "sqrt(0.8664*{pre:s}gamma**2+5.545083*{pre:s}sigma**2)") hexpr = ("({pre:s}amplitude/(max({0}, {pre:s}sigma*sqrt(2*pi))))*" "wofz((1j*{pre:s}gamma)/(max({0}, {pre:s}sigma*sqrt(2)))).real") self.set_param_hint('fwhm', expr=fexpr.format(pre=self.prefix)) self.set_param_hint('height', expr=hexpr.format(tiny, pre=self.prefix)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative, ampscale=1.5, sigscale=0.65) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class PseudoVoigtModel(Model): r"""A model based on a pseudo-Voigt distribution function (see https://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation), which is a weighted sum of a Gaussian and Lorentzian distribution function that share values for ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and full width at half maximum ``fwhm`` (and so have constrained values of ``sigma`` (:math:`\sigma`) and ``height`` (maximum peak height). A parameter ``fraction`` (:math:`\alpha`) controls the relative weight of the Gaussian and Lorentzian components, giving the full definition of .. math:: f(x; A, \mu, \sigma, \alpha) = \frac{(1-\alpha)A}{\sigma_g\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma_g}^2}}]} + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big] where :math:`\sigma_g = {\sigma}/{\sqrt{2\ln{2}}}` so that the full width at half maximum of each component and of the sum is :math:`2\sigma`. The :meth:`guess` function always sets the starting value for ``fraction`` at 0.5. 
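# Illustrative sketch: by default VoigtModel constrains gamma to sigma through
# a parameter-hint expression; clearing that expression lets gamma vary
# independently.  Assumes `x` and `y` are as above.
from lmfit.models import VoigtModel

voigt = VoigtModel()
pars = voigt.guess(y, x=x)
pars['gamma'].set(value=0.5, vary=True, expr='')   # decouple gamma from sigma
out = voigt.fit(y, pars, x=x)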
""" fwhm_factor = 2.0 def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(pvoigt, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0) self.set_param_hint('fraction', value=0.5, min=0.0, max=1.0) self.set_param_hint('fwhm', expr=fwhm_expr(self)) fmt = ("(((1-{prefix:s}fraction)*{prefix:s}amplitude)/" "max({0}, ({prefix:s}sigma*sqrt(pi/log(2))))+" "({prefix:s}fraction*{prefix:s}amplitude)/" "max({0}, (pi*{prefix:s}sigma)))") self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative, ampscale=1.25) pars['%sfraction' % self.prefix].set(value=0.5, min=0.0, max=1.0) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class MoffatModel(Model): r"""A model based on the Moffat distribution function (see https://en.wikipedia.org/wiki/Moffat_distribution), with four Parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), a width parameter ``sigma`` (:math:`\sigma`) and an exponent ``beta`` (:math:`\beta`). In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. math:: f(x; A, \mu, \sigma, \beta) = A \big[(\frac{x-\mu}{\sigma})^2+1\big]^{-\beta} the full width have maximum is :math:`2\sigma\sqrt{2^{1/\beta}-1}`. The :meth:`guess` function always sets the starting value for ``beta`` to 1. Note that for (:math:`\beta=1`) the Moffat has a Lorentzian shape. """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(moffat, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0) self.set_param_hint('beta') self.set_param_hint('fwhm', expr="2*%ssigma*sqrt(2**(1.0/max(1e-3, %sbeta))-1)" % (self.prefix, self.prefix)) self.set_param_hint('height', expr="%samplitude" % self.prefix) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative, ampscale=0.5, sigscale=1.) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class Pearson7Model(Model): r"""A model based on a Pearson VII distribution (see https://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_VII_distribution), with four parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), ``sigma`` (:math:`\sigma`), and ``exponent`` (:math:`m`). In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. math:: f(x; A, \mu, \sigma, m) = \frac{A}{\sigma{\beta(m-\frac{1}{2}, \frac{1}{2})}} \bigl[1 + \frac{(x-\mu)^2}{\sigma^2} \bigr]^{-m} where :math:`\beta` is the beta function (see :scipydoc:`special.beta`) The :meth:`guess` function always gives a starting value for ``exponent`` of 1.5. In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. 
""" def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(pearson7, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('expon', value=1.5, max=100) fmt = ("sqrt(2**(1/{prefix:s}expon)-1)*2*{prefix:s}sigma") self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix)) fmt = ("{prefix:s}amplitude * gamfcn({prefix:s}expon)/" "max({0}, (gamfcn(0.5)*gamfcn({prefix:s}expon-0.5)*{prefix:s}sigma))") self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative) pars['%sexpon' % self.prefix].set(value=1.5) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class StudentsTModel(Model): r"""A model based on a Student's t distribution function (see https://en.wikipedia.org/wiki/Student%27s_t-distribution), with three Parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma`` (:math:`\sigma`). In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. math:: f(x; A, \mu, \sigma) = \frac{A \Gamma(\frac{\sigma+1}{2})} {\sqrt{\sigma\pi}\,\Gamma(\frac{\sigma}{2})} \Bigl[1+\frac{(x-\mu)^2}{\sigma}\Bigr]^{-\frac{\sigma+1}{2}} where :math:`\Gamma(x)` is the gamma function. """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(students_t, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0.0, max=100) fmt = ("{prefix:s}amplitude*gamfcn(({prefix:s}sigma+1)/2)/" "(sqrt({prefix:s}sigma*pi)*gamfcn({prefix:s}sigma/2))") self.set_param_hint('height', expr=fmt.format(prefix=self.prefix)) fmt = ("2*sqrt(2**(2/({prefix:s}sigma+1))*" "{prefix:s}sigma-{prefix:s}sigma)") self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class BreitWignerModel(Model): r"""A model based on a Breit-Wigner-Fano function (see https://en.wikipedia.org/wiki/Fano_resonance), with four Parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), ``sigma`` (:math:`\sigma`), and ``q`` (:math:`q`). In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. 
math:: f(x; A, \mu, \sigma, q) = \frac{A (q\sigma/2 + x - \mu)^2}{(\sigma/2)^2 + (x - \mu)^2} """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(breit_wigner, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0.0) fmt = ("{prefix:s}amplitude*{prefix:s}q**2") self.set_param_hint('height', expr=fmt.format(prefix=self.prefix)) fmt = ("2*(sqrt({prefix:s}q**2*{prefix:s}sigma**2*({prefix:s}q**2+2))/" "max({0}, 2*({prefix:s}q**2)-2))") self.set_param_hint('fwhm', expr=fmt.format(tiny, prefix=self.prefix)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative) pars['%sq' % self.prefix].set(value=1.0) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class LognormalModel(Model): r"""A model based on the Log-normal distribution function (see https://en.wikipedia.org/wiki/Lognormal), with three Parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma`` (:math:`\sigma`). In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. math:: f(x; A, \mu, \sigma) = \frac{A}{\sigma\sqrt{2\pi}}\frac{e^{-(\ln(x) - \mu)^2/ 2\sigma^2}}{x} """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(lognormal, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('center', min=1.e-19) self.set_param_hint('sigma', min=0) fmt = ("{prefix:s}amplitude/max({0}, ({prefix:s}sigma*sqrt(2*pi)))" "*exp({prefix:s}sigma**2/2-{prefix:s}center)") self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix)) fmt = ("exp({prefix:s}center-{prefix:s}sigma**2+{prefix:s}sigma*sqrt(" "2*log(2)))-" "exp({prefix:s}center-{prefix:s}sigma**2-{prefix:s}sigma*sqrt(" "2*log(2)))") self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = self.make_params(amplitude=1.0, center=0.0, sigma=0.25) pars['%ssigma' % self.prefix].set(min=0.0) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class DampedOscillatorModel(Model): r"""A model based on the Damped Harmonic Oscillator Amplitude (see https://en.wikipedia.org/wiki/Harmonic_oscillator#Amplitude_part), with three Parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma`` (:math:`\sigma`). In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. 
math:: f(x; A, \mu, \sigma) = \frac{A}{\sqrt{ [1 - (x/\mu)^2]^2 + (2\sigma x/\mu)^2}} """ height_factor = 0.5 def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(damped_oscillator, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0) self.set_param_hint('height', expr=height_expr(self)) fmt = ("sqrt(abs({prefix:s}center**2*(1-2*{prefix:s}sigma**2)+" "(2*sqrt({prefix:s}center**4*{prefix:s}sigma**2*" "({prefix:s}sigma**2+3)))))-" "sqrt(abs({prefix:s}center**2*(1-2*{prefix:s}sigma**2)-" "(2*sqrt({prefix:s}center**4*{prefix:s}sigma**2*" "({prefix:s}sigma**2+3)))))") self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative, ampscale=0.1, sigscale=0.1) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class DampedHarmonicOscillatorModel(Model): r"""A model based on a variation of the Damped Harmonic Oscillator (see https://en.wikipedia.org/wiki/Harmonic_oscillator), following the definition given in DAVE/PAN (see https://www.ncnr.nist.gov/dave/) with four Parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), ``sigma`` (:math:`\sigma`), and ``gamma`` (:math:`\gamma`). In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. math:: f(x; A, \mu, \sigma, \gamma) = \frac{A\sigma}{\pi [1 - \exp(-x/\gamma)]} \Big[ \frac{1}{(x-\mu)^2 + \sigma^2} - \frac{1}{(x+\mu)^2 + \sigma^2} \Big] where :math:`\gamma=kT` k is the Boltzmann constant in :math:`evK^-1` and T is the temperature in :math:`K`. """ fwhm_factor = 2.0 def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(dho, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0) self.set_param_hint('gamma', min=1.e-19) fmt = ("({prefix:s}amplitude*{prefix:s}sigma)/" "max({0}, (pi*(1-exp(-{prefix:s}center/max({0}, {prefix:s}gamma)))))*" "(1/max({0}, {prefix:s}sigma**2)-1/" "max({0}, (4*{prefix:s}center**2+{prefix:s}sigma**2)))") self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix)) self.set_param_hint('fwhm', expr=fwhm_expr(self)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative, ampscale=0.1, sigscale=0.1) pars['%sgamma' % self.prefix].set(value=1.0, min=0.0) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class ExponentialGaussianModel(Model): r"""A model of an Exponentially modified Gaussian distribution (see https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution) with four Parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), ``sigma`` (:math:`\sigma`), and ``gamma`` (:math:`\gamma`). In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. 
math:: f(x; A, \mu, \sigma, \gamma) = \frac{A\gamma}{2} \exp\bigl[\gamma({\mu - x + \gamma\sigma^2/2})\bigr] {\operatorname{erfc}}\Bigl(\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\Bigr) where :func:`erfc` is the complementary error function. """ fwhm_factor = 2*np.sqrt(2*np.log(2)) def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(expgaussian, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0) self.set_param_hint('gamma', min=0, max=20) fmt = ("{prefix:s}amplitude*{prefix:s}gamma/2*" "exp({prefix:s}gamma**2*{prefix:s}sigma**2/2)*" "erfc({prefix:s}gamma*{prefix:s}sigma/sqrt(2))") self.set_param_hint('height', expr=fmt.format(prefix=self.prefix)) self.set_param_hint('fwhm', expr=fwhm_expr(self)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class SkewedGaussianModel(Model): r"""A variation of the Exponential Gaussian, this uses a skewed normal distribution (see https://en.wikipedia.org/wiki/Skew_normal_distribution), with Parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), ``sigma`` (:math:`\sigma`), and ``gamma`` (:math:`\gamma`). In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, respectively. .. math:: f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 + {\operatorname{erf}}\bigl[ \frac{{\gamma}(x-\mu)}{\sigma\sqrt{2}} \bigr] \Bigr\} where :func:`erf` is the error function. """ fwhm_factor = 2*np.sqrt(2*np.log(2)) height_factor = 1./np.sqrt(2*np.pi) def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(skewed_gaussian, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0) self.set_param_hint('height', expr=height_expr(self)) self.set_param_hint('fwhm', expr=fwhm_expr(self)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class SkewedVoigtModel(Model): r"""A variation of the Skewed Gaussian, this applies the same skewing to a Voigt distribution (see https://en.wikipedia.org/wiki/Voigt_distribution). It has Parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), ``sigma`` (:math:`\sigma`), and ``gamma`` (:math:`\gamma`), as usual for a Voigt distribution, and add a Parameter ``skew``. In addition, parameters ``fwhm`` and ``height`` are included as constraints to report full width at half maximum and maximum peak height, of the Voigt distribution, respectively. .. math:: f(x; A, \mu, \sigma, \gamma, \rm{skew}) = {\rm{Voigt}}(x; A, \mu, \sigma, \gamma) \Bigl\{ 1 + {\operatorname{erf}}\bigl[ \frac{{\rm{skew}}(x-\mu)}{\sigma\sqrt{2}} \bigr] \Bigr\} where :func:`erf` is the error function. 
""" fwhm_factor = 3.60131 height_factor = 1./np.sqrt(2*np.pi) def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(skewed_voigt, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('sigma', min=0) self.set_param_hint('gamma', expr='%ssigma' % self.prefix) self.set_param_hint('height', expr=height_expr(self)) self.set_param_hint('fwhm', expr=fwhm_expr(self)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class DonaichModel(Model): r"""A model of an Doniach Sunjic asymmetric lineshape (see https://www.casaxps.com/help_manual/line_shapes.htm), used in photo-emission, with four Parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), ``sigma`` (:math:`\sigma`), and ``gamma`` (:math:`\gamma`). In addition, parameter ``height`` is included as a constraint. .. math:: f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma^{1-\gamma}} \frac{\cos\bigl[\pi\gamma/2 + (1-\gamma) \arctan{((x - \mu)}/\sigma)\bigr]} {\bigr[1 + (x-\mu)/\sigma\bigl]^{(1-\gamma)/2}} """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(donaich, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): fmt = ("{prefix:s}amplitude/max({0}, ({prefix:s}sigma**(1-{prefix:s}gamma)))" "*cos(pi*{prefix:s}gamma/2)") self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix)) def guess(self, data, x=None, negative=False, **kwargs): """Estimate initial model parameter values from data.""" pars = guess_from_peak(self, data, x, negative, ampscale=0.5) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class PowerLawModel(Model): r"""A model based on a Power Law (see https://en.wikipedia.org/wiki/Power_law), with two Parameters: ``amplitude`` (:math:`A`), and ``exponent`` (:math:`k`), in: .. math:: f(x; A, k) = A x^k """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(powerlaw, **kwargs) def guess(self, data, x=None, **kwargs): """Estimate initial model parameter values from data.""" try: expon, amp = np.polyfit(np.log(x+1.e-14), np.log(data+1.e-14), 1) except TypeError: expon, amp = 1, np.log(abs(max(data)+1.e-9)) pars = self.make_params(amplitude=np.exp(amp), exponent=expon) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class ExponentialModel(Model): r"""A model based on an exponential decay function (see https://en.wikipedia.org/wiki/Exponential_decay) with two Parameters: ``amplitude`` (:math:`A`), and ``decay`` (:math:`\tau`), in: .. 
math:: f(x; A, \tau) = A e^{-x/\tau} """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars}) super().__init__(exponential, **kwargs) def guess(self, data, x=None, **kwargs): """Estimate initial model parameter values from data.""" try: sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1) except TypeError: sval, oval = 1., np.log(abs(max(data)+1.e-9)) pars = self.make_params(amplitude=np.exp(oval), decay=-1.0/sval) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class StepModel(Model): r"""A model based on a Step function, with three Parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma`` (:math:`\sigma`). There are four choices for ``form``: - ``linear`` (the default) - ``atan`` or ``arctan`` for an arc-tangent function - ``erf`` for an error function - ``logistic`` for a logistic function (see https://en.wikipedia.org/wiki/Logistic_function) The step function starts with a value 0, and ends with a value of :math:`A` rising to :math:`A/2` at :math:`\mu`, with :math:`\sigma` setting the characteristic width. The functional forms are defined as: .. math:: :nowrap: \begin{eqnarray*} & f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) & = A \min{[1, \max{(0, \alpha)}]} \\ & f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) & = A [1/2 + \arctan{(\alpha)}/{\pi}] \\ & f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) & = A [1 + {\operatorname{erf}}(\alpha)]/2 \\ & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A [1 - \frac{1}{1 + e^{\alpha}} ] \end{eqnarray*} where :math:`\alpha = (x - \mu)/{\sigma}`. """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', form='linear', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'form': form, 'independent_vars': independent_vars}) super().__init__(step, **kwargs) def guess(self, data, x=None, **kwargs): """Estimate initial model parameter values from data.""" if x is None: return ymin, ymax = min(data), max(data) xmin, xmax = min(x), max(x) pars = self.make_params(amplitude=(ymax-ymin), center=(xmax+xmin)/2.0) pars['%ssigma' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class RectangleModel(Model): r"""A model based on a Step-up and Step-down function, with five Parameters: ``amplitude`` (:math:`A`), ``center1`` (:math:`\mu_1`), ``center2`` (:math:`\mu_2`), `sigma1`` (:math:`\sigma_1`) and ``sigma2`` (:math:`\sigma_2`). There are four choices for ``form``, which is used for both the Step up and the Step down: - ``linear`` (the default) - ``atan`` or ``arctan`` for an arc-tangent function - ``erf`` for an error function - ``logistic`` for a logistic function (see https://en.wikipedia.org/wiki/Logistic_function) The function starts with a value 0, transitions to a value of :math:`A`, taking the value :math:`A/2` at :math:`\mu_1`, with :math:`\sigma_1` setting the characteristic width. The function then transitions again to the value :math:`A/2` at :math:`\mu_2`, with :math:`\sigma_2` setting the characteristic width. The functional forms are defined as: .. 
math:: :nowrap: \begin{eqnarray*} &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) &= A \{ \min{[1, \max{(0, \alpha_1)}]} + \min{[-1, \max{(0, \alpha_2)}]} \} \\ &f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) &= A [\arctan{(\alpha_1)} + \arctan{(\alpha_2)}]/{\pi} \\ &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) &= A [{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)]/2 \\ &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A [1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 + e^{\alpha_2}} ] \end{eqnarray*} where :math:`\alpha_1 = (x - \mu_1)/{\sigma_1}` and :math:`\alpha_2 = -(x - \mu_2)/{\sigma_2}`. """ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise', form='linear', **kwargs): kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'form': form, 'independent_vars': independent_vars}) super().__init__(rectangle, **kwargs) self._set_paramhints_prefix() def _set_paramhints_prefix(self): self.set_param_hint('center1') self.set_param_hint('center2') self.set_param_hint('midpoint', expr='(%scenter1+%scenter2)/2.0' % (self.prefix, self.prefix)) def guess(self, data, x=None, **kwargs): """Estimate initial model parameter values from data.""" if x is None: return ymin, ymax = min(data), max(data) xmin, xmax = min(x), max(x) pars = self.make_params(amplitude=(ymax-ymin), center1=(xmax+xmin)/4.0, center2=3*(xmax+xmin)/4.0) pars['%ssigma1' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0) pars['%ssigma2' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0) return update_param_vals(pars, self.prefix, **kwargs) __init__.__doc__ = COMMON_INIT_DOC guess.__doc__ = COMMON_GUESS_DOC class ExpressionModel(Model): idvar_missing = "No independent variable found in\n %s" idvar_notfound = "Cannot find independent variables '%s' in\n %s" no_prefix = "ExpressionModel does not support `prefix` argument" def __init__(self, expr, independent_vars=None, init_script=None, nan_policy='raise', **kws): """Generate a model from user-supplied expression. Parameters ---------- expr : str Mathematical expression for model. independent_vars : list of str or None, optional Variable names to use as independent variables. init_script : str or None, optional Initial script to run in asteval interpreter. nan_policy : str, optional How to handle NaN and missing values in data. Must be one of: 'raise' (default), 'propagate', or 'omit'. See Notes below. **kws : optional Keyword arguments to pass to :class:`Model`. Notes ----- 1. each instance of ExpressionModel will create and using its own version of an asteval interpreter. 2. prefix is **not supported** for ExpressionModel. 3. nan_policy sets what to do when a NaN or missing value is seen in the data. 
Should be one of: - 'raise' : Raise a ValueError (default) - 'propagate' : do nothing - 'omit' : drop missing data """ # create ast evaluator, load custom functions self.asteval = Interpreter() for name in lineshapes.functions: self.asteval.symtable[name] = getattr(lineshapes, name, None) if init_script is not None: self.asteval.eval(init_script) # save expr as text, parse to ast, save for later use self.expr = expr.strip() self.astcode = self.asteval.parse(self.expr) # find all symbol names found in expression sym_names = get_ast_names(self.astcode) if independent_vars is None and 'x' in sym_names: independent_vars = ['x'] if independent_vars is None: raise ValueError(self.idvar_missing % (self.expr)) # determine which named symbols are parameter names, # try to find all independent variables idvar_found = [False]*len(independent_vars) param_names = [] for name in sym_names: if name in independent_vars: idvar_found[independent_vars.index(name)] = True elif name not in param_names and name not in self.asteval.symtable: param_names.append(name) # make sure we have all independent parameters if not all(idvar_found): lost = [] for ix, found in enumerate(idvar_found): if not found: lost.append(independent_vars[ix]) lost = ', '.join(lost) raise ValueError(self.idvar_notfound % (lost, self.expr)) kws['independent_vars'] = independent_vars if 'prefix' in kws: raise Warning(self.no_prefix) def _eval(**kwargs): for name, val in kwargs.items(): self.asteval.symtable[name] = val self.asteval.start_time = time.time() return self.asteval.run(self.astcode) kws["nan_policy"] = nan_policy super().__init__(_eval, **kws) # set param names here, and other things normally # set in _parse_params(), which will be short-circuited. self.independent_vars = independent_vars self._func_allargs = independent_vars + param_names self._param_names = param_names self._func_haskeywords = True self.def_vals = {} def __repr__(self): """Return printable representation of ExpressionModel.""" return "" % (self.expr) def _parse_params(self): """Over-write ExpressionModel._parse_params with `pass`. This prevents normal parsing of function for parameter names. """ pass lmfit-py-1.0.0/lmfit/parameter.py000066400000000000000000001030061357751001700167240ustar00rootroot00000000000000"""Parameter class.""" from collections import OrderedDict from copy import deepcopy import importlib import json import warnings from asteval import Interpreter, get_ast_names, valid_symbol_name from numpy import arcsin, array, cos, inf, isclose, nan, sin, sqrt import scipy.special import uncertainties from .jsonutils import decode4js, encode4js from .printfuncs import params_html_table SCIPY_FUNCTIONS = {'gamfcn': scipy.special.gamma} for name in ('erf', 'erfc', 'wofz'): SCIPY_FUNCTIONS[name] = getattr(scipy.special, name) def check_ast_errors(expr_eval): """Check for errors derived from asteval.""" if len(expr_eval.error) > 0: expr_eval.raise_exception(None) class Parameters(OrderedDict): """An ordered dictionary of all the Parameter objects required to specify a fit model. All minimization and Model fitting routines in lmfit will use exactly one Parameters object, typically given as the first argument to the objective function. All keys of a Parameters() instance must be strings and valid Python symbol names, so that the name must match ``[a-z_][a-z0-9_]*`` and cannot be a Python reserved word. All values of a Parameters() instance must be Parameter objects. 
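# --- Illustrative standalone sketch (not part of this module): a minimal example of
# building a Parameters container with an expression-constrained entry, as described in
# the docstring above. Assumes lmfit is installed; the parameter names are made up.
from lmfit import Parameters

pars = Parameters()
pars.add('amp', value=10.0, min=0)           # ordinary varying Parameter
pars.add('decay', value=0.05, vary=False)    # fixed Parameter
pars.add('tau', expr='1/decay')              # constrained via the asteval interpreter
print(pars['tau'].value)                     # expression is evaluated -> 20.0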
A Parameters() instance includes an asteval interpreter used for evaluation of constrained Parameters. Parameters() support copying and pickling, and have methods to convert to and from serializations using json strings. """ def __init__(self, asteval=None, usersyms=None, *args, **kwds): """ Arguments --------- asteval : :class:`asteval.Interpreter`, optional Instance of the asteval Interpreter to use for constraint expressions. If None, a new interpreter will be created. Warning: *deprecated, use usersyms if possible* usersyms : dictionary of symbols to add to the :class:`asteval.Interpreter`. *args : optional Arguments. **kwds : optional Keyword arguments. """ super().__init__(self) self._asteval = asteval if self._asteval is None: self._asteval = Interpreter() _syms = {} _syms.update(SCIPY_FUNCTIONS) if usersyms is not None: _syms.update(usersyms) for key, val in _syms.items(): self._asteval.symtable[key] = val def copy(self): """Parameters.copy() should always be a deepcopy.""" return self.__deepcopy__(None) def update(self, other): """Update values and symbols with another Parameters object.""" if not isinstance(other, Parameters): raise ValueError("'%s' is not a Parameters object" % other) self.add_many(*other.values()) for sym in other._asteval.user_defined_symbols(): self._asteval.symtable[sym] = other._asteval.symtable[sym] return self def __copy__(self): """Parameters.copy() should always be a deepcopy.""" return self.__deepcopy__(None) def __deepcopy__(self, memo): """Implementation of Parameters.deepcopy(). The method needs to make sure that asteval is available and that all individual Parameter objects are copied. """ _pars = Parameters(asteval=None) # find the symbols that were added by users, not during construction unique_symbols = {key: self._asteval.symtable[key] for key in self._asteval.user_defined_symbols()} _pars._asteval.symtable.update(unique_symbols) # we're just about to add a lot of Parameter objects to the newly parameter_list = [] for key, par in self.items(): if isinstance(par, Parameter): param = Parameter(name=par.name, value=par.value, min=par.min, max=par.max) param.vary = par.vary param.brute_step = par.brute_step param.stderr = par.stderr param.correl = par.correl param.init_value = par.init_value param.expr = par.expr param.user_data = par.user_data parameter_list.append(param) _pars.add_many(*parameter_list) return _pars def __setitem__(self, key, par): """Set items of Parameters object.""" if key not in self: if not valid_symbol_name(key): raise KeyError("'%s' is not a valid Parameters name" % key) if par is not None and not isinstance(par, Parameter): raise ValueError("'%s' is not a Parameter" % par) OrderedDict.__setitem__(self, key, par) par.name = key par._expr_eval = self._asteval self._asteval.symtable[key] = par.value def __add__(self, other): """Add Parameters objects.""" if not isinstance(other, Parameters): raise ValueError("'%s' is not a Parameters object" % other) out = deepcopy(self) out.add_many(*other.values()) for sym in other._asteval.user_defined_symbols(): if sym not in out._asteval.symtable: out._asteval.symtable[sym] = other._asteval.symtable[sym] return out def __iadd__(self, other): """Add/assign Parameters objects.""" self.update(other) return self def __array__(self): """Convert Parameters to array.""" return array([float(k) for k in self.values()]) def __reduce__(self): """Reduce Parameters instance such that it can be pickled.""" # make a list of all the parameters params = [self[k] for k in self] # find the symbols from 
_asteval.symtable, that need to be remembered. sym_unique = self._asteval.user_defined_symbols() unique_symbols = {key: deepcopy(self._asteval.symtable[key]) for key in sym_unique} return self.__class__, (), {'unique_symbols': unique_symbols, 'params': params} def __setstate__(self, state): """Unpickle a Parameters instance. Parameters ---------- state : dict state['unique_symbols'] is a dictionary containing symbols that need to be injected into _asteval.symtable state['params'] is a list of Parameter instances to be added """ # first update the Interpreter symbol table. This needs to be done # first because Parameter's early in the list may depend on later # Parameter's. This leads to problems because add_many eventually leads # to a Parameter value being retrieved with _getval, which, if the # dependent value hasn't already been added to the symtable, leads to # an Error. Another way of doing this would be to remove all the expr # from the Parameter instances before they get added, then to restore # them. # self._asteval.symtable.update(state['unique_symbols']) symtab = self._asteval.symtable for key, val in state['unique_symbols'].items(): if key not in symtab: if isinstance(val, dict): value = val.get('__name__', None) symname = val.get('__name__', None) importer = val.get('importer', None) if value is None and symname is not None and importer is not None: _mod = importlib.import_module(importer) value = getattr(_mod, symname, None) if value is not None: symtab[key] = value else: symtab[key] = val # then add all the parameters self.add_many(*state['params']) def eval(self, expr): """Evaluate a statement using the asteval Interpreter. Parameters ---------- expr : string An expression containing parameter names and other symbols recognizable by the asteval Interpreter. Returns ------- The result of the expression. """ return self._asteval.eval(expr) def update_constraints(self): """Update all constrained parameters, checking that dependencies are evaluated as needed.""" requires_update = {name for name, par in self.items() if par._expr is not None} updated_tracker = set(requires_update) def _update_param(name): """Update a parameter value, including setting bounds. For a constrained parameter (one with an `expr` defined), this first updates (recursively) all parameters on which the parameter depends (using the 'deps' field). """ par = self.__getitem__(name) if par._expr_eval is None: par._expr_eval = self._asteval for dep in par._expr_deps: if dep in updated_tracker: _update_param(dep) self._asteval.symtable[name] = par.value updated_tracker.discard(name) for name in requires_update: _update_param(name) def pretty_repr(self, oneline=False): """Return a pretty representation of a Parameters class. Parameters ---------- oneline : bool, optional If True prints a one-line parameters representation (default is False). Returns ------- s: str Parameters representation. """ if oneline: return super().__repr__() s = "Parameters({\n" for key in self.keys(): s += " '%s': %s, \n" % (key, self[key]) s += " })\n" return s def pretty_print(self, oneline=False, colwidth=8, precision=4, fmt='g', columns=['value', 'min', 'max', 'stderr', 'vary', 'expr', 'brute_step']): """Pretty-print of parameters data. Parameters ---------- oneline : bool, optional If True prints a one-line parameters representation (default is False). colwidth : int, optional Column width for all columns specified in :attr:`columns`. precision : int, optional Number of digits to be printed after floating point. 
fmt : {'g', 'e', 'f'}, optional Single-character numeric formatter. Valid values are: 'f' floating point, 'g' floating point and exponential, or 'e' exponential. columns : :obj:`list` of :obj:`str`, optional List of :class:`Parameter` attribute names to print. """ if oneline: print(self.pretty_repr(oneline=oneline)) return name_len = max(len(s) for s in self) allcols = ['name'] + columns title = '{:{name_len}} ' + len(columns) * ' {:>{n}}' print(title.format(*allcols, name_len=name_len, n=colwidth).title()) numstyle = '{%s:>{n}.{p}{f}}' # format for numeric columns otherstyles = dict(name='{name:<{name_len}} ', stderr='{stderr!s:>{n}}', vary='{vary!s:>{n}}', expr='{expr!s:>{n}}', brute_step='{brute_step!s:>{n}}') line = ' '.join([otherstyles.get(k, numstyle % k) for k in allcols]) for name, values in sorted(self.items()): pvalues = {k: getattr(values, k) for k in columns} pvalues['name'] = name # stderr is a special case: it is either numeric or None (i.e. str) if 'stderr' in columns and pvalues['stderr'] is not None: pvalues['stderr'] = (numstyle % '').format( pvalues['stderr'], n=colwidth, p=precision, f=fmt) elif 'brute_step' in columns and pvalues['brute_step'] is not None: pvalues['brute_step'] = (numstyle % '').format( pvalues['brute_step'], n=colwidth, p=precision, f=fmt) print(line.format(name_len=name_len, n=colwidth, p=precision, f=fmt, **pvalues)) def _repr_html_(self): """Returns a HTML representation of parameters data.""" return params_html_table(self) def add(self, name, value=None, vary=True, min=-inf, max=inf, expr=None, brute_step=None): """Add a Parameter. Parameters ---------- name : str Name of parameter. Must match ``[a-z_][a-z0-9_]*`` and cannot be a Python reserved word. value : float, optional Numerical Parameter value, typically the *initial value*. vary : bool, optional Whether the Parameter is varied during a fit (default is True). min : float, optional Lower bound for value (default is `-numpy.inf`, no lower bound). max : float, optional Upper bound for value (default is `numpy.inf`, no upper bound). expr : str, optional Mathematical expression used to constrain the value during the fit. brute_step : float, optional Step size for grid points in the `brute` method. Examples -------- >>> params = Parameters() >>> params.add('xvar', value=0.50, min=0, max=1) >>> params.add('yvar', expr='1.0 - xvar') which is equivalent to: >>> params = Parameters() >>> params['xvar'] = Parameter(name='xvar', value=0.50, min=0, max=1) >>> params['yvar'] = Parameter(name='yvar', expr='1.0 - xvar') """ if isinstance(name, Parameter): self.__setitem__(name.name, name) else: self.__setitem__(name, Parameter(value=value, name=name, vary=vary, min=min, max=max, expr=expr, brute_step=brute_step)) def add_many(self, *parlist): """Add many parameters, using a sequence of tuples. Parameters ---------- parlist : :obj:`sequence` of :obj:`tuple` or :class:`Parameter` A sequence of tuples, or a sequence of `Parameter` instances. If it is a sequence of tuples, then each tuple must contain at least the name. The order in each tuple must be `(name, value, vary, min, max, expr, brute_step)`. Examples -------- >>> params = Parameters() # add with tuples: (NAME VALUE VARY MIN MAX EXPR BRUTE_STEP) >>> params.add_many(('amp', 10, True, None, None, None, None), ... ('cen', 4, True, 0.0, None, None, None), ... ('wid', 1, False, None, None, None, None), ... ('frac', 0.5)) # add a sequence of Parameters >>> f = Parameter('par_f', 100) >>> g = Parameter('par_g', 2.) 
>>> params.add_many(f, g) """ __params = [] for par in parlist: if not isinstance(par, Parameter): par = Parameter(*par) __params.append(par) par._delay_asteval = True self.__setitem__(par.name, par) for para in __params: para._delay_asteval = False def valuesdict(self): """Return an ordered dictionary of parameter values. Returns ------- OrderedDict An ordered dictionary of :attr:`name`::attr:`value` pairs for each Parameter. """ return OrderedDict((p.name, p.value) for p in self.values()) def dumps(self, **kws): """Represent Parameters as a JSON string. Parameters ---------- **kws : optional Keyword arguments that are passed to `json.dumps()`. Returns ------- str JSON string representation of Parameters. See Also -------- dump(), loads(), load(), json.dumps() """ params = [p.__getstate__() for p in self.values()] sym_unique = self._asteval.user_defined_symbols() unique_symbols = {key: encode4js(deepcopy(self._asteval.symtable[key])) for key in sym_unique} return json.dumps({'unique_symbols': unique_symbols, 'params': params}, **kws) def loads(self, s, **kws): """Load Parameters from a JSON string. Parameters ---------- **kws : optional Keyword arguments that are passed to `json.loads()`. Returns ------- :class:`Parameters` Updated Parameters from the JSON string. Notes ----- Current Parameters will be cleared before loading the data from the JSON string. See Also -------- dump(), dumps(), load(), json.loads() """ self.clear() tmp = decode4js(json.loads(s, **kws)) state = {'unique_symbols': tmp['unique_symbols'], 'params': []} for parstate in tmp['params']: _par = Parameter(name='') _par.__setstate__(parstate) state['params'].append(_par) self.__setstate__(state) return self def dump(self, fp, **kws): """Write JSON representation of Parameters to a file-like object. Parameters ---------- fp : file-like object An open and ``.write()``-supporting file-like object. **kws : optional Keyword arguments that are passed to `dumps()`. Returns ------- None or int Return value from `fp.write()`. None for Python 2.7 and the number of characters written in Python 3. See Also -------- dump(), load(), json.dump() """ return fp.write(self.dumps(**kws)) def load(self, fp, **kws): """Load JSON representation of Parameters from a file-like object. Parameters ---------- fp : file-like object An open and ``.read()``-supporting file-like object. **kws : optional Keyword arguments that are passed to `loads()`. Returns ------- :class:`Parameters` Updated Parameters loaded from `fp`. See Also -------- dump(), loads(), json.load() """ return self.loads(fp.read(), **kws) class Parameter: """A Parameter is an object that can be varied in a fit, or one of the controlling variables in a model. It is a central component of lmfit, and all minimization and modeling methods use Parameter objects. A Parameter has a `name` attribute, and a scalar floating point `value`. It also has a `vary` attribute that describes whether the value should be varied during the minimization. Finite bounds can be placed on the Parameter's value by setting its `min` and/or `max` attributes. A Parameter can also have its value determined by a mathematical expression of other Parameter values held in the `expr` attrribute. Additional attributes include `brute_step` used as the step size in a brute-force minimization, and `user_data` reserved exclusively for user's need. 
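# --- Illustrative standalone sketch (not part of this module): a single bounded
# Parameter, showing that out-of-bounds assignments are clipped back to the bounds.
# Assumes lmfit is installed; the name 'center' is arbitrary.
from lmfit import Parameter

cen = Parameter('center', value=5.0, min=0.0, max=10.0)
cen.set(value=12.0)            # values outside [min, max] are clipped
print(cen.value, cen.vary)     # -> 10.0 True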
After a minimization, a Parameter may also gain other attributes, including `stderr` holding the estimated standard error in the Parameter's value, and `correl`, a dictionary of correlation values with other Parameters used in the minimization. """ def __init__(self, name, value=None, vary=True, min=-inf, max=inf, expr=None, brute_step=None, user_data=None): """ Parameters ---------- name : str Name of the Parameter. value : float, optional Numerical Parameter value. vary : bool, optional Whether the Parameter is varied during a fit (default is True). min : float, optional Lower bound for value (default is `-numpy.inf`, no lower bound). max : float, optional Upper bound for value (default is `numpy.inf`, no upper bound). expr : str, optional Mathematical expression used to constrain the value during the fit. brute_step : float, optional Step size for grid points in the `brute` method. user_data : optional User-definable extra attribute used for a Parameter. Attributes ---------- stderr : float The estimated standard error for the best-fit value. correl : dict A dictionary of the correlation with the other fitted Parameters of the form:: `{'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}` """ self.name = name self.user_data = user_data self.init_value = value self.min = min self.max = max self.brute_step = brute_step self.vary = vary self._expr = expr self._expr_ast = None self._expr_eval = None self._expr_deps = [] self._delay_asteval = False self.stderr = None self.correl = None self.from_internal = lambda val: val self._val = value self._init_bounds() def set(self, value=None, vary=None, min=None, max=None, expr=None, brute_step=None): """Set or update Parameter attributes. Parameters ---------- value : float, optional Numerical Parameter value. vary : bool, optional Whether the Parameter is varied during a fit. min : float, optional Lower bound for value. To remove a lower bound you must use `-numpy.inf`. max : float, optional Upper bound for value. To remove an upper bound you must use `numpy.inf`. expr : str, optional Mathematical expression used to constrain the value during the fit. To remove a constraint you must supply an empty string. brute_step : float, optional Step size for grid points in the `brute` method. To remove the step size you must use ``0``. Notes ----- Each argument to `set()` has a default value of `None`, which will leave the current value for the attribute unchanged. Thus, to lift a lower or upper bound, passing in `None` will not work. Instead, you must set these to `-numpy.inf` or `numpy.inf`, as with:: par.set(min=None) # leaves lower bound unchanged par.set(min=-numpy.inf) # removes lower bound Similarly, to clear an expression, pass a blank string, (not ``None``!) as with:: par.set(expr=None) # leaves expression unchanged par.set(expr='') # removes expression Explicitly setting a value or setting `vary=True` will also clear the expression. 
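# --- Illustrative standalone sketch (not part of this module): the set() conventions
# described in the Notes above, shown on a bare Parameter. Assumes lmfit and numpy are
# installed; 'amp' is a hypothetical name of another Parameter.
from numpy import inf
from lmfit import Parameter

wid = Parameter('wid', value=1.0, min=0.0, max=5.0, brute_step=0.1)
wid.set(min=None)       # None leaves the lower bound unchanged
wid.set(min=-inf)       # -numpy.inf removes the lower bound
wid.set(expr='2*amp')   # ties the value to the (hypothetical) Parameter 'amp'
wid.set(expr='')        # an empty string removes the constraint again
wid.set(brute_step=0)   # 0 removes the brute-force step size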
Finally, to clear the brute_step size, pass ``0``, not ``None``:: par.set(brute_step=None) # leaves brute_step unchanged par.set(brute_step=0) # removes brute_step """ if value is not None: self.value = value self.__set_expression('') if vary is not None: self.vary = vary if vary: self.__set_expression('') if min is not None: self.min = min if max is not None: self.max = max if expr is not None: self.__set_expression(expr) if brute_step is not None: if brute_step == 0.0: self.brute_step = None else: self.brute_step = brute_step def _init_bounds(self): """Make sure initial bounds are self-consistent.""" # _val is None means - infinity. if self.max is None: self.max = inf if self.min is None: self.min = -inf if self._val is None: self._val = -inf if self.min > self.max: self.min, self.max = self.max, self.min if isclose(self.min, self.max, atol=1e-13, rtol=1e-13): raise ValueError("Parameter '%s' has min == max" % self.name) if self._val > self.max: self._val = self.max if self._val < self.min: self._val = self.min self.setup_bounds() def __getstate__(self): """Get state for pickle.""" return (self.name, self.value, self.vary, self.expr, self.min, self.max, self.brute_step, self.stderr, self.correl, self.init_value, self.user_data) def __setstate__(self, state): """Set state for pickle.""" (self.name, _value, self.vary, self.expr, self.min, self.max, self.brute_step, self.stderr, self.correl, self.init_value, self.user_data) = state self._expr_ast = None self._expr_eval = None self._expr_deps = [] self._delay_asteval = False self.value = _value self._init_bounds() def __repr__(self): """Return printable representation of a Parameter object.""" s = [] sval = "value=%s" % repr(self._getval()) if not self.vary and self._expr is None: sval += " (fixed)" elif self.stderr is not None: sval += " +/- %.3g" % self.stderr s.append(sval) s.append("bounds=[%s:%s]" % (repr(self.min), repr(self.max))) if self._expr is not None: s.append("expr='%s'" % self.expr) if self.brute_step is not None: s.append("brute_step=%s" % (self.brute_step)) return "" % (self.name, ', '.join(s)) def setup_bounds(self): """Set up Minuit-style internal/external parameter transformation of min/max bounds. As a side-effect, this also defines the self.from_internal method used to re-calculate self.value from the internal value, applying the inverse Minuit-style transformation. This method should be called prior to passing a Parameter to the user-defined objective function. This code borrows heavily from JJ Helmus' leastsqbound.py Returns ------- _val : float The internal value for parameter from self.value (which holds the external, user-expected value). This internal value should actually be used in a fit. """ if self.min is None: self.min = -inf if self.max is None: self.max = inf if self.min == -inf and self.max == inf: self.from_internal = lambda val: val _val = self._val elif self.max == inf: self.from_internal = lambda val: self.min - 1.0 + sqrt(val*val + 1) _val = sqrt((self._val - self.min + 1.0)**2 - 1) elif self.min == -inf: self.from_internal = lambda val: self.max + 1 - sqrt(val*val + 1) _val = sqrt((self.max - self._val + 1.0)**2 - 1) else: self.from_internal = lambda val: self.min + (sin(val) + 1) * \ (self.max - self.min) / 2.0 _val = arcsin(2*(self._val - self.min)/(self.max - self.min) - 1) return _val def scale_gradient(self, val): """Return scaling factor for gradient. Parameters ---------- val: float Numerical Parameter value. 
Returns ------- float Scaling factor for gradient the according to Minuit-style transformation. """ if self.min == -inf and self.max == inf: return 1.0 elif self.max == inf: return val / sqrt(val*val + 1) elif self.min == -inf: return -val / sqrt(val*val + 1) return cos(val) * (self.max - self.min) / 2.0 def _getval(self): """Get value, with bounds applied.""" # Note assignment to self._val has been changed to self.value # The self.value property setter makes sure that the # _expr_eval.symtable is kept up-to-date. # If you just assign to self._val then _expr_eval.symtable[self.name] # becomes stale if parameter.expr is not None. if (isinstance(self._val, uncertainties.core.Variable) and self._val is not nan): msg = ("Please make sure that the Parameter value is a number, " "not an instance of 'uncertainties.core.Variable'. This " "automatic conversion will be removed in the next release.") warnings.warn(FutureWarning(msg)) try: self.value = self._val.nominal_value except AttributeError: pass if self._expr is not None: if self._expr_ast is None: self.__set_expression(self._expr) if self._expr_eval is not None: if not self._delay_asteval: self.value = self._expr_eval(self._expr_ast) check_ast_errors(self._expr_eval) return self._val def set_expr_eval(self, evaluator): """Set expression evaluator instance.""" self._expr_eval = evaluator @property def value(self): """Return the numerical value of the Parameter, with bounds applied.""" return self._getval() @value.setter def value(self, val): """Set the numerical Parameter value.""" self._val = val if self._val is not None: if self._val > self.max: self._val = self.max elif self._val < self.min: self._val = self.min if not hasattr(self, '_expr_eval'): self._expr_eval = None if self._expr_eval is not None: self._expr_eval.symtable[self.name] = self._val @property def expr(self): """Return the mathematical expression used to constrain the value during the fit.""" return self._expr @expr.setter def expr(self, val): """Set the mathematical expression used to constrain the value during the fit. To remove a constraint you must supply an empty string. 
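# --- Illustrative standalone sketch (not part of this module): the Minuit-style bound
# transform used by setup_bounds()/from_internal for a doubly-bounded Parameter, checked
# numerically. Assumes numpy; the numbers are arbitrary.
import numpy as np

vmin, vmax, value = 0.0, 10.0, 2.5
internal = np.arcsin(2*(value - vmin)/(vmax - vmin) - 1)         # external -> internal
external = vmin + (np.sin(internal) + 1)*(vmax - vmin)/2.0       # internal -> external
assert np.isclose(external, value)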
""" self.__set_expression(val) def __set_expression(self, val): if val == '': val = None self._expr = val if val is not None: self.vary = False if not hasattr(self, '_expr_eval'): self._expr_eval = None if val is None: self._expr_ast = None if val is not None and self._expr_eval is not None: self._expr_eval.error = [] self._expr_eval.error_msg = None self._expr_ast = self._expr_eval.parse(val) check_ast_errors(self._expr_eval) self._expr_deps = get_ast_names(self._expr_ast) def __array__(self): """array""" return array(float(self._getval())) def __str__(self): """string""" return self.__repr__() def __abs__(self): """abs""" return abs(self._getval()) def __neg__(self): """neg""" return -self._getval() def __pos__(self): """positive""" return +self._getval() def __bool__(self): """bool""" return self._getval() != 0 def __int__(self): """int""" return int(self._getval()) def __float__(self): """float""" return float(self._getval()) def __trunc__(self): """trunc""" return self._getval().__trunc__() def __add__(self, other): """+""" return self._getval() + other def __sub__(self, other): """-""" return self._getval() - other def __truediv__(self, other): """/""" return self._getval() / other def __floordiv__(self, other): """//""" return self._getval() // other def __divmod__(self, other): """divmod""" return divmod(self._getval(), other) def __mod__(self, other): """%""" return self._getval() % other def __mul__(self, other): """*""" return self._getval() * other def __pow__(self, other): """**""" return self._getval() ** other def __gt__(self, other): """>""" return self._getval() > other def __ge__(self, other): """>=""" return self._getval() >= other def __le__(self, other): """<=""" return self._getval() <= other def __lt__(self, other): """<""" return self._getval() < other def __eq__(self, other): """==""" return self._getval() == other def __ne__(self, other): """!=""" return self._getval() != other def __radd__(self, other): """+ (right)""" return other + self._getval() def __rtruediv__(self, other): """/ (right)""" return other / self._getval() def __rdivmod__(self, other): """divmod (right)""" return divmod(other, self._getval()) def __rfloordiv__(self, other): """// (right)""" return other // self._getval() def __rmod__(self, other): """% (right)""" return other % self._getval() def __rmul__(self, other): """* (right)""" return other * self._getval() def __rpow__(self, other): """** (right)""" return other ** self._getval() def __rsub__(self, other): """- (right)""" return other - self._getval() def isParameter(x): """Test for Parameter-ness.""" msg = 'The isParameter function will be removed in the next release.' 
warnings.warn(FutureWarning(msg)) return (isinstance(x, Parameter) or x.__class__.__name__ == 'Parameter') lmfit-py-1.0.0/lmfit/printfuncs.py000066400000000000000000000316121357751001700171420ustar00rootroot00000000000000"""Functions to display fitting results and confidence intervals.""" from math import log10 import re import warnings import numpy as np try: import numdifftools # noqa: F401 HAS_NUMDIFFTOOLS = True except ImportError: HAS_NUMDIFFTOOLS = False def alphanumeric_sort(s, _nsre=re.compile('([0-9]+)')): """Sort alphanumeric string.""" return [int(text) if text.isdigit() else text.lower() for text in re.split(_nsre, s)] def getfloat_attr(obj, attr, length=11): """Format an attribute of an object for printing.""" val = getattr(obj, attr, None) if val is None: return 'unknown' elif isinstance(val, int): return '%d' % val elif isinstance(val, float): return gformat(val, length=length).strip() return repr(val) def gformat(val, length=11): """Format a number with '%g'-like format. Except that: a) the length of the output string will be of the requested length. b) positive numbers will have a leading blank. b) the precision will be as high as possible. c) trailing zeros will not be trimmed. The precision will typically be length-7. Parameters ---------- val : float Value to be formatted. length : int, optional Length of output string (default is 11). Returns ------- str String of specified length. Notes ------ Positive values will have leading blank. """ try: expon = int(log10(abs(val))) except (OverflowError, ValueError): expon = 0 length = max(length, 7) form = 'e' prec = length - 7 if abs(expon) > 99: prec -= 1 elif ((expon > 0 and expon < (prec+4)) or (expon <= 0 and -expon < (prec-1))): form = 'f' prec += 4 if expon > 0: prec -= expon fmt = '{0: %i.%i%s}' % (length, prec, form) return fmt.format(val)[:length] CORREL_HEAD = '[[Correlations]] (unreported correlations are < %.3f)' def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1, sort_pars=False): """Generate a report of the fitting results. The report contains the best-fit values for the parameters and their uncertainties and correlations. Parameters ---------- inpars : Parameters Input Parameters from fit or MinimizerResult returned from a fit. modelpars : Parameters, optional Known Model Parameters. show_correl : bool, optional Whether to show list of sorted correlations (default is True). min_correl : float, optional Smallest correlation in absolute value to show (default is 0.1). sort_pars : bool or callable, optional Whether to show parameter names sorted in alphanumerical order. If False (default), then the parameters will be listed in the order they were added to the Parameters dictionary. If callable, then this (one argument) function is used to extract a comparison key from each list element. Returns ------- string Multi-line text of fit report. 
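# --- Illustrative standalone sketch (not part of this module): producing the text
# report for a simple fit. Assumes lmfit and numpy are installed; the data are synthetic.
import numpy as np
from lmfit import fit_report
from lmfit.models import ExponentialModel

x = np.linspace(0, 10, 101)
y = 7.0*np.exp(-x/2.5) + np.random.normal(scale=0.1, size=x.size)

model = ExponentialModel()
params = model.guess(y, x=x)
result = model.fit(y, params, x=x)
print(fit_report(result))        # see also result.fit_report()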
""" from .parameter import Parameters if isinstance(inpars, Parameters): result, params = None, inpars if hasattr(inpars, 'params'): result = inpars params = inpars.params if sort_pars: if callable(sort_pars): key = sort_pars else: key = alphanumeric_sort parnames = sorted(params, key=key) else: # dict.keys() returns a KeysView in py3, and they're indexed # further down parnames = list(params.keys()) buff = [] add = buff.append namelen = max([len(n) for n in parnames]) if result is not None: add("[[Fit Statistics]]") add(" # fitting method = %s" % (result.method)) add(" # function evals = %s" % getfloat_attr(result, 'nfev')) add(" # data points = %s" % getfloat_attr(result, 'ndata')) add(" # variables = %s" % getfloat_attr(result, 'nvarys')) add(" chi-square = %s" % getfloat_attr(result, 'chisqr')) add(" reduced chi-square = %s" % getfloat_attr(result, 'redchi')) add(" Akaike info crit = %s" % getfloat_attr(result, 'aic')) add(" Bayesian info crit = %s" % getfloat_attr(result, 'bic')) if not result.errorbars: add("## Warning: uncertainties could not be estimated:") if result.method in ('leastsq', 'least_squares') or HAS_NUMDIFFTOOLS: parnames_varying = [par for par in result.params if result.params[par].vary] for name in parnames_varying: par = params[name] space = ' '*(namelen-len(name)) if par.init_value and np.allclose(par.value, par.init_value): add(' %s:%s at initial value' % (name, space)) if (np.allclose(par.value, par.min) or np.allclose(par.value, par.max)): add(' %s:%s at boundary' % (name, space)) else: add(" this fitting method does not natively calculate uncertainties") add(" and numdifftools is not installed for lmfit to do this. Use") add(" `pip install numdifftools` for lmfit to estimate uncertainties") add(" with this fitting method.") add("[[Variables]]") for name in parnames: par = params[name] space = ' '*(namelen-len(name)) nout = "%s:%s" % (name, space) inval = '(init = ?)' if par.init_value is not None: inval = '(init = %.7g)' % par.init_value if modelpars is not None and name in modelpars: inval = '%s, model_value = %.7g' % (inval, modelpars[name].value) try: sval = gformat(par.value) except (TypeError, ValueError): sval = ' Non Numeric Value?' if par.stderr is not None: serr = gformat(par.stderr) try: spercent = '({:.2%})'.format(abs(par.stderr/par.value)) except ZeroDivisionError: spercent = '' sval = '%s +/-%s %s' % (sval, serr, spercent) if par.vary: add(" %s %s %s" % (nout, sval, inval)) elif par.expr is not None: add(" %s %s == '%s'" % (nout, sval, par.expr)) else: add(" %s % .7g (fixed)" % (nout, par.value)) if show_correl: correls = {} for i, name in enumerate(parnames): par = params[name] if not par.vary: continue if hasattr(par, 'correl') and par.correl is not None: for name2 in parnames[i+1:]: if (name != name2 and name2 in par.correl and abs(par.correl[name2]) > min_correl): correls["%s, %s" % (name, name2)] = par.correl[name2] sort_correl = sorted(correls.items(), key=lambda it: abs(it[1])) sort_correl.reverse() if len(sort_correl) > 0: add(CORREL_HEAD % min_correl) maxlen = max([len(k) for k in list(correls.keys())]) for name, val in sort_correl: lspace = max(0, maxlen - len(name)) add(' C(%s)%s = % .3f' % (name, (' '*30)[:lspace], val)) return '\n'.join(buff) def fitreport_html_table(result, show_correl=True, min_correl=0.1): """Generate a report of the fitting result as an HTML table.""" html = [] add = html.append def stat_row(label, val, val2=''): add('%s%s%s' % (label, val, val2)) add('

<h2>Fit Statistics</h2>') add('<table>') stat_row('fitting method', result.method) stat_row('# function evals', result.nfev) stat_row('# data points', result.ndata) stat_row('# variables', result.nvarys) stat_row('chi-square', gformat(result.chisqr)) stat_row('reduced chi-square', gformat(result.redchi)) stat_row('Akaike info crit.', gformat(result.aic)) stat_row('Bayesian info crit.', gformat(result.bic)) add('</table>') add('<h2>Variables</h2>') add(result.params._repr_html_()) if show_correl: correls = [] parnames = list(result.params.keys()) for i, name in enumerate(result.params): par = result.params[name] if not par.vary: continue if hasattr(par, 'correl') and par.correl is not None: for name2 in parnames[i+1:]: if (name != name2 and name2 in par.correl and abs(par.correl[name2]) > min_correl): correls.append((name, name2, par.correl[name2])) if len(correls) > 0: sort_correls = sorted(correls, key=lambda val: abs(val[2])) sort_correls.reverse() extra = '(unreported correlations are < %.3f)' % (min_correl) add('<h2>Correlations %s</h2>' % extra) add('<table>') for name1, name2, val in sort_correls: stat_row(name1, name2, "%.4f" % val) add('</table>
    ') return ''.join(html) def params_html_table(params): """Return an HTML representation of Parameters.""" has_err = any([p.stderr is not None for p in params.values()]) has_expr = any([p.expr is not None for p in params.values()]) has_brute = any([p.brute_step is not None for p in params.values()]) html = [] add = html.append def cell(x, cat='td'): return add('<%s> %s ' % (cat, x, cat)) add('') headers = ['name', 'value'] if has_err: headers.extend(['standard error', 'relative error']) headers.extend(['initial value', 'min', 'max', 'vary']) if has_expr: headers.append('expression') if has_brute: headers.append('brute step') for h in headers: cell(h, cat='th') add('') for par in params.values(): rows = [par.name, gformat(par.value)] if has_err: serr = '' if par.stderr is not None: serr = gformat(par.stderr) try: spercent = '({:.2%})'.format(abs(par.stderr/par.value)) except ZeroDivisionError: spercent = '' rows.extend([serr, spercent]) rows.extend((par.init_value, gformat(par.min), gformat(par.max), '%s' % par.vary)) if has_expr: expr = '' if par.expr is not None: expr = par.expr rows.append(expr) if has_brute: brute_step = 'None' if par.brute_step is not None: brute_step = gformat(par.brute_step) rows.append(brute_step) add('') for r in rows: cell(r) add('') add('
    ') return ''.join(html) def report_errors(params, **kws): """Print a report for fitted params: see error_report().""" warnings.warn("The function 'report_errors' is deprecated as of lmfit " "0.9.14 and will be removed in the next release. Please " "use 'report_fit' instead.", DeprecationWarning) print(fit_report(params, **kws)) def report_fit(params, **kws): """Print a report of the fitting results.""" print(fit_report(params, **kws)) def ci_report(ci, with_offset=True, ndigits=5): """Return text of a report for confidence intervals. Parameters ---------- with_offset : bool, optional Whether to subtract best value from all other values (default is True). ndigits : int, optional Number of significant digits to show (default is 5). Returns ------- str Text of formatted report on confidence intervals. """ maxlen = max([len(i) for i in ci]) buff = [] add = buff.append def convp(x): """Convert probabilities into header for CI report.""" if abs(x[0]) < 1.e-2: return "_BEST_" return "%.2f%%" % (x[0]*100) title_shown = False fmt_best = fmt_diff = "{0:.%if}" % ndigits if with_offset: fmt_diff = "{0:+.%if}" % ndigits for name, row in ci.items(): if not title_shown: add("".join([''.rjust(maxlen+1)] + [i.rjust(ndigits+5) for i in map(convp, row)])) title_shown = True thisrow = [" %s:" % name.ljust(maxlen)] offset = 0.0 if with_offset: for cval, val in row: if abs(cval) < 1.e-2: offset = val for cval, val in row: if cval < 1.e-2: sval = fmt_best.format(val) else: sval = fmt_diff.format(val-offset) thisrow.append(sval.rjust(ndigits+5)) add("".join(thisrow)) return '\n'.join(buff) def report_ci(ci): """Print a report for confidence intervals.""" print(ci_report(ci)) lmfit-py-1.0.0/lmfit/ui/000077500000000000000000000000001357751001700150075ustar00rootroot00000000000000lmfit-py-1.0.0/lmfit/ui/__init__.py000066400000000000000000000030541357751001700171220ustar00rootroot00000000000000# These variables are used at the end of the module to decide # which BaseFitter subclass the Fitter will point to. import warnings from .basefitter import BaseFitter has_ipython, has_matplotlib = False, False try: import matplotlib except ImportError: pass else: has_matplotlib = True try: import IPython except ImportError: warnings.warn("lmfit.Fitter will use basic mode, not IPython: need matplotlib") else: _ipy_msg1 = "lmfit.Fitter will use basic mode, not IPython: need IPython2." _ipy_msg2 = "lmfit.Fitter will use basic mode, not IPython: could not get IPython version" _ipy_msg3 = "lmfit.Fitter will use basic mode, not IPython: need ipywidgets." try: major_version = IPython.release.version_info[0] if major_version < 2: warnings.warn(_ipy_msg1) elif major_version > 3: # After IPython 3, widgets were moved to a separate package. # There is a shim to allow the old import, but the package has to be # installed for that to work. try: import ipywidgets except ImportError: warnings.warn(_ipy_msg3) else: # has_ipython = iPython installed and we are in an IPython session. 
has_ipython = IPython.get_ipython() is not None except Exception as e: warnings.warn(_ipy_msg2) Fitter = BaseFitter if has_matplotlib: from .basefitter import MPLFitter Fitter = MPLFitter if has_ipython: from .ipy_fitter import NotebookFitter Fitter = NotebookFitter lmfit-py-1.0.0/lmfit/ui/basefitter.py000066400000000000000000000300711357751001700175120ustar00rootroot00000000000000import warnings # noqa: F401 from asteval import Interpreter from asteval.astutils import NameFinder import numpy as np # noqa: F401 from ..model import Model # noqa: F401 from ..models import ExponentialModel # arbitrary default from ..parameter import check_ast_errors _COMMON_DOC = """ This an interactive container for fitting models to particular data. It maintains the attributes `current_params` and `current_result`. When its fit() method is called, the best fit becomes the new `current_params`. The most basic usage is iteratively fitting data, taking advantage of this stateful memory that keep the parameters between each fit. """ _COMMON_EXAMPLES_DOC = """ Examples -------- >>> fitter = Fitter(data, model=SomeModel, x=x) >>> fitter.model # This property can be changed, to try different models on the same # data with the same independent vars. # (This is especially handy in the notebook.) >>> fitter.current_params # This copy of the model's Parameters is updated after each fit. >>> fitter.fit() # Perform a fit using fitter.current_params as a guess. # Optionally, pass a params argument or individual keyword arguments # to override current_params. >>> fitter.current_result # This is the result of the latest fit. It contain the usual # copies of the Parameters, in the attributes params and init_params. >>> fitter.data = new_data # If this property is updated, the `current_params` are retained an used # as an initial guess if fit() is called again. """ class BaseFitter: __doc__ = _COMMON_DOC + """ Parameters ---------- data : array-like model : lmfit.Model optional initial Model to use, maybe be set or changed later """ + _COMMON_EXAMPLES_DOC def __init__(self, data, model=None, **kwargs): self._data = data self.kwargs = kwargs # GUI-based subclasses need a default value for the menu of models, # and so an arbitrary default is applied here, for uniformity # among the subclasses. if model is None: model = ExponentialModel self.model = model def _on_model_value_change(self, name, value): self.model = value def _on_fit_button_click(self, b): self.fit() def _on_guess_button_click(self, b): self.guess() @property def data(self): return self._data @data.setter def data(self, value): self._data = value @property def model(self): return self._model @model.setter def model(self, value): if callable(value): model = value() else: model = value self._model = model self.current_result = None self._current_params = model.make_params() # Use these to evaluate any Parameters that use expressions. self.asteval = Interpreter() self.namefinder = NameFinder() self._finalize_model(value) self.guess() def _finalize_model(self, value): # subclasses optionally override to update display here pass @property def current_params(self): """Each time fit() is called, these will be updated to reflect the latest best params. They will be used as the initial guess for the next fit, unless overridden by arguments to fit().""" return self._current_params @current_params.setter def current_params(self, new_params): # Copy contents, but retain original params objects. 
for name, par in new_params.items(): self._current_params[name].value = par.value self._current_params[name].expr = par.expr self._current_params[name].vary = par.vary self._current_params[name].min = par.min self._current_params[name].max = par.max # Compute values for expression-based Parameters. self.__assign_deps(self._current_params) for _, par in self._current_params.items(): if par.value is None: self.__update_paramval(self._current_params, par.name) self._finalize_params() def _finalize_params(self): # subclasses can override this to pass params to display pass def guess(self): count_indep_vars = len(self.model.independent_vars) guessing_successful = True try: if count_indep_vars == 0: guess = self.model.guess(self._data) elif count_indep_vars == 1: key = self.model.independent_vars[0] val = self.kwargs[key] d = {key: val} guess = self.model.guess(self._data, **d) self.current_params = guess except NotImplementedError: guessing_successful = False return guessing_successful def __assign_deps(self, params): # N.B. This does not use self.current_params but rather # new Parameters that are being built by self.guess(). for name, par in params.items(): if par.expr is not None: par.ast = self.asteval.parse(par.expr) check_ast_errors(self.asteval.error) par.deps = [] self.namefinder.names = [] self.namefinder.generic_visit(par.ast) for symname in self.namefinder.names: if (symname in self.current_params and symname not in par.deps): par.deps.append(symname) self.asteval.symtable[name] = par.value if par.name is None: par.name = name def __update_paramval(self, params, name): # N.B. This does not use self.current_params but rather # new Parameters that are being built by self.guess(). par = params[name] if getattr(par, 'expr', None) is not None: if getattr(par, 'ast', None) is None: par.ast = self.asteval.parse(par.expr) if par.deps is not None: for dep in par.deps: self.__update_paramval(params, dep) par.value = self.asteval.run(par.ast) out = check_ast_errors(self.asteval.error) if out is not None: self.asteval.raise_exception(None) self.asteval.symtable[name] = par.value def fit(self, *args, **kwargs): "Use current_params unless overridden by arguments passed here." guess = dict(self.current_params) guess.update(self.kwargs) # from __init__, e.g. x=x guess.update(kwargs) self.current_result = self.model.fit(self._data, *args, **guess) self.current_params = self.current_result.params class MPLFitter(BaseFitter): # This is a small elaboration on BaseModel; it adds a plot() # method that depends on matplotlib. It adds several plot- # styling arguments to the signature. 
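# --- Illustrative standalone sketch (not part of this module): typical use of the
# stateful fitter when matplotlib is available. Assumes lmfit, numpy, and matplotlib
# are installed; the data are synthetic.
import numpy as np
from lmfit.models import GaussianModel
from lmfit.ui import Fitter     # MPLFitter or NotebookFitter, depending on environment

x = np.linspace(-5, 5, 201)
y = 3.0*np.exp(-x**2/2.0) + np.random.normal(scale=0.05, size=x.size)

fitter = Fitter(y, model=GaussianModel, x=x)
fitter.fit()     # starts from current_params (filled in by guess())
fitter.plot()    # data, initial guess, and best fit on one Axes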
__doc__ = _COMMON_DOC + """ Parameters ---------- data : array-like model : lmfit.Model optional initial Model to use, maybe be set or changed later Additional Parameters --------------------- axes_style : dictionary representing style keyword arguments to be passed through to `Axes.set(...)` data_style : dictionary representing style keyword arguments to be passed through to the matplotlib `plot()` command the plots the data points init_style : dictionary representing style keyword arguments to be passed through to the matplotlib `plot()` command the plots the initial fit line best_style : dictionary representing style keyword arguments to be passed through to the matplotlib `plot()` command the plots the best fit line **kwargs : independent variables or extra arguments, passed like `x=x` """ + _COMMON_EXAMPLES_DOC def __init__(self, data, model=None, axes_style={}, data_style={}, init_style={}, best_style={}, **kwargs): self.axes_style = axes_style self.data_style = data_style self.init_style = init_style self.best_style = best_style super().__init__(data, model, **kwargs) def plot(self, axes_style={}, data_style={}, init_style={}, best_style={}, ax=None): """Plot data, initial guess fit, and best fit. Optional style arguments pass keyword dictionaries through to their respective components of the matplotlib plot. Precedence is: 1. arguments passed to this function, plot() 2. arguments passed to the Fitter when it was first declared 3. hard-coded defaults Parameters --------------------- axes_style : dictionary representing style keyword arguments to be passed through to `Axes.set(...)` data_style : dictionary representing style keyword arguments to be passed through to the matplotlib `plot()` command the plots the data points init_style : dictionary representing style keyword arguments to be passed through to the matplotlib `plot()` command the plots the initial fit line best_style : dictionary representing style keyword arguments to be passed through to the matplotlib `plot()` command the plots the best fit line ax : matplotlib.Axes optional `Axes` object. Axes will be generated if not provided. """ try: import matplotlib.pyplot as plt except ImportError: raise ImportError("Matplotlib is required to use this Fitter. 
" "Use BaseFitter or a subclass thereof " "that does not depend on matplotlib.") # Configure style _axes_style = {} # none, but this is here for possible future use _axes_style.update(self.axes_style) _axes_style.update(axes_style) _data_style = dict(color='blue', marker='o', linestyle='none') _data_style.update(**_normalize_kwargs(self.data_style, 'line2d')) _data_style.update(**_normalize_kwargs(data_style, 'line2d')) _init_style = dict(color='gray') _init_style.update(**_normalize_kwargs(self.init_style, 'line2d')) _init_style.update(**_normalize_kwargs(init_style, 'line2d')) _best_style = dict(color='red') _best_style.update(**_normalize_kwargs(self.best_style, 'line2d')) _best_style.update(**_normalize_kwargs(best_style, 'line2d')) if ax is None: fig, ax = plt.subplots() count_indep_vars = len(self.model.independent_vars) if count_indep_vars == 0: ax.plot(self._data, **_data_style) elif count_indep_vars == 1: indep_var = self.kwargs[self.model.independent_vars[0]] ax.plot(indep_var, self._data, **_data_style) else: raise NotImplementedError("Cannot plot models with more than one " "indepedent variable.") result = self.current_result # alias for brevity if not result: ax.set(**_axes_style) return # short-circuit the rest of the plotting if count_indep_vars == 0: ax.plot(result.init_fit, **_init_style) ax.plot(result.best_fit, **_best_style) elif count_indep_vars == 1: ax.plot(indep_var, result.init_fit, **_init_style) ax.plot(indep_var, result.best_fit, **_best_style) ax.set(**_axes_style) def _normalize_kwargs(kwargs, kind='patch'): """Convert matplotlib keywords from short to long form.""" # Source: # github.com/tritemio/FRETBursts/blob/fit_experim/fretbursts/burst_plot.py if kind == 'line2d': long_names = dict(c='color', ls='linestyle', lw='linewidth', mec='markeredgecolor', mew='markeredgewidth', mfc='markerfacecolor', ms='markersize',) elif kind == 'patch': long_names = dict(c='color', ls='linestyle', lw='linewidth', ec='edgecolor', fc='facecolor',) for short_name in long_names: if short_name in kwargs: kwargs[long_names[short_name]] = kwargs.pop(short_name) return kwargs lmfit-py-1.0.0/lmfit/ui/ipy_fitter.py000066400000000000000000000242231357751001700175420ustar00rootroot00000000000000import IPython from IPython.display import clear_output, display import numpy as np from ..model import Model from .basefitter import _COMMON_DOC, _COMMON_EXAMPLES_DOC, MPLFitter # Note: If IPython is not available of the version is < 2, # this module will not be imported, and a different Fitter. # Widgets were only experimental in IPython 2.x, but this does work there. # Handle the change in naming from 2.x to 3.x. 
IPY2 = IPython.release.version_info[0] == 2 IPY3 = IPython.release.version_info[0] == 3 if IPY2: from IPython.html.widgets import DropdownWidget as Dropdown from IPython.html.widgets import ButtonWidget as Button from IPython.html.widgets import ContainerWidget from IPython.html.widgets import FloatTextWidget as FloatText from IPython.html.widgets import CheckboxWidget as Checkbox class HBox(ContainerWidget): def __init__(self, *args, **kwargs): self.add_class('hbox') super(self, ContainerWidget).__init__(*args, **kwargs) elif IPY3: # as of IPython 3.x: from IPython.html.widgets import Dropdown from IPython.html.widgets import Button from IPython.html.widgets import HBox from IPython.html.widgets import FloatText from IPython.html.widgets import Checkbox else: # as of IPython 4.x+: from ipywidgets import Dropdown from ipywidgets import Button from ipywidgets import HBox from ipywidgets import FloatText from ipywidgets import Checkbox class ParameterWidgetGroup: """Construct several widgets that together represent a Parameter. This will only be used if IPython is available.""" def __init__(self, par): self.par = par # Define widgets. self.value_text = FloatText(description=par.name, min=self.par.min, max=self.par.max) self.value_text.width = 100 self.min_text = FloatText(description='min', max=self.par.max) self.min_text.width = 100 self.max_text = FloatText(description='max', min=self.par.min) self.max_text.width = 100 self.min_checkbox = Checkbox(description='min') self.max_checkbox = Checkbox(description='max') self.vary_checkbox = Checkbox(description='vary') # Set widget values and visibility. if par.value is not None: self.value_text.value = self.par.value min_unset = self.par.min is None or self.par.min == -np.inf max_unset = self.par.max is None or self.par.max == np.inf self.min_checkbox.value = not min_unset self.min_text.visible = not min_unset self.min_text.value = self.par.min self.max_checkbox.value = not max_unset self.max_text.visible = not max_unset self.max_text.value = self.par.max self.vary_checkbox.value = self.par.vary # Configure widgets to sync with par attributes. self.value_text.on_trait_change(self._on_value_change, 'value') self.min_text.on_trait_change(self._on_min_value_change, 'value') self.max_text.on_trait_change(self._on_max_value_change, 'value') self.min_checkbox.on_trait_change(self._on_min_checkbox_change, 'value') self.max_checkbox.on_trait_change(self._on_max_checkbox_change, 'value') self.vary_checkbox.on_trait_change(self._on_vary_change, 'value') def _on_value_change(self, name, value): self.par.value = value def _on_min_checkbox_change(self, name, value): self.min_text.visible = value if value: # -np.inf does not play well with a numerical text field, # so set min to -1 if activated (and back to -inf if deactivated). self.min_text.value = -1 self.par.min = self.min_text.value self.value_text.min = self.min_text.value else: self.par.min = None def _on_max_checkbox_change(self, name, value): self.max_text.visible = value if value: # np.inf does not play well with a numerical text field, # so set max to 1 if activated (and back to inf if deactivated). 
self.max_text.value = 1 self.par.max = self.max_text.value self.value_text.max = self.max_text.value else: self.par.max = None def _on_min_value_change(self, name, value): self.par.min = value self.value_text.min = value self.max_text.min = value def _on_max_value_change(self, name, value): self.par.max = value self.value_text.max = value self.min_text.max = value def _on_vary_change(self, name, value): self.par.vary = value # self.value_text.disabled = not value def close(self): # one convenience method to close (i.e., hide and disconnect) all # widgets in this group self.value_text.close() self.min_text.close() self.max_text.close() self.vary_checkbox.close() self.min_checkbox.close() self.max_checkbox.close() def _repr_html_(self): box = HBox() box.children = [self.value_text, self.vary_checkbox, self.min_checkbox, self.min_text, self.max_checkbox, self.max_text] display(box) # Make it easy to set the widget attributes directly. @property def value(self): return self.value_text.value @value.setter def value(self, value): self.value_text.value = value @property def vary(self): return self.vary_checkbox.value @vary.setter def vary(self, value): self.vary_checkbox.value = value @property def min(self): return self.min_text.value @min.setter def min(self, value): self.min_text.value = value @property def max(self): return self.max_text.value @max.setter def max(self, value): self.max_text.value = value @property def name(self): return self.par.name class NotebookFitter(MPLFitter): __doc__ = _COMMON_DOC + """ If IPython is available, it uses the IPython notebook's rich display to fit data interactively in a web-based GUI. The Parameters are represented in a web-based form that is kept in sync with `current_params`. All subclasses to Model, including user-defined ones, are shown in a drop-down menu. Clicking the "Fit" button updates a plot, as above, and updates the Parameters in the form to reflect the best fit. Parameters ---------- data : array-like model : lmfit.Model optional initial Model to use, maybe be set or changed later all_models : list optional list of Models to populate drop-down menu, by default all built-in and user-defined subclasses of Model are used Additional Parameters --------------------- axes_style : dictionary representing style keyword arguments to be passed through to `Axes.set(...)` data_style : dictionary representing style keyword arguments to be passed through to the matplotlib `plot()` command the plots the data points init_style : dictionary representing style keyword arguments to be passed through to the matplotlib `plot()` command the plots the initial fit line best_style : dictionary representing style keyword arguments to be passed through to the matplotlib `plot()` command the plots the best fit line **kwargs : independent variables or extra arguments, passed like `x=x` """ + _COMMON_EXAMPLES_DOC def __init__(self, data, model=None, all_models=None, axes_style={}, data_style={}, init_style={}, best_style={}, **kwargs): # Dropdown menu of all subclasses of Model, incl. user-defined. self.models_menu = Dropdown() # Dropbox API is very different between IPy 2.x and 3.x. if IPY2: if all_models is None: all_models = {m.__name__: m for m in Model.__subclasses__()} self.models_menu.values = all_models else: if all_models is None: all_models = [(m.__name__, m) for m in Model.__subclasses__()] self.models_menu.options = all_models self.models_menu.on_trait_change(self._on_model_value_change, 'value') # Button to trigger fitting. 
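        # (as described in the class docstring, clicking "Fit" re-runs the
        # fit, updates the plot, and syncs the parameter widgets with the
        # best-fit result)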
self.fit_button = Button(description='Fit') self.fit_button.on_click(self._on_fit_button_click) # Button to trigger guessing. self.guess_button = Button(description='Auto-Guess') self.guess_button.on_click(self._on_guess_button_click) # Parameter widgets are not built here. They are (re-)built when # the model is (re-)set. super().__init__(data, model, axes_style, data_style, init_style, best_style, **kwargs) def _repr_html_(self): display(self.models_menu) button_box = HBox() button_box.children = [self.fit_button, self.guess_button] display(button_box) for pw in self.param_widgets: display(pw) self.plot() def guess(self): guessing_successful = super().guess() self.guess_button.disabled = not guessing_successful def _finalize_model(self, value): first_run = not hasattr(self, 'param_widgets') if not first_run: # Remove all Parameter widgets, and replace them with widgets # for the new model. for pw in self.param_widgets: pw.close() self.models_menu.value = value self.param_widgets = [ParameterWidgetGroup(p) for _, p in self._current_params.items()] if not first_run: for pw in self.param_widgets: display(pw) def _finalize_params(self): for pw in self.param_widgets: pw.value = self._current_params[pw.name].value pw.min = self._current_params[pw.name].min pw.max = self._current_params[pw.name].max pw.vary = self._current_params[pw.name].vary def plot(self): clear_output(wait=True) super().plot() def fit(self): super().fit() self.plot() lmfit-py-1.0.0/publish_docs.sh000066400000000000000000000016671357751001700163030ustar00rootroot00000000000000installdir='/www/apache/htdocs/software/python/lmfit' docbuild='doc/_build' cd doc echo '# Making docs' make all cd ../ echo '# Building tarball of docs' mkdir _tmpdoc cp -pr doc/lmfit.pdf _tmpdoc/lmfit.pdf cp -pr doc/_build/html/* _tmpdoc/. cd _tmpdoc tar czf ../../lmfit_docs.tar.gz . cd .. rm -rf _tmpdoc # echo "# Switching to gh-pages branch" git checkout gh-pages if [ $? -ne 0 ] ; then echo ' failed.' exit fi tar xzf ../lmfit_docs.tar.gz . echo "# commit changes to gh-pages branch" git commit -am "changed docs" if [ $? -ne 0 ] ; then echo ' failed.' exit fi echo "# Pushing docs to github" git push echo "# switch back to master branch" git checkout master if [ $? -ne 0 ] ; then echo ' failed.' exit fi # install locally echo "# Installing docs to CARS web pages" cp ../lmfit_docs.tar.gz $installdir/.. cd $installdir if [ $? -ne 0 ] ; then echo ' failed.' 
  exit
fi
tar xvzf ../lmfit_docs.tar.gz
lmfit-py-1.0.0/requirements.txt000066400000000000000000000000741357751001700165440ustar00rootroot00000000000000asteval>=0.9.16
numpy>=1.16
scipy>=1.2
uncertainties>=3.0.1
lmfit-py-1.0.0/setup.cfg000066400000000000000000000013761357751001700151060ustar00rootroot00000000000000[versioneer]
vcs = git
style = pep440
versionfile_source = lmfit/_version.py
versionfile_build = lmfit/_version.py
tag_prefix =
parentdir_prefix = lmfit-

[isort]
skip = versioneer.py,lmfit/_version.py,lmfit/__init__.py,doc/conf.py
known_third_party = asteval,dill,emcee,IPython,matplotlib,numdifftools,numpy,NISTModels,pandas,pytest,scipy,six,uncertainties
known_first_party = lmfit
force_sort_within_sections = True

[rstcheck]
report = warning
ignore_substitutions = release
ignore_roles = scipydoc,numpydoc
ignore_directives = autoclass,autodoc,autofunction,automethod,jupyter-execute

[flake8]
ignore = E121,E123,E126,E226,W503,W504,E501,E731
exclude = doc/conf.py, versioneer.py, lmfit/__init__.py, lmfit/ui/__init__.py

[egg_info]
tag_build =
tag_date = 0
lmfit-py-1.0.0/setup.py000066400000000000000000000047751357751001700150040ustar00rootroot00000000000000#!/usr/bin/env python
from setuptools import setup

import versioneer

long_desc = """A library for least-squares minimization and data fitting in
Python. Built on top of scipy.optimize, lmfit provides a Parameter object
which can be set as fixed or free, can have upper and/or lower bounds, or can
be written in terms of algebraic constraints of other Parameters. The user
writes a function to be minimized as a function of these Parameters, and the
scipy.optimize methods are used to find the optimal values for the Parameters.
The Levenberg-Marquardt (leastsq) method is the default minimization
algorithm, and provides estimated standard errors and correlations between
varied Parameters. Other minimization methods, including Nelder-Mead's
downhill simplex, Powell's method, BFGS, Sequential Least Squares, and others
are also supported. Bounds and constraints can be placed on Parameters for all
of these methods.

In addition, methods for explicitly calculating confidence intervals are
provided for exploring minimization problems where the approximation of
estimating Parameter uncertainties from the covariance matrix is questionable.
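
An illustrative sketch of typical usage (the model, parameter names, and data
below are invented for demonstration and are not part of the package)::

    import numpy as np
    from lmfit import Parameters, minimize

    def residual(params, x, data):
        # exponential decay model minus the data to be fitted
        model = params['amp'] * np.exp(-params['decay'] * x)
        return model - data

    params = Parameters()
    params.add('amp', value=1.0, min=0)
    params.add('decay', value=0.5)

    x = np.linspace(0, 10, 201)
    data = 3.0 * np.exp(-0.7 * x) + np.random.normal(scale=0.05, size=x.size)
    result = minimize(residual, params, args=(x, data))
    print(result.params['amp'].value, result.params['decay'].value)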
""" setup(name='lmfit', version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), author='LMFit Development Team', author_email='matt.newville@gmail.com', url='https://lmfit.github.io/lmfit-py/', download_url='https://lmfit.github.io//lmfit-py/', install_requires=['asteval>=0.9.16', 'numpy>=1.16', 'scipy>=1.2', 'uncertainties>=3.0.1'], python_requires='>=3.5', license='BSD-3', description="Least-Squares Minimization with Bounds and Constraints", long_description=long_desc, platforms=['Windows', 'Linux', 'Mac OS X'], classifiers=['Development Status :: 5 - Production/Stable', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: Scientific/Engineering', ], keywords='curve-fitting, least-squares minimization', tests_require=['pytest'], package_dir={'lmfit': 'lmfit'}, packages=['lmfit', 'lmfit.ui'], ) lmfit-py-1.0.0/tests/000077500000000000000000000000001357751001700144215ustar00rootroot00000000000000lmfit-py-1.0.0/tests/NISTModels.py000066400000000000000000000133111357751001700167130ustar00rootroot00000000000000import os from numpy import arctan, array, cos, exp, log, sin from lmfit import Parameters thisdir, thisfile = os.path.split(__file__) NIST_DIR = os.path.join(thisdir, '..', 'NIST_STRD') def read_params(params): if isinstance(params, Parameters): return [par.value for par in params.values()] else: return params def Bennet5(b, x, y=0): b = read_params(b) return y - b[0] * (b[1]+x)**(-1/b[2]) def BoxBOD(b, x, y=0): b = read_params(b) return y - b[0]*(1-exp(-b[1]*x)) def Chwirut(b, x, y=0): b = read_params(b) return y - exp(-b[0]*x)/(b[1]+b[2]*x) def DanWood(b, x, y=0): b = read_params(b) return y - b[0]*x**b[1] def ENSO(b, x, y=0): b = read_params(b) pi = 3.141592653589793238462643383279 return y - b[0] + (b[1]*cos(2*pi*x/12) + b[2]*sin(2*pi*x/12) + b[4]*cos(2*pi*x/b[3]) + b[5]*sin(2*pi*x/b[3]) + b[7]*cos(2*pi*x/b[6]) + b[8]*sin(2*pi*x/b[6])) def Eckerle4(b, x, y=0): b = read_params(b) return y - (b[0]/b[1]) * exp(-0.5*((x-b[2])/b[1])**2) def Gauss(b, x, y=0): b = read_params(b) return y - b[0]*exp(-b[1]*x) + (b[2]*exp(-(x-b[3])**2 / b[4]**2) + b[5]*exp(-(x-b[6])**2 / b[7]**2)) def Hahn1(b, x, y=0): b = read_params(b) return y - ((b[0]+b[1]*x+b[2]*x**2+b[3]*x**3) / (1+b[4]*x+b[5]*x**2+b[6]*x**3)) def Kirby(b, x, y=0): b = read_params(b) return y - (b[0] + b[1]*x + b[2]*x**2) / (1 + b[3]*x + b[4]*x**2) def Lanczos(b, x, y=0): b = read_params(b) return y - b[0]*exp(-b[1]*x) + b[2]*exp(-b[3]*x) + b[4]*exp(-b[5]*x) def MGH09(b, x, y=0): b = read_params(b) return y - b[0]*(x**2+x*b[1]) / (x**2+x*b[2]+b[3]) def MGH10(b, x, y=0): b = read_params(b) return y - b[0] * exp(b[1]/(x+b[2])) def MGH17(b, x, y=0): b = read_params(b) return y - b[0] + b[1]*exp(-x*b[3]) + b[2]*exp(-x*b[4]) def Misra1a(b, x, y=0): b = read_params(b) return y - b[0]*(1-exp(-b[1]*x)) def Misra1b(b, x, y=0): b = read_params(b) return y - b[0] * (1-(1+b[1]*x/2)**(-2)) def Misra1c(b, x, y=0): b = read_params(b) return y - b[0] * (1-(1+2*b[1]*x)**(-.5)) def Misra1d(b, x, y=0): b = read_params(b) return y - b[0]*b[1]*x*((1+b[1]*x)**(-1)) def Nelson(b, x, y=None): b = read_params(b) x1 = x[:, 0] x2 = x[:, 1] if y is None: return - exp(b[0] - b[1]*x1 * exp(-b[2]*x2)) return log(y) - (b[0] - b[1]*x1 * exp(-b[2]*x2)) def Rat42(b, x, y=0): b = read_params(b) return y - b[0] / 
(1+exp(b[1]-b[2]*x)) def Rat43(b, x, y=0): b = read_params(b) return y - b[0] / ((1+exp(b[1]-b[2]*x))**(1/b[3])) def Roszman1(b, x, y=0): b = read_params(b) pi = 3.141592653589793238462643383279 return y - b[0] - b[1]*x - arctan(b[2]/(x-b[3]))/pi def Thurber(b, x, y=0): b = read_params(b) return y - ((b[0] + b[1]*x + b[2]*x**2 + b[3]*x**3) / (1 + b[4]*x + b[5]*x**2 + b[6]*x**3)) # Model name fcn, #fitting params, dim of x Models = {'Bennett5': (Bennet5, 3, 1), 'BoxBOD': (BoxBOD, 2, 1), 'Chwirut1': (Chwirut, 3, 1), 'Chwirut2': (Chwirut, 3, 1), 'DanWood': (DanWood, 2, 1), 'ENSO': (ENSO, 9, 1), 'Eckerle4': (Eckerle4, 3, 1), 'Gauss1': (Gauss, 8, 1), 'Gauss2': (Gauss, 8, 1), 'Gauss3': (Gauss, 8, 1), 'Hahn1': (Hahn1, 7, 1), 'Kirby2': (Kirby, 5, 1), 'Lanczos1': (Lanczos, 6, 1), 'Lanczos2': (Lanczos, 6, 1), 'Lanczos3': (Lanczos, 6, 1), 'MGH09': (MGH09, 4, 1), 'MGH10': (MGH10, 3, 1), 'MGH17': (MGH17, 5, 1), 'Misra1a': (Misra1a, 2, 1), 'Misra1b': (Misra1b, 2, 1), 'Misra1c': (Misra1c, 2, 1), 'Misra1d': (Misra1d, 2, 1), 'Nelson': (Nelson, 3, 2), 'Rat42': (Rat42, 3, 1), 'Rat43': (Rat43, 4, 1), 'Roszman1': (Roszman1, 4, 1), 'Thurber': (Thurber, 7, 1)} def ReadNistData(dataset): """NIST STRD data is in a simple, fixed format with line numbers being significant! """ finp = open(os.path.join(NIST_DIR, "%s.dat" % dataset), 'r') lines = [l[:-1] for l in finp.readlines()] finp.close() ModelLines = lines[30:39] ParamLines = lines[40:58] DataLines = lines[60:] words = ModelLines[1].strip().split() nparams = int(words[0]) start1 = [0]*nparams start2 = [0]*nparams certval = [0]*nparams certerr = [0]*nparams for i, text in enumerate(ParamLines[:nparams]): [s1, s2, val, err] = [float(x) for x in text.split('=')[1].split()] start1[i] = s1 start2[i] = s2 certval[i] = val certerr[i] = err for t in ParamLines[nparams:]: t = t.strip() if ':' not in t: continue val = float(t.split(':')[1]) if t.startswith('Residual Sum of Squares'): sum_squares = val elif t.startswith('Residual Standard Deviation'): std_dev = val elif t.startswith('Degrees of Freedom'): nfree = int(val) elif t.startswith('Number of Observations'): ndata = int(val) y, x = [], [] for d in DataLines: vals = [float(i) for i in d.strip().split()] y.append(vals[0]) if len(vals) > 2: x.append(vals[1:]) else: x.append(vals[1]) y = array(y) x = array(x) out = {'y': y, 'x': x, 'nparams': nparams, 'ndata': ndata, 'nfree': nfree, 'start1': start1, 'start2': start2, 'sum_squares': sum_squares, 'std_dev': std_dev, 'cert': certval, 'cert_values': certval, 'cert_stderr': certerr} return out lmfit-py-1.0.0/tests/conftest.py000066400000000000000000000015751357751001700166300ustar00rootroot00000000000000import os import numpy as np import pytest import lmfit @pytest.fixture def minimizer_Alpine02(): """Return a lmfit Minimizer object for the Alpine02 function.""" def residual_Alpine02(params): x0 = params['x0'].value x1 = params['x1'].value return np.prod(np.sqrt(x0) * np.sin(x0)) * np.prod(np.sqrt(x1) * np.sin(x1)) # create Parameters and set initial values and bounds pars = lmfit.Parameters() pars.add_many(('x0', 1., True, 0.0, 10.0), ('x1', 1., True, 0.0, 10.0)) mini = lmfit.Minimizer(residual_Alpine02, pars) return mini @pytest.fixture def peakdata(): """Return the peak-like test data.""" data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples', 'test_peak.dat')) return data.T lmfit-py-1.0.0/tests/lmfit_testutils.py000066400000000000000000000016661357751001700202370ustar00rootroot00000000000000from numpy.testing import assert_allclose from lmfit 
import Parameter def assert_paramval(param, val, tol=1.e-3): """assert that a named parameter's value is close to expected value""" assert(isinstance(param, Parameter)) pval = param.value assert_allclose([pval], [val], rtol=tol, atol=tol, err_msg='', verbose=True) def assert_paramattr(param, attr, val): """assert that a named parameter's value is a value""" assert(isinstance(param, Parameter)) assert(hasattr(param, attr)) assert(getattr(param, attr) == val) def assert_between(val, minval, maxval): """assert that a value is between minval and maxval""" assert(val >= minval) assert(val <= maxval) def assert_param_between(param, minval, maxval): """assert that a named parameter's value is between minval and maxval""" assert(isinstance(param, Parameter)) assert_between(param.value, minval, maxval) lmfit-py-1.0.0/tests/test_1variable.py000066400000000000000000000025431357751001700177040ustar00rootroot00000000000000# test of fitting one variable # From Nick Schurch import numpy from numpy.testing import assert_allclose import lmfit def linear_chisq(params, x, data, errs=None): """Calculates chi-squared residuals for linear model.""" if not isinstance(params, lmfit.parameter.Parameters): msg = "Params argument is not a lmfit parameter set" raise TypeError(msg) if "m" not in params.keys(): msg = "No slope parameter (m) defined in the model" raise KeyError(msg) if "c" not in params.keys(): msg = "No intercept parameter (c) defined in the model" raise KeyError(msg) model = params["m"]*x + params["c"] residuals = (data-model) if errs is not None: residuals = residuals/errs return(residuals) def test_1var(): rands = [-0.21698284, 0.41900591, 0.02349374, -0.218552, -0.3513699, 0.33418304, 0.04226855, 0.213303, 0.45948731, 0.33587736] x = numpy.arange(10)+1 y = numpy.arange(10)+1+rands params = lmfit.Parameters() params.add(name="m", value=1.0, vary=True) params.add(name="c", value=0.0, vary=False) out = lmfit.minimize(linear_chisq, params, args=(x, y)) assert_allclose(params['m'].value, 1.025, rtol=0.02, atol=0.02) assert(len(params) == 2) assert(out.nvarys == 1) assert(out.chisqr > 0.01) assert(out.chisqr < 5.00) lmfit-py-1.0.0/tests/test_NIST_Strd.py000066400000000000000000000153161357751001700176110ustar00rootroot00000000000000import math from optparse import OptionParser from NISTModels import Models, ReadNistData from lmfit import Parameters, minimize def ndig(a, b): """Precision for NIST values.""" return round(-math.log10((abs(abs(a)-abs(b)) + 1.e-15) / abs(b))) ABAR = ' |----------------+----------------+------------------+-------------------|' def Compare_NIST_Results(DataSet, myfit, params, NISTdata): buff = [' ======================================', ' %s: ' % DataSet, ' | Parameter Name | Value Found | Certified Value | # Matching Digits |'] buff.append(ABAR) val_dig_min = 200 err_dig_min = 200 fmt = ' | %s | % -.7e | % -.7e | %2i |' for i in range(NISTdata['nparams']): parname = 'b%i' % (i+1) par = params[parname] thisval = par.value certval = NISTdata['cert_values'][i] vdig = ndig(thisval, certval) pname = (parname + ' value ' + ' '*14)[:14] buff.append(fmt % (pname, thisval, certval, vdig)) val_dig_min = min(val_dig_min, vdig) thiserr = par.stderr certerr = NISTdata['cert_stderr'][i] if thiserr is not None and myfit.errorbars: edig = ndig(thiserr, certerr) ename = (parname + ' stderr' + ' '*14)[:14] buff.append(fmt % (ename, thiserr, certerr, edig)) err_dig_min = min(err_dig_min, edig) buff.append(ABAR) sumsq = NISTdata['sum_squares'] try: chi2 = myfit.chisqr buff.append(' | Sum of 
Squares | %.7e | %.7e | %2i |' % (chi2, sumsq, ndig(chi2, sumsq))) except Exception: pass buff.append(ABAR) if not myfit.errorbars: buff.append(' | * * * * COULD NOT ESTIMATE UNCERTAINTIES * * * * |') err_dig_min = 0 if err_dig_min < 199: buff.append(' Worst agreement: %i digits for value, %i digits for error ' % (val_dig_min, err_dig_min)) else: buff.append(' Worst agreement: %i digits' % (val_dig_min)) return val_dig_min, '\n'.join(buff) def NIST_Dataset(DataSet, method='leastsq', start='start2', plot=False, verbose=False): NISTdata = ReadNistData(DataSet) resid, npar, dimx = Models[DataSet] y = NISTdata['y'] x = NISTdata['x'] params = Parameters() for i in range(npar): pname = 'b%i' % (i+1) pval1 = NISTdata[start][i] params.add(pname, value=pval1) myfit = minimize(resid, params, method=method, args=(x,), kws={'y': y}) digs, buff = Compare_NIST_Results(DataSet, myfit, myfit.params, NISTdata) if verbose: print(buff) return digs > 1 def build_usage(): modelnames = [] ms = '' for d in sorted(Models.keys()): ms = ms + ' %s ' % d if len(ms) > 55: modelnames.append(ms) ms = ' ' modelnames.append(ms) modelnames = '\n'.join(modelnames) usage = """ === Test Fit to NIST StRD Models === usage: ------ python fit_NIST.py [options] Model Start where Start is one of 'start1','start2' or 'cert', for different starting values, and Model is one of %s if Model = 'all', all models and starting values will be run. options: -------- -m name of fitting method. One of: leastsq, nelder, powell, lbfgsb, bfgs, tnc, cobyla, slsqp, cg, newto-cg leastsq (Levenberg-Marquardt) is the default """ % modelnames return usage ############################ def run_interactive(): usage = build_usage() parser = OptionParser(usage=usage, prog="fit-NIST.py") parser.add_option("-m", "--method", dest="method", metavar='METH', default='leastsq', help="set method name, default = 'leastsq'") (opts, args) = parser.parse_args() dset = '' start = 'start2' if len(args) > 0: dset = args[0] if len(args) > 1: start = args[1] if dset.lower() == 'all': tpass = 0 tfail = 0 failures = [] dsets = sorted(Models.keys()) for dset in dsets: for start in ('start1', 'start2', 'cert'): if NIST_Dataset(dset, method=opts.method, start=start, plot=False, verbose=True): tpass += 1 else: tfail += 1 failures.append(" %s (starting at '%s')" % (dset, start)) print('--------------------------------------') print(' Fit Method: %s ' % opts.method) print(' Final Results: %i pass, %i fail.' 
% (tpass, tfail)) print(' Tests Failed for:\n %s' % '\n '.join(failures)) print('--------------------------------------') elif dset not in Models: print(usage) else: return NIST_Dataset(dset, method=opts.method, start=start, plot=False, verbose=True) def RunNIST_Model(model): out1 = NIST_Dataset(model, start='start1', plot=False, verbose=False) out2 = NIST_Dataset(model, start='start2', plot=False, verbose=False) print("NIST Test", model, out1, out2) assert(out1 or out2) return out1 or out2 def test_Bennett5(): return RunNIST_Model('Bennett5') def test_BoxBOD(): return RunNIST_Model('BoxBOD') def test_Chwirut1(): return RunNIST_Model('Chwirut1') def test_Chwirut2(): return RunNIST_Model('Chwirut2') def test_DanWood(): return RunNIST_Model('DanWood') def test_ENSO(): return RunNIST_Model('ENSO') def test_Eckerle4(): return RunNIST_Model('Eckerle4') def test_Gauss1(): return RunNIST_Model('Gauss1') def test_Gauss2(): return RunNIST_Model('Gauss2') def test_Gauss3(): return RunNIST_Model('Gauss3') def test_Hahn1(): return RunNIST_Model('Hahn1') def test_Kirby2(): return RunNIST_Model('Kirby2') def test_Lanczos1(): return RunNIST_Model('Lanczos1') def test_Lanczos2(): return RunNIST_Model('Lanczos2') def test_Lanczos3(): return RunNIST_Model('Lanczos3') def test_MGH09(): return RunNIST_Model('MGH09') def test_MGH10(): return RunNIST_Model('MGH10') def test_MGH17(): return RunNIST_Model('MGH17') def test_Misra1a(): return RunNIST_Model('Misra1a') def test_Misra1b(): return RunNIST_Model('Misra1b') def test_Misra1c(): return RunNIST_Model('Misra1c') def test_Misra1d(): return RunNIST_Model('Misra1d') def test_Nelson(): return RunNIST_Model('Nelson') def test_Rat42(): return RunNIST_Model('Rat42') def test_Rat43(): return RunNIST_Model('Rat43') def test_Roszman1(): return RunNIST_Model('Roszman1') def test_Thurber(): return RunNIST_Model('Thurber') if __name__ == '__main__': run_interactive() lmfit-py-1.0.0/tests/test_algebraic_constraint.py000066400000000000000000000075571357751001700222250ustar00rootroot00000000000000"""Tests for algebraic parameter constraints.""" import numpy as np import pytest from lmfit import Minimizer, Model, Parameters from lmfit.lineshapes import gaussian, lorentzian @pytest.fixture def minimizer(): """Return the Minimizer object.""" def residual(pars, x, sigma=None, data=None): """Define objective function.""" yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g']) yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l']) model = yg + yl + pars['line_off'] + x * pars['line_slope'] if data is None: return model if sigma is None: return model - data return (model-data) / sigma # generate synthetic data n = 601 xmin = 0. 
xmax = 20.0 x = np.linspace(xmin, xmax, n) data = (gaussian(x, 21, 8.1, 1.2) + lorentzian(x, 10, 9.6, 2.4) + np.random.normal(scale=0.23, size=n) + x*0.5) # create initial Parameters pars = Parameters() pars.add(name='amp_g', value=10) pars.add(name='cen_g', value=9) pars.add(name='wid_g', value=1) pars.add(name='amp_tot', value=20) pars.add(name='amp_l', expr='amp_tot - amp_g') pars.add(name='cen_l', expr='1.5+cen_g') pars.add(name='wid_l', expr='2*wid_g') pars.add(name='line_slope', value=0.0) pars.add(name='line_off', value=0.0) sigma = 0.021 # estimate of data error (for all data points) mini = Minimizer(residual, pars, fcn_args=(x,), fcn_kws={'sigma': sigma, 'data': data}) return mini def test_algebraic_constraints(minimizer): """Test algebraic constraints.""" result = minimizer.minimize(method='leastsq') pfit = result.params assert pfit['cen_l'].value == 1.5 + pfit['cen_g'].value assert pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value assert pfit['wid_l'].value == 2.0 * pfit['wid_g'].value def test_algebraic_constraints_function(minimizer): """Test constraints with a user-defined function added to symbol table.""" def width_func(wpar): return 2.5*wpar minimizer.params._asteval.symtable['wfun'] = width_func minimizer.params.add(name='wid_l', expr='wfun(wid_g)') result = minimizer.minimize(method='leastsq') pfit = result.params assert pfit['cen_l'].value == 1.5 + pfit['cen_g'].value assert pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value assert pfit['wid_l'].value == 2.5 * pfit['wid_g'].value def test_constraints_function_call(): """Test a constraint with simple function call in Model class.""" x = [1723, 1773, 1823, 1523, 1773, 1033.03078, 1042.98077, 1047.90937, 1053.95899, 1057.94906, 1063.13788, 1075.74218, 1086.03102] y = [0.79934, -0.31876, -0.46852, 0.05, -0.21, 11.1708, 10.31844, 9.73069, 9.21319, 9.12457, 9.05243, 8.66407, 8.29664] def VFT(T, ninf=-3, A=5e3, T0=800): return ninf + A/(T-T0) vftModel = Model(VFT) vftModel.set_param_hint('D', vary=False, expr=r'A*log(10)/T0') result = vftModel.fit(y, T=x) assert 2600.0 < result.params['A'].value < 2650.0 assert 7.0 < result.params['D'].value < 7.5 def test_constraints(minimizer): """Test changing of algebraic constraints.""" result = minimizer.minimize(method='leastsq') pfit = result.params assert pfit['cen_l'].value == 1.5 + pfit['cen_g'].value assert pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value assert pfit['wid_l'].value == 2.0*pfit['wid_g'].value # now, change fit slightly and re-run minimizer.params['wid_l'].expr = '1.25*wid_g' result = minimizer.minimize(method='leastsq') pfit = result.params assert pfit['cen_l'].value == 1.5 + pfit['cen_g'].value assert pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value assert pfit['wid_l'].value == 1.25*pfit['wid_g'].value lmfit-py-1.0.0/tests/test_ampgo.py000066400000000000000000000102021357751001700171300ustar00rootroot00000000000000"""Tests for the AMPGO global minimization algorithm.""" import sys import numpy as np from numpy.testing import assert_allclose import pytest import lmfit # correct result for Alpine02 function global_optimum = [7.91705268, 4.81584232] fglob = -6.12950 @pytest.mark.parametrize("tabustrategy", ['farthest', 'oldest']) def test_ampgo_Alpine02(minimizer_Alpine02, tabustrategy): """Test AMPGO algorithm on Alpine02 function.""" kws = {'tabustrategy': tabustrategy} out = minimizer_Alpine02.minimize(method='ampgo', **kws) out_x = np.array([out.params['x0'].value, out.params['x1'].value]) 
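    # the fitted point should coincide with the known global optimum of the
    # Alpine02 function (fglob / global_optimum, defined at the top of this
    # module), within the loose tolerances used below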
assert_allclose(out.residual, fglob, rtol=1e-5) assert_allclose(min(out_x), min(global_optimum), rtol=1e-3) assert_allclose(max(out_x), max(global_optimum), rtol=1e-3) assert 'global' in out.ampgo_msg def test_ampgo_bounds(minimizer_Alpine02): """Test AMPGO algorithm with bounds.""" # change boundaries of parameters pars_bounds = lmfit.Parameters() pars_bounds.add_many(('x0', 1., True, 5.0, 15.0), ('x1', 1., True, 2.5, 7.5)) out = minimizer_Alpine02.minimize(params=pars_bounds, method='ampgo') assert 5.0 <= out.params['x0'].value <= 15.0 assert 2.5 <= out.params['x1'].value <= 7.5 def test_ampgo_disp_true(minimizer_Alpine02, capsys): """Test AMPGO algorithm with disp is True.""" # disp to False for L-BFGS-B to avoid too much output... kws = {'disp': True, 'local_opts': {'disp': False}} minimizer_Alpine02.minimize(method='ampgo', **kws) captured = capsys.readouterr() assert "Starting MINIMIZATION Phase" in captured.out def test_ampgo_maxfunevals(minimizer_Alpine02): """Test AMPGO algorithm with maxfunevals.""" # disp to False for L-BFGS-B to avoid too much output... kws = {'maxfunevals': 5, 'disp': True, 'local_opts': {'disp': False}} out = minimizer_Alpine02.minimize(method='ampgo', **kws) assert out.ampgo_msg == 'Maximum number of function evaluations exceeded' def test_ampgo_local_solver(minimizer_Alpine02): """Test AMPGO algorithm with local solver.""" kws = {'local': 'Nelder-Mead'} msg = r'Method Nelder-Mead cannot handle constraints nor bounds' with pytest.warns(RuntimeWarning, match=msg): out = minimizer_Alpine02.minimize(method='ampgo', **kws) out_x = np.array([out.params['x0'].value, out.params['x1'].value]) assert 'ampgo' and 'Nelder-Mead' in out.method assert_allclose(out.residual, fglob, rtol=1e-5) assert_allclose(min(out_x), min(global_optimum), rtol=1e-3) assert_allclose(max(out_x), max(global_optimum), rtol=1e-3) assert 'global' in out.ampgo_msg def test_ampgo_invalid_local_solver(minimizer_Alpine02): """Test AMPGO algorithm with invalid local solvers.""" kws = {'local': 'leastsq'} with pytest.raises(Exception, match=r'Invalid local solver selected'): minimizer_Alpine02.minimize(method='ampgo', **kws) def test_ampgo_invalid_tabulistsize(minimizer_Alpine02): """Test AMPGO algorithm with invalid tabulistsize.""" kws = {'tabulistsize': 0} with pytest.raises(Exception, match=r'Invalid tabulistsize specified'): minimizer_Alpine02.minimize(method='ampgo', **kws) def test_ampgo_invalid_tabustrategy(minimizer_Alpine02): """Test AMPGO algorithm with invalid tabustrategy.""" kws = {'tabustrategy': 'unknown'} with pytest.raises(Exception, match=r'Invalid tabustrategy specified'): minimizer_Alpine02.minimize(method='ampgo', **kws) @pytest.mark.skipif(sys.version_info.major == 2, reason="does not throw an exception in Python 2") def test_ampgo_local_opts(minimizer_Alpine02): """Test AMPGO algorithm, pass local_opts to solver.""" # use local_opts to pass maxiter to the local optimizer: providing a string # whereas an integer is required, this should throw an error. 
kws = {'local_opts': {'maxiter': 'string'}} with pytest.raises(TypeError): minimizer_Alpine02.minimize(method='ampgo', **kws) # for coverage: make sure that both occurences are reached kws = {'local_opts': {'maxiter': 10}, 'maxfunevals': 50} minimizer_Alpine02.minimize(method='ampgo', **kws) lmfit-py-1.0.0/tests/test_basicfit.py000066400000000000000000000023321357751001700176160ustar00rootroot00000000000000import numpy as np from lmfit import Parameters, minimize from lmfit_testutils import assert_paramval def test_basic(): # create data to be fitted x = np.linspace(0, 15, 301) data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) + np.random.normal(size=len(x), scale=0.2)) # define objective function: returns the array to be minimized def fcn2min(params, x, data): """ model decaying sine wave, subtract data""" amp = params['amp'] shift = params['shift'] omega = params['omega'] decay = params['decay'] model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay) return model - data # create a set of Parameters params = Parameters() params.add('amp', value=10, min=0) params.add('decay', value=0.1) params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2) params.add('omega', value=3.0) # do fit, here with leastsq model result = minimize(fcn2min, params, args=(x, data)) assert(result.nfev > 5) assert(result.nfev < 500) assert(result.chisqr > 1) assert(result.nvarys == 4) assert_paramval(result.params['amp'], 5.03, tol=0.05) assert_paramval(result.params['omega'], 2.0, tol=0.05) lmfit-py-1.0.0/tests/test_basinhopping.py000066400000000000000000000071171357751001700205210ustar00rootroot00000000000000"""Tests for the basinhopping minimization algorithm.""" import numpy as np from numpy.testing import assert_allclose import pytest from scipy.optimize import basinhopping import lmfit def test_basinhopping_lmfit_vs_scipy(): """Test basinhopping in lmfit versus scipy.""" # SciPy def func(x): return np.cos(14.5*x - 0.3) + (x+0.2) * x minimizer_kwargs = {'method': 'L-BFGS-B'} x0 = [1.] 
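    # run SciPy's basinhopping directly; the same seed and L-BFGS-B local
    # solver are passed to the lmfit Minimizer below, so the two results
    # should agree closely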
ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs, seed=7) # lmfit def residual(params): x = params['x'].value return np.cos(14.5*x - 0.3) + (x+0.2) * x pars = lmfit.Parameters() pars.add_many(('x', 1.)) kws = {'minimizer_kwargs': {'method': 'L-BFGS-B'}, 'seed': 7} mini = lmfit.Minimizer(residual, pars) out = mini.minimize(method='basinhopping', **kws) assert_allclose(out.residual, ret.fun) assert_allclose(out.params['x'].value, ret.x, rtol=1e-5) def test_basinhopping_2d_lmfit_vs_scipy(): """Test basinhopping in lmfit versus scipy.""" # SciPy def func2d(x): return np.cos(14.5*x[0] - 0.3) + (x[1]+0.2) * x[1] + (x[0]+0.2) * x[0] minimizer_kwargs = {'method': 'L-BFGS-B'} x0 = [1.0, 1.0] ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, seed=7) # lmfit def residual_2d(params): x0 = params['x0'].value x1 = params['x1'].value return np.cos(14.5*x0 - 0.3) + (x1+0.2) * x1 + (x0+0.2) * x0 pars = lmfit.Parameters() pars.add_many(('x0', 1.), ('x1', 1.)) mini = lmfit.Minimizer(residual_2d, pars) kws = {'minimizer_kwargs': {'method': 'L-BFGS-B'}, 'seed': 7} out = mini.minimize(method='basinhopping', **kws) assert_allclose(out.residual, ret.fun) assert_allclose(out.params['x0'].value, ret.x[0], rtol=1e-5) assert_allclose(out.params['x1'].value, ret.x[1], rtol=1e-5) def test_basinhopping_Alpine02(minimizer_Alpine02): """Test basinhopping on Alpine02 function.""" global_optimum = [7.91705268, 4.81584232] fglob = -6.12950 kws = {'minimizer_kwargs': {'method': 'L-BFGS-B'}, 'seed': 7} out = minimizer_Alpine02.minimize(method='basinhopping', **kws) out_x = np.array([out.params['x0'].value, out.params['x1'].value]) assert_allclose(out.residual, fglob, rtol=1e-5) assert_allclose(min(out_x), min(global_optimum), rtol=1e-3) assert_allclose(max(out_x), max(global_optimum), rtol=1e-3) assert out.method == 'basinhopping' def test_basinhopping_bounds(minimizer_Alpine02): """Test basinhopping algorithm with bounds.""" # change boundaries of parameters pars_bounds = lmfit.Parameters() pars_bounds.add_many(('x0', 1., True, 5.0, 15.0), ('x1', 1., True, 2.5, 7.5)) kws = {'minimizer_kwargs': {'method': 'L-BFGS-B'}, 'seed': 7} out = minimizer_Alpine02.minimize(params=pars_bounds, method='basinhopping', **kws) assert 5.0 <= out.params['x0'].value <= 15.0 assert 2.5 <= out.params['x1'].value <= 7.5 def test_basinhopping_solver_options(minimizer_Alpine02): """Test basinhopping algorithm, pass incorrect options to solver.""" # use minimizer_kwargs to pass an invalid method for local solver to # scipy.basinhopping kws = {'minimizer_kwargs': {'method': 'unknown'}} with pytest.raises(ValueError, match=r'Unknown solver'): minimizer_Alpine02.minimize(method='basinhopping', **kws) # pass an incorrect value for niter to scipy.basinhopping kws = {'niter': 'string'} with pytest.raises(TypeError): minimizer_Alpine02.minimize(method='basinhopping', **kws) lmfit-py-1.0.0/tests/test_bounded_jacobian.py000066400000000000000000000016201357751001700212770ustar00rootroot00000000000000import numpy as np from lmfit import Parameters, minimize from lmfit_testutils import assert_paramval def test_bounded_jacobian(): pars = Parameters() pars.add('x0', value=2.0) pars.add('x1', value=2.0, min=1.5) global jac_count jac_count = 0 def resid(params): x0 = params['x0'] x1 = params['x1'] return np.array([10 * (x1 - x0*x0), 1-x0]) def jac(params): global jac_count jac_count += 1 x0 = params['x0'] return np.array([[-20*x0, 10], [-1, 0]]) out0 = minimize(resid, pars, Dfun=None) assert_paramval(out0.params['x0'], 1.2243, tol=0.02) 
assert_paramval(out0.params['x1'], 1.5000, tol=0.02) assert(jac_count == 0) out1 = minimize(resid, pars, Dfun=jac) assert_paramval(out1.params['x0'], 1.2243, tol=0.02) assert_paramval(out1.params['x1'], 1.5000, tol=0.02) assert(jac_count > 5) lmfit-py-1.0.0/tests/test_bounds.py000066400000000000000000000026121357751001700173250ustar00rootroot00000000000000from numpy import exp, linspace, pi, random, sign, sin from lmfit import Parameters, minimize from lmfit_testutils import assert_paramval def test_bounds(): p_true = Parameters() p_true.add('amp', value=14.0) p_true.add('period', value=5.4321) p_true.add('shift', value=0.12345) p_true.add('decay', value=0.01000) def residual(pars, x, data=None): amp = pars['amp'] per = pars['period'] shift = pars['shift'] decay = pars['decay'] if abs(shift) > pi/2: shift = shift - sign(shift)*pi model = amp*sin(shift + x/per) * exp(-x*x*decay*decay) if data is None: return model return (model - data) n = 1500 xmin = 0. xmax = 250.0 random.seed(0) noise = random.normal(scale=2.80, size=n) x = linspace(xmin, xmax, n) data = residual(p_true, x) + noise fit_params = Parameters() fit_params.add('amp', value=13.0, max=20, min=0.0) fit_params.add('period', value=2, max=10) fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.) fit_params.add('decay', value=0.02, max=0.10, min=0.00) out = minimize(residual, fit_params, args=(x,), kws={'data': data}) assert(out.nfev > 10) assert(out.nfree > 50) assert(out.chisqr > 1.0) assert_paramval(out.params['decay'], 0.01, tol=1.e-2) assert_paramval(out.params['shift'], 0.123, tol=1.e-2) lmfit-py-1.0.0/tests/test_brute.py000066400000000000000000000262561357751001700171660ustar00rootroot00000000000000"""Tests for the brute force algorithm (aka 'grid-search'). Use example problem described in the SciPy documentation: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brute.html """ import pickle import numpy as np from numpy.testing import assert_allclose, assert_equal import pytest from scipy import optimize import lmfit def func_scipy(z, *params): x, y = z a, b, c, d, e, f, g, h, i, j, k, l, scale = params f1 = a * x**2 + b*x*y + c * y**2 + d*x + e*y + f f2 = -g*np.exp(-((x-h)**2 + (y-i)**2) / scale) f3 = -j*np.exp(-((x-k)**2 + (y-l)**2) / scale) return f1 + f2 + f3 params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5) def func_lmfit(p): par = p.valuesdict() f1 = (par['a'] * par['x']**2 + par['b']*par['x']*par['y'] + par['c'] * par['y']**2 + par['d']*par['x'] + par['e']*par['y'] + par['f']) f2 = (-1.0*par['g']*np.exp(-((par['x']-par['h'])**2 + (par['y']-par['i'])**2) / par['scale'])) f3 = (-1.0*par['j']*np.exp(-((par['x']-par['k'])**2 + (par['y']-par['l'])**2) / par['scale'])) return f1 + f2 + f3 @pytest.fixture def params_lmfit(): """Return lmfit.Parameters class with initial values and bounds.""" params = lmfit.Parameters() params.add_many( ('a', 2, False), ('b', 3, False), ('c', 7, False), ('d', 8, False), ('e', 9, False), ('f', 10, False), ('g', 44, False), ('h', -1, False), ('i', 2, False), ('j', 26, False), ('k', 1, False), ('l', -2, False), ('scale', 0.5, False), ('x', -4.0, True, -4.0, 4.0, None, None), ('y', -2.0, True, -2.0, 2.0, None, None)) return params def test_brute_lmfit_vs_scipy_default(params_lmfit): """TEST 1: using finite bounds with Ns=20, keep=50 and brute_step=None.""" assert params_lmfit['x'].brute_step is None assert params_lmfit['y'].brute_step is None rranges = ((-4, 4), (-2, 2)) ret = optimize.brute(func_scipy, rranges, args=params, full_output=True, Ns=20, finish=None) mini = 
lmfit.Minimizer(func_lmfit, params_lmfit) out = mini.minimize(method='brute', Ns=20) assert out.method == 'brute' assert_equal(out.nfev, 20**len(out.var_names)) # Ns * nmb varying params assert_equal(len(out.candidates), 50) # top-50 candidates are stored assert_equal(ret[2], out.brute_grid) # grid identical assert_equal(ret[3], out.brute_Jout) # function values on grid identical # best-fit values identical / stored correctly in MinimizerResult assert_equal(ret[0][0], out.brute_x0[0]) assert_equal(ret[0][0], out.params['x'].value) assert_equal(ret[0][1], out.brute_x0[1]) assert_equal(ret[0][1], out.params['y'].value) assert_equal(ret[1], out.brute_fval) assert_equal(ret[1], out.residual) def test_brute_lmfit_vs_scipy_Ns(params_lmfit): """TEST 2: using finite bounds, with Ns=40 and brute_step=None.""" rranges = ((-4, 4), (-2, 2)) ret = optimize.brute(func_scipy, rranges, args=params, full_output=True, Ns=40, finish=None) mini = lmfit.Minimizer(func_lmfit, params_lmfit) out = mini.minimize(method='brute', Ns=40) assert_equal(ret[2], out.brute_grid) # grid identical assert_equal(ret[3], out.brute_Jout) # function values on grid identical assert_equal(out.nfev, 40**len(out.var_names)) # Ns * nmb varying params # best-fit values and function value identical assert_equal(ret[0][0], out.brute_x0[0]) assert_equal(ret[0][1], out.brute_x0[1]) assert_equal(ret[1], out.brute_fval) def test_brute_lmfit_vs_scipy_stepsize(params_lmfit): """TEST 3: using finite bounds and brute_step for both parameters.""" # set brute_step for parameters and assert whether that worked correctly params_lmfit['x'].set(brute_step=0.25) params_lmfit['y'].set(brute_step=0.25) assert_equal(params_lmfit['x'].brute_step, 0.25) assert_equal(params_lmfit['y'].brute_step, 0.25) rranges = (slice(-4, 4, 0.25), slice(-2, 2, 0.25)) ret = optimize.brute(func_scipy, rranges, args=params, full_output=True, Ns=20, finish=None) mini = lmfit.Minimizer(func_lmfit, params_lmfit) out = mini.minimize(method='brute') assert_equal(ret[2], out.brute_grid) # grid identical assert_equal(ret[3], out.brute_Jout) # function values on grid identical # best-fit values and function value identical assert_equal(ret[0][0], out.brute_x0[0]) assert_equal(ret[0][1], out.brute_x0[1]) assert_equal(ret[1], out.brute_fval) points_x = np.arange(rranges[0].start, rranges[0].stop, rranges[0].step).size points_y = np.arange(rranges[1].start, rranges[1].stop, rranges[1].step).size nmb_evals = points_x * points_y assert_equal(out.nfev, nmb_evals) def test_brute_lmfit_vs_scipy_Ns_stepsize(params_lmfit): """TEST 4: using finite bounds, using Ns, brute_step for 'x'.""" # set brute_step for x to 0.15 and reset to None for y and assert result params_lmfit['x'].set(brute_step=0.15) assert_equal(params_lmfit['x'].brute_step, 0.15) rranges = (slice(-4, 4, 0.15), (-2, 2)) ret = optimize.brute(func_scipy, rranges, args=params, full_output=True, Ns=10, finish=None) mini = lmfit.Minimizer(func_lmfit, params_lmfit) out = mini.minimize(method='brute', Ns=10) assert_equal(ret[2], out.brute_grid) # grid identical assert_equal(ret[3], out.brute_Jout) # function values on grid identical points_x = np.arange(rranges[0].start, rranges[0].stop, rranges[0].step).size points_y = 10 nmb_evals = points_x * points_y assert_equal(out.nfev, nmb_evals) # best-fit values and function value identical assert_equal(ret[0][0], out.brute_x0[0]) assert_equal(ret[0][1], out.brute_x0[1]) assert_equal(ret[1], out.brute_fval) def test_brute_upper_bounds_and_brute_step(params_lmfit): """TEST 5: using finite 
upper bounds, Ns=20, and brute_step specified.""" Ns = 20 params_lmfit['x'].set(min=-np.inf) params_lmfit['x'].set(brute_step=0.25) mini = lmfit.Minimizer(func_lmfit, params_lmfit) out = mini.minimize(method='brute', Ns=Ns) assert_equal(out.params['x'].min, -np.inf) assert_equal(out.params['x'].brute_step, 0.25) grid_x_expected = np.linspace(params_lmfit['x'].max - Ns*params_lmfit['x'].brute_step, params_lmfit['x'].max, Ns, False) grid_x = np.unique([par.ravel() for par in out.brute_grid][0]) assert_allclose(grid_x, grid_x_expected) grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].max, Ns) grid_y = np.unique([par.ravel() for par in out.brute_grid][1]) assert_allclose(grid_y, grid_y_expected) def test_brute_lower_bounds_and_brute_step(params_lmfit): """TEST 6: using finite lower bounds, Ns=15, and brute_step specified.""" Ns = 15 params_lmfit['y'].set(max=np.inf) params_lmfit['y'].set(brute_step=0.1) mini = lmfit.Minimizer(func_lmfit, params_lmfit) out = mini.minimize(method='brute', Ns=Ns) grid_x_expected = np.linspace(params_lmfit['x'].min, params_lmfit['x'].max, Ns) grid_x = np.unique([par.ravel() for par in out.brute_grid][0]) assert_allclose(grid_x, grid_x_expected) grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].min + Ns*params_lmfit['y'].brute_step, Ns, False) grid_y = np.unique([par.ravel() for par in out.brute_grid][1]) assert_allclose(grid_y, grid_y_expected) def test_brute_no_bounds_with_brute_step(params_lmfit): """TEST 7: using no bounds, but brute_step specified (Ns=15).""" Ns = 15 params_lmfit['x'].set(min=-np.inf, max=np.inf, brute_step=0.1) params_lmfit['y'].set(min=-np.inf, max=np.inf, brute_step=0.2) mini = lmfit.Minimizer(func_lmfit, params_lmfit) out = mini.minimize(method='brute', Ns=15) grid_x_expected = np.linspace(params_lmfit['x'].value - (Ns//2)*params_lmfit['x'].brute_step, params_lmfit['x'].value + (Ns//2)*params_lmfit['x'].brute_step, Ns) grid_x = np.unique([par.ravel() for par in out.brute_grid][0]) assert_allclose(grid_x, grid_x_expected) grid_y_expected = np.linspace(params_lmfit['y'].value - (Ns//2)*params_lmfit['y'].brute_step, params_lmfit['y'].value + (Ns//2)*params_lmfit['y'].brute_step, Ns) grid_y = np.unique([par.ravel() for par in out.brute_grid][1]) assert_allclose(grid_y, grid_y_expected) def test_brute_no_bounds_no_brute_step(params_lmfit): """TEST 8: insufficient information provided.""" params_lmfit['x'].set(min=-np.inf, max=np.inf) mini = lmfit.Minimizer(func_lmfit, params_lmfit) msg = r'Not enough information provided for the brute force method.' 
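    # 'x' has (-inf, inf) bounds and no brute_step, so there is no way to
    # construct a grid for it and the brute method is expected to raise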
with pytest.raises(ValueError, match=msg): mini.minimize(method='brute') def test_brute_one_parameter(params_lmfit): """TEST 9: only one varying parameter.""" params_lmfit['x'].set(vary=False) mini = lmfit.Minimizer(func_lmfit, params_lmfit) out = mini.minimize(method='brute') assert out.candidates[0].score <= out.candidates[1].score assert isinstance(out.candidates[0], lmfit.minimizer.Candidate) assert isinstance(out.candidates[0].params, lmfit.Parameters) assert isinstance(out.candidates[0].score, np.float64) def test_brute_keep(params_lmfit, capsys): """TEST 10: using 'keep' argument and check candidates attribute.""" mini = lmfit.Minimizer(func_lmfit, params_lmfit) out = mini.minimize(method='brute') assert_equal(len(out.candidates), 50) # default out_keep_all = mini.minimize(method='brute', keep='all') assert_equal(len(out_keep_all.candidates), len(out_keep_all.brute_Jout.ravel())) out_keep10 = mini.minimize(method='brute', keep=10) assert_equal(len(out_keep10.candidates), 10) assert isinstance(out.candidates[0], lmfit.minimizer.Candidate) assert isinstance(out.candidates[0].params, lmfit.Parameters) assert isinstance(out.candidates[0].score, np.float64) with pytest.raises(ValueError, match=r"'candidate_nmb' should be between"): out_keep10.show_candidates(25) with pytest.raises(ValueError, match=r"'candidate_nmb' should be between"): out_keep10.show_candidates(0) out_keep10.show_candidates(5) captured = capsys.readouterr() assert 'Candidate #5' in captured.out # for coverage and to make sure the 'all' argument works; no assert... out_keep10.show_candidates('all') def test_brute_pickle(params_lmfit): """TEST 11: make sure the MinimizerResult can be pickle'd.""" mini = lmfit.Minimizer(func_lmfit, params_lmfit) out = mini.minimize(method='brute') pickle.dumps(out) def test_nfev_workers(params_lmfit): """TEST 12: make sure the nfev is correct for workers != 1.""" mini = lmfit.Minimizer(func_lmfit, params_lmfit, workers=-1) out = mini.minimize(method='brute') assert_equal(out.nfev, 20**len(out.var_names)) lmfit-py-1.0.0/tests/test_confidence.py000066400000000000000000000155041357751001700201340ustar00rootroot00000000000000"""Tests for the calculation of confidence intervals.""" import numpy as np from numpy.testing import assert_allclose import pytest from scipy.stats import f import lmfit from lmfit_testutils import assert_paramval @pytest.fixture def data(): """Generate synthetic data.""" x = np.linspace(0.3, 10, 100) np.random.seed(0) y = 1.0 / (0.1 * x) + 2.0 + 0.1 * np.random.randn(x.size) return (x, y) @pytest.fixture def pars(): """Create and initialize parameter set.""" parameters = lmfit.Parameters() parameters.add_many(('a', 0.1), ('b', 1)) return parameters def residual(params, x, data): """Define objective function for the minimization.""" return data - 1.0 / (params['a'] * x) + params['b'] @pytest.mark.parametrize("verbose", [False, True]) def test_confidence_leastsq(data, pars, verbose, capsys): """Calculate confidence interval after leastsq minimization.""" minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data)) out = minimizer.leastsq() assert 5 < out.nfev < 500 assert out.chisqr < 3.0 assert out.nvarys == 2 assert_paramval(out.params['a'], 0.1, tol=0.1) assert_paramval(out.params['b'], -2.0, tol=0.1) ci = lmfit.conf_interval(minimizer, out, verbose=verbose) assert_allclose(ci['b'][0][0], 0.997, rtol=0.01) assert_allclose(ci['b'][0][1], -2.022, rtol=0.01) assert_allclose(ci['b'][2][0], 0.683, rtol=0.01) assert_allclose(ci['b'][2][1], -1.997, rtol=0.01) 
assert_allclose(ci['b'][5][0], 0.95, rtol=0.01) assert_allclose(ci['b'][5][1], -1.96, rtol=0.01) if verbose: captured = capsys.readouterr() assert 'Calculating CI for' in captured.out def test_confidence_pnames(data, pars): """Test if pnames works as expected.""" minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data)) out = minimizer.leastsq() assert_paramval(out.params['a'], 0.1, tol=0.1) assert_paramval(out.params['b'], -2.0, tol=0.1) ci = lmfit.conf_interval(minimizer, out, p_names=['a']) assert 'a' in ci assert 'b' not in ci def test_confidence_bounds_reached(data, pars): """Check if conf_interval handles bounds correctly""" # Should work pars['a'].max = 0.2 minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data)) out = minimizer.leastsq() out.params['a'].stderr = 1 lmfit.conf_interval(minimizer, out, verbose=True) # Should warn pars['b'].max = 2.03 pars['b'].min = 1.97 minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data)) out = minimizer.leastsq() out.params['b'].stderr = 0.005 out.params['a'].stderr = 0.01 with pytest.warns(UserWarning, match="Bound reached"): lmfit.conf_interval(minimizer, out, verbose=True) def test_confidence_sigma_vs_prob(data, pars): """Calculate confidence by specifying sigma or probability.""" minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data)) out = minimizer.leastsq() ci_sigmas = lmfit.conf_interval(minimizer, out, sigmas=[1, 2, 3]) ci_1sigma = lmfit.conf_interval(minimizer, out, sigmas=[1]) ci_probs = lmfit.conf_interval(minimizer, out, sigmas=[0.68269, 0.9545, 0.9973]) assert_allclose(ci_sigmas['a'][0][1], ci_probs['a'][0][1], rtol=0.01) assert_allclose(ci_sigmas['b'][2][1], ci_probs['b'][2][1], rtol=0.01) assert len(ci_1sigma['a']) == 3 assert len(ci_probs['a']) == 7 def test_confidence_exceptions(data, pars): """Make sure the proper exceptions are raised when needed.""" minimizer = lmfit.Minimizer(residual, pars, calc_covar=False, fcn_args=data) out = minimizer.minimize(method='nelder') out_lsq = minimizer.minimize(params=out.params, method='leastsq') # no uncertainty estimated msg = 'Cannot determine Confidence Intervals without sensible uncertainty' with pytest.raises(lmfit.MinimizerException, match=msg): lmfit.conf_interval(minimizer, out) # uncertainty is NaN out_lsq.params['a'].stderr = np.nan with pytest.raises(lmfit.MinimizerException, match=msg): lmfit.conf_interval(minimizer, out_lsq) # only one varying parameter out_lsq.params['a'].vary = False msg = r'Cannot determine Confidence Intervals with < 2 variables' with pytest.raises(lmfit.MinimizerException, match=msg): lmfit.conf_interval(minimizer, out_lsq) def test_confidence_warnings(data, pars): """Make sure the proper warnings are emitted when needed.""" minimizer = lmfit.Minimizer(residual, pars, fcn_args=data) out = minimizer.minimize(method='leastsq') with pytest.warns(UserWarning) as record: lmfit.conf_interval(minimizer, out, maxiter=1) assert 'maxiter=1 reached and prob' in str(record[0].message) def test_confidence_with_trace(data, pars): """Test calculation of confidence intervals with trace.""" minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data)) out = minimizer.leastsq() ci, tr = lmfit.conf_interval(minimizer, out, sigmas=[0.6827], trace=True) for p in out.params: diff1 = ci[p][1][1] - ci[p][0][1] diff2 = ci[p][2][1] - ci[p][1][1] stderr = out.params[p].stderr assert abs(diff1 - stderr) / stderr < 0.05 assert abs(diff2 - stderr) / stderr < 0.05 assert p in tr.keys() assert 'prob' in tr[p].keys() def test_confidence_2d(data, pars): """Test the 2D 
confidence interval calculation.""" minimizer = lmfit.Minimizer(residual, pars, fcn_args=data) out = minimizer.minimize(method='leastsq') cx, cy, grid = lmfit.conf_interval2d(minimizer, out, 'a', 'b', 30, 20) assert len(cx.ravel()) == 30 assert len(cy.ravel()) == 20 assert grid.shape == (20, 30) def test_confidence_2d_limits(data, pars): """Test the 2D confidence interval calculation using limits.""" minimizer = lmfit.Minimizer(residual, pars, fcn_args=data) out = minimizer.minimize(method='leastsq') lim = ((1.0e-6, 0.02), (1.0e-6, -4.0)) cx, cy, grid = lmfit.conf_interval2d(minimizer, out, 'a', 'b', limits=lim) assert grid.shape == (10, 10) assert_allclose(min(cx.ravel()), 1.0e-6) assert_allclose(max(cx.ravel()), 0.02) assert_allclose(min(cy.ravel()), -4.0) assert_allclose(max(cy.ravel()), 1.0e-6) def test_confidence_prob_func(data, pars): """Test conf_interval with alternate prob_func.""" minimizer = lmfit.Minimizer(residual, pars, fcn_args=data) out = minimizer.minimize(method='leastsq') def my_f_compare(best_fit, new_fit): nfree = best_fit.nfree nfix = best_fit.nfree - new_fit.nfree dchi = new_fit.chisqr / best_fit.chisqr - 1.0 return f.cdf(dchi * nfree / nfix, nfix, nfree) lmfit.conf_interval(minimizer, out, sigmas=[1], prob_func=my_f_compare) lmfit-py-1.0.0/tests/test_copy_params.py000066400000000000000000000015721357751001700203540ustar00rootroot00000000000000import numpy as np from lmfit import Parameters, minimize def get_data(): x = np.arange(0, 1, 0.01) y1 = 1.5*np.exp(0.9*x) + np.random.normal(scale=0.001, size=len(x)) y2 = 2.0 + x + 1/2.*x**2 + 1/3.*x**3 y2 = y2 + np.random.normal(scale=0.001, size=len(x)) return x, y1, y2 def residual(params, x, data): model = params['a']*np.exp(params['b']*x) return (data-model) def test_copy_params(): x, y1, y2 = get_data() params = Parameters() params.add('a', value=2.0) params.add('b', value=2.0) # fit to first data set out1 = minimize(residual, params, args=(x, y1)) # fit to second data set out2 = minimize(residual, params, args=(x, y2)) adiff = out1.params['a'].value - out2.params['a'].value bdiff = out1.params['b'].value - out2.params['b'].value assert(abs(adiff) > 1.e-2) assert(abs(bdiff) > 1.e-2) lmfit-py-1.0.0/tests/test_covariance_matrix.py000066400000000000000000000222461357751001700215360ustar00rootroot00000000000000import os import numpy as np from numpy import pi from numpy.testing import assert_allclose, assert_almost_equal import pytest from lmfit import Parameters, minimize from lmfit.lineshapes import exponential from lmfit.models import ExponentialModel, VoigtModel def check(para, real_val, sig=3): err = abs(para.value - real_val) assert(err < sig * para.stderr) def test_bounded_parameters(): # create data to be fitted np.random.seed(1) x = np.linspace(0, 15, 301) data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) + np.random.normal(size=len(x), scale=0.2)) # define objective function: returns the array to be minimized def fcn2min(params, x, data): """ model decaying sine wave, subtract data""" amp = params['amp'] shift = params['shift'] omega = params['omega'] decay = params['decay'] model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay) return model - data # create a set of Parameters params = Parameters() params.add('amp', value=10, min=0, max=50) params.add('decay', value=0.1, min=0, max=10) params.add('shift', value=0.0, min=-pi/2., max=pi/2.) 
params.add('omega', value=3.0, min=0, max=np.inf) # do fit, here with leastsq model result = minimize(fcn2min, params, args=(x, data)) # assert that the real parameters are found for para, val in zip(result.params.values(), [5, 0.025, -.1, 2]): check(para, val) # assert that the covariance matrix is correct [cf. lmfit v0.9.10] cov_x = np.array([ [1.42428250e-03, 9.45395985e-06, -4.33997922e-05, 1.07362106e-05], [9.45395985e-06, 1.84110424e-07, -2.90588963e-07, 7.19107184e-08], [-4.33997922e-05, -2.90588963e-07, 9.53427031e-05, -2.37750362e-05], [1.07362106e-05, 7.19107184e-08, -2.37750362e-05, 9.60952336e-06]]) assert_allclose(result.covar, cov_x, rtol=1e-6) # assert that stderr and correlations are correct [cf. lmfit v0.9.10] assert_almost_equal(result.params['amp'].stderr, 0.03773967, decimal=6) assert_almost_equal(result.params['decay'].stderr, 4.2908e-04, decimal=6) assert_almost_equal(result.params['shift'].stderr, 0.00976436, decimal=6) assert_almost_equal(result.params['omega'].stderr, 0.00309992, decimal=6) assert_almost_equal(result.params['amp'].correl['decay'], 0.5838166760743324, decimal=6) assert_almost_equal(result.params['amp'].correl['shift'], -0.11777303073961824, decimal=6) assert_almost_equal(result.params['amp'].correl['omega'], 0.09177027400788784, decimal=6) assert_almost_equal(result.params['decay'].correl['shift'], -0.0693579417651835, decimal=6) assert_almost_equal(result.params['decay'].correl['omega'], 0.05406342001021014, decimal=6) assert_almost_equal(result.params['shift'].correl['omega'], -0.7854644476455469, decimal=6) def test_bounds_expression(): # load data to be fitted data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples', 'test_peak.dat')) x = data[:, 0] y = data[:, 1] # define the model and initialize parameters mod = VoigtModel() params = mod.guess(y, x=x) params['amplitude'].set(min=0, max=100) params['center'].set(min=5, max=10) # do fit, here with leastsq model result = mod.fit(y, params, x=x) # assert that stderr and correlations are correct [cf. 
lmfit v0.9.10] assert_almost_equal(result.params['sigma'].stderr, 0.00368468, decimal=6) assert_almost_equal(result.params['center'].stderr, 0.00505496, decimal=6) assert_almost_equal(result.params['amplitude'].stderr, 0.13861506, decimal=6) assert_almost_equal(result.params['gamma'].stderr, 0.00368468, decimal=6) assert_almost_equal(result.params['fwhm'].stderr, 0.00806917, decimal=6) assert_almost_equal(result.params['height'].stderr, 0.03009459, decimal=6) assert_almost_equal(result.params['sigma'].correl['center'], -4.6623973788006615e-05, decimal=6) assert_almost_equal(result.params['sigma'].correl['amplitude'], 0.651304091954038, decimal=6) assert_almost_equal(result.params['center'].correl['amplitude'], -4.390334984618851e-05, decimal=6) @pytest.mark.parametrize("fit_method", ['nelder', 'lbfgs']) def test_numdifftools_no_bounds(fit_method): pytest.importorskip("numdifftools") np.random.seed(7) x = np.linspace(0, 100, num=50) noise = np.random.normal(scale=0.25, size=x.size) y = exponential(x, amplitude=5, decay=15) + noise mod = ExponentialModel() params = mod.guess(y, x=x) # do fit, here with leastsq model result = mod.fit(y, params, x=x, method='leastsq') result_ndt = mod.fit(y, params, x=x, method=fit_method) # assert that fit converged to the same result vals = [result.params[p].value for p in result.params.valuesdict()] vals_ndt = [result_ndt.params[p].value for p in result_ndt.params.valuesdict()] assert_allclose(vals_ndt, vals, rtol=0.1) assert_allclose(result_ndt.chisqr, result.chisqr, rtol=1e-5) # assert that parameter uncertaintes from leastsq and calculated from # the covariance matrix using numdifftools are very similar stderr = [result.params[p].stderr for p in result.params.valuesdict()] stderr_ndt = [result_ndt.params[p].stderr for p in result_ndt.params.valuesdict()] perr = np.array(stderr) / np.array(vals) perr_ndt = np.array(stderr_ndt) / np.array(vals_ndt) assert_almost_equal(perr_ndt, perr, decimal=3) # assert that parameter correlatations from leastsq and calculated from # the covariance matrix using numdifftools are very similar for par1 in result.var_names: cor = [result.params[par1].correl[par2] for par2 in result.params[par1].correl.keys()] cor_ndt = [result_ndt.params[par1].correl[par2] for par2 in result_ndt.params[par1].correl.keys()] assert_almost_equal(cor_ndt, cor, decimal=2) @pytest.mark.parametrize("fit_method", ['nelder', 'basinhopping', 'ampgo', 'shgo', 'dual_annealing']) def test_numdifftools_with_bounds(fit_method): pytest.importorskip("numdifftools") if fit_method in ['shgo', 'dual_annealing']: pytest.importorskip("scipy", minversion="1.2") # load data to be fitted data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples', 'test_peak.dat')) x = data[:, 0] y = data[:, 1] # define the model and initialize parameters mod = VoigtModel() params = mod.guess(y, x=x) params['amplitude'].set(min=25, max=70) params['sigma'].set(max=1) params['center'].set(min=5, max=15) # do fit, here with leastsq model result = mod.fit(y, params, x=x, method='leastsq') result_ndt = mod.fit(y, params, x=x, method=fit_method) # assert that fit converged to the same result vals = [result.params[p].value for p in result.params.valuesdict()] vals_ndt = [result_ndt.params[p].value for p in result_ndt.params.valuesdict()] assert_allclose(vals_ndt, vals, rtol=0.1) assert_allclose(result_ndt.chisqr, result.chisqr, rtol=1e-5) # assert that parameter uncertaintes from leastsq and calculated from # the covariance matrix using numdifftools are very similar stderr = 
[result.params[p].stderr for p in result.params.valuesdict()]
    stderr_ndt = [result_ndt.params[p].stderr for p in result_ndt.params.valuesdict()]
    perr = np.array(stderr) / np.array(vals)
    perr_ndt = np.array(stderr_ndt) / np.array(vals_ndt)
    assert_almost_equal(perr_ndt, perr, decimal=3)

    # assert that parameter correlations from leastsq and calculated from
    # the covariance matrix using numdifftools are very similar
    for par1 in result.var_names:
        cor = [result.params[par1].correl[par2] for par2 in
               result.params[par1].correl.keys()]
        cor_ndt = [result_ndt.params[par1].correl[par2] for par2 in
                   result_ndt.params[par1].correl.keys()]
        assert_almost_equal(cor_ndt, cor, decimal=2)


def test_numdifftools_calc_covar_false():
    pytest.importorskip("numdifftools")
    # load data to be fitted
    data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..',
                                   'examples', 'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['sigma'].set(min=-np.inf)

    # do fit, with leastsq and nelder
    result = mod.fit(y, params, x=x, method='leastsq')
    result_ndt = mod.fit(y, params, x=x, method='nelder', calc_covar=False)

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_ndt = [result_ndt.params[p].value for p in result_ndt.params.valuesdict()]
    assert_allclose(vals_ndt, vals, rtol=5e-3)
    assert_allclose(result_ndt.chisqr, result.chisqr)

    assert result_ndt.covar is None
    assert result_ndt.errorbars is False
lmfit-py-1.0.0/tests/test_custom_independentvar.py000066400000000000000000000023351357751001700224350ustar00rootroot00000000000000
import numpy as np

from lmfit.lineshapes import gaussian
from lmfit.models import Model


class Stepper:
    def __init__(self, start, stop, npts):
        self.start = start
        self.stop = stop
        self.npts = npts

    def get_x(self):
        return np.linspace(self.start, self.stop, self.npts)


def gaussian_mod(obj, amplitude, center, sigma):
    return gaussian(obj.get_x(), amplitude, center, sigma)


def test_custom_independentvar():
    """Tests using a non-trivial object as an independent variable."""
    npts = 501
    xmin = 1
    xmax = 21
    cen = 8
    obj = Stepper(xmin, xmax, npts)
    y = gaussian(obj.get_x(), amplitude=3.0, center=cen, sigma=2.5)
    y += np.random.normal(scale=0.2, size=npts)

    gmod = Model(gaussian_mod)
    params = gmod.make_params(amplitude=2, center=5, sigma=8)

    out = gmod.fit(y, params, obj=obj)

    assert(out.nvarys == 3)
    assert(out.nfev > 10)
    assert(out.chisqr > 1)
    assert(out.chisqr < 100)
    assert(out.params['sigma'].value < 3)
    assert(out.params['sigma'].value > 2)
    assert(out.params['center'].value > xmin)
    assert(out.params['center'].value < xmax)
    assert(out.params['amplitude'].value > 1)
    assert(out.params['amplitude'].value < 5)
lmfit-py-1.0.0/tests/test_default_kws.py000066400000000000000000000011471357751001700203450ustar00rootroot00000000000000
import numpy as np

from lmfit.lineshapes import gaussian
from lmfit.models import GaussianModel


def test_default_inputs_gauss():
    area = 1
    cen = 0
    std = 0.2
    x = np.arange(-3, 3, 0.01)
    y = gaussian(x, area, cen, std)

    g = GaussianModel()

    fit_option1 = {'maxfev': 5000, 'xtol': 1e-2}
    result1 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5,
                    fit_kws=fit_option1)

    fit_option2 = {'maxfev': 5000, 'xtol': 1e-6}
    result2 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5,
                    fit_kws=fit_option2)

    assert(result1.values != result2.values)
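
# --- illustrative sketch (added for clarity; not part of the original lmfit test suite) ---
# A minimal, hedged example of the behaviour exercised by test_default_inputs_gauss
# above: the fit_kws dictionary passed to Model.fit() is forwarded to the underlying
# solver (scipy.optimize.leastsq for the default 'leastsq' method), so options such
# as 'maxfev' and 'xtol' change how the fit converges.  The helper name is purely
# illustrative; it reuses the imports at the top of this file.
def _sketch_fit_kws_usage():
    x = np.arange(-3, 3, 0.01)
    y = gaussian(x, 1, 0, 0.2)
    g = GaussianModel()
    # a loose xtol lets the solver stop earlier than a tight one, so the two
    # results are expected to differ slightly
    loose = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5,
                  fit_kws={'maxfev': 5000, 'xtol': 1e-2})
    tight = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5,
                  fit_kws={'maxfev': 5000, 'xtol': 1e-6})
    return loose, tight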
lmfit-py-1.0.0/tests/test_dual_annealing.py000066400000000000000000000045071357751001700210010ustar00rootroot00000000000000
"""Tests for the Dual Annealing algorithm."""

import numpy as np
from numpy.testing import assert_allclose
import pytest
import scipy

import lmfit

# dual_annealing algorithm is present in SciPy >= 1.2
pytest.importorskip("scipy", minversion="1.2")


def eggholder(x):
    return (-(x[1] + 47.0) * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
            - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0)))))


def eggholder_lmfit(params):
    x0 = params['x0'].value
    x1 = params['x1'].value
    return (-(x1 + 47.0) * np.sin(np.sqrt(abs(x0/2.0 + (x1 + 47.0))))
            - x0 * np.sin(np.sqrt(abs(x0 - (x1 + 47.0)))))


def test_da_scipy_vs_lmfit():
    """Test DA algorithm in lmfit versus SciPy."""
    bounds = [(-512, 512), (-512, 512)]
    result_scipy = scipy.optimize.dual_annealing(eggholder, bounds, seed=7)

    pars = lmfit.Parameters()
    pars.add_many(('x0', 0, True, -512, 512), ('x1', 0, True, -512, 512))
    mini = lmfit.Minimizer(eggholder_lmfit, pars)
    result = mini.minimize(method='dual_annealing', seed=7)
    out_x = np.array([result.params['x0'].value, result.params['x1'].value])

    assert_allclose(result_scipy.fun, result.residual)
    assert_allclose(result_scipy.x, out_x)


# TODO: add scipy example from docstring after the reproducibility issue in
# https://github.com/scipy/scipy/issues/9732 is resolved.

# correct result for Alpine02 function
global_optimum = [7.91705268, 4.81584232]
fglob = -6.12950


def test_da_Alpine02(minimizer_Alpine02):
    """Test dual_annealing algorithm on Alpine02 function."""
    out = minimizer_Alpine02.minimize(method='dual_annealing')
    out_x = np.array([out.params['x0'].value, out.params['x1'].value])

    assert_allclose(out.residual, fglob, rtol=1e-5)
    assert_allclose(min(out_x), min(global_optimum), rtol=1e-3)
    assert_allclose(max(out_x), max(global_optimum), rtol=1e-3)
    assert out.method == 'dual_annealing'


def test_da_bounds(minimizer_Alpine02):
    """Test dual_annealing algorithm with bounds."""
    pars_bounds = lmfit.Parameters()
    pars_bounds.add_many(('x0', 1., True, 5.0, 15.0),
                         ('x1', 1., True, 2.5, 7.5))

    out = minimizer_Alpine02.minimize(params=pars_bounds,
                                      method='dual_annealing')
    assert 5.0 <= out.params['x0'].value <= 15.0
    assert 2.5 <= out.params['x1'].value <= 7.5
lmfit-py-1.0.0/tests/test_itercb.py000066400000000000000000000031651357751001700173070ustar00rootroot00000000000000
"""Tests for the Iteration Callback Function."""

import numpy as np
import pytest

from lmfit.lineshapes import gaussian
from lmfit.models import GaussianModel, LinearModel

try:
    import numdifftools  # noqa: F401
    calc_covar_options = [False, True]
except ImportError:
    calc_covar_options = [False]

np.random.seed(7)
x = np.linspace(0, 20, 401)
y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
y -= 0.20*x + 3.333 + np.random.normal(scale=0.23, size=len(x))
mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')

pars = mod.make_params(peak_amplitude=21.0, peak_center=7.0,
                       peak_sigma=2.0, bkg_intercept=2, bkg_slope=0.0)

# set bounds for use with 'differential_evolution' and 'brute'
pars['bkg_intercept'].set(min=0, max=10)
pars['bkg_slope'].set(min=-5, max=5)
pars['peak_amplitude'].set(min=20, max=25)
pars['peak_center'].set(min=5, max=10)
pars['peak_sigma'].set(min=0.5, max=2)


def per_iteration(pars, iter, resid, *args, **kws):
    """Iteration callback, will abort at iteration 23."""
    return iter == 23


@pytest.mark.parametrize("calc_covar", calc_covar_options)
@pytest.mark.parametrize("method", ['ampgo', 'brute', 'basinhopping',
                                    'differential_evolution', 'leastsq',
                                    'least_squares', 'nelder'])
def test_itercb(method, calc_covar):
    """Test the iteration callback for all solvers."""
    out = mod.fit(y, pars, x=x, method=method, iter_cb=per_iteration,
                  calc_covar=calc_covar)

    assert out.nfev == 23
    assert out.aborted
    assert not out.errorbars
    assert not out.success
lmfit-py-1.0.0/tests/test_jsonutils.py000066400000000000000000000040041357751001700200620ustar00rootroot00000000000000
"""Tests for the JSON utilities."""

from types import BuiltinFunctionType, FunctionType

import numpy as np
import pytest
from scipy.optimize import basinhopping

import lmfit
from lmfit.jsonutils import decode4js, encode4js, find_importer, import_from
from lmfit.printfuncs import alphanumeric_sort


@pytest.mark.parametrize('obj', [alphanumeric_sort, np.array, basinhopping])
def test_import_from(obj):
    """Check return value of find_importer function."""
    importer = find_importer(obj)
    assert isinstance(import_from(importer, obj.__name__),
                      (BuiltinFunctionType, FunctionType))


objects = [('test_string', (str,)),
           (np.array([7.0]), np.ndarray),
           (np.array([1.0+2.0j]), np.ndarray),
           (123.456, np.float),
           (10, np.float),
           ('café', (str,)),
           (10.0-5.0j, np.complex),
           (['a', 'b', 'c'], list),
           (('a', 'b', 'c'), tuple),
           ({'a': 1.0, 'b': 2.0, 'c': 3.0}, dict),
           (lmfit.lineshapes.gaussian, FunctionType)]


@pytest.mark.parametrize('obj, obj_type', objects)
def test_encode_decode(obj, obj_type):
    """Test encoding/decoding of the various object types to/from JSON."""
    encoded = encode4js(obj)
    decoded = decode4js(encoded)

    assert decoded == obj
    assert isinstance(decoded, obj_type)


def test_encode_decode_pandas():
    """Test encoding/decoding of various pandas objects to/from JSON."""
    pytest.importorskip('pandas')
    import pandas as pd

    obj_df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
                          columns=['a', 'b', 'c'])
    encoded_df = encode4js(obj_df)
    decoded_df = decode4js(encoded_df)

    assert np.all(pd.DataFrame.eq(obj_df, decoded_df))
    assert isinstance(decoded_df, pd.DataFrame)

    obj_ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
    encoded_ser = encode4js(obj_ser)
    decoded_ser = decode4js(encoded_ser)

    assert np.all(pd.Series.eq(obj_ser, decoded_ser))
    assert isinstance(decoded_ser, pd.Series)
lmfit-py-1.0.0/tests/test_least_squares.py000066400000000000000000000140671357751001700207130ustar00rootroot00000000000000
"""Tests for the least_squares minimization algorithm."""

import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.sparse import bsr_matrix
from scipy.sparse.linalg import aslinearoperator

import lmfit
from lmfit.models import VoigtModel


def test_least_squares_with_bounds():
    """Test least_squares algorithm with bounds."""
    # define "true" parameters
    p_true = lmfit.Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.4321)
    p_true.add('shift', value=0.12345)
    p_true.add('decay', value=0.01000)

    def residual(pars, x, data=None):
        """Objective function of decaying sine wave."""
        amp = pars['amp']
        per = pars['period']
        shift = pars['shift']
        decay = pars['decay']

        if abs(shift) > np.pi/2:
            shift = shift - np.sign(shift)*np.pi
        model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
        if data is None:
            return model
        return model - data

    # generate synthetic data
    np.random.seed(0)
    x = np.linspace(0.0, 250.0, 1500)
    noise = np.random.normal(scale=2.80, size=x.size)
    data = residual(p_true, x) + noise

    # create Parameters and set initial values and bounds
    fit_params = lmfit.Parameters()
    fit_params.add('amp', value=13.0, min=0.0, max=20)
    fit_params.add('period', value=2, max=10)
    fit_params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2.)
    fit_params.add('decay', value=0.02, min=0.0, max=0.10)

    mini = lmfit.Minimizer(residual, fit_params, fcn_args=(x, data))
    out = mini.minimize(method='least_squares')

    assert out.method == 'least_squares'
    assert out.nfev > 10
    assert out.nfree > 50
    assert out.chisqr > 1.0
    assert out.errorbars
    assert out.success
    assert_allclose(out.params['decay'], p_true['decay'], rtol=1e-2)
    assert_allclose(out.params['shift'], p_true['shift'], rtol=1e-2)


@pytest.mark.parametrize("bounds", [False, True])
def test_least_squares_cov_x(peakdata, bounds):
    """Test calculation of cov. matrix from Jacobian, with/without bounds."""
    x = peakdata[0]
    y = peakdata[1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)

    if bounds:
        params['amplitude'].set(min=25, max=70)
        params['sigma'].set(min=0, max=1)
        params['center'].set(min=5, max=15)
    else:
        params['sigma'].set(min=-np.inf)

    # do fit with least_squares and leastsq algorithm
    result = mod.fit(y, params, x=x, method='least_squares')
    result_lsq = mod.fit(y, params, x=x, method='leastsq')

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_lsq = [result_lsq.params[p].value for p in result_lsq.params.valuesdict()]
    assert_allclose(vals, vals_lsq, rtol=1e-5)
    assert_allclose(result.chisqr, result_lsq.chisqr)

    # assert that parameter uncertainties obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacobian matrix in
    # least_squares are similar
    stderr = [result.params[p].stderr for p in result.params.valuesdict()]
    stderr_lsq = [result_lsq.params[p].stderr for p in result_lsq.params.valuesdict()]
    assert_allclose(stderr, stderr_lsq, rtol=1e-4)

    # assert that parameter correlations obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacobian matrix in
    # least_squares are similar
    for par1 in result.var_names:
        cor = [result.params[par1].correl[par2] for par2 in
               result.params[par1].correl.keys()]
        cor_lsq = [result_lsq.params[par1].correl[par2] for par2 in
                   result_lsq.params[par1].correl.keys()]
        assert_allclose(cor, cor_lsq, rtol=1e-2)


def test_least_squares_solver_options(peakdata, capsys):
    """Test least_squares algorithm, pass options to solver."""
    x = peakdata[0]
    y = peakdata[1]

    mod = VoigtModel()
    params = mod.guess(y, x=x)

    solver_kws = {'verbose': 2}
    mod.fit(y, params, x=x, method='least_squares', fit_kws=solver_kws)
    captured = capsys.readouterr()

    assert 'Iteration' in captured.out
    assert 'final cost' in captured.out


def test_least_squares_jacobian_types():
    """Test support for Jacobian of all types supported by least_squares."""
    # Build function
    # f(x, y) = (x - a)^2 + (y - b)^2
    np.random.seed(42)
    a = np.random.normal(0, 1, 50)
    np.random.seed(43)
    b = np.random.normal(0, 1, 50)

    def f(params):
        return (params['x'] - a)**2 + (params['y'] - b)**2

    # Build analytic Jacobian functions with the different possible return types
    # numpy.ndarray, scipy.sparse.spmatrix, scipy.sparse.linalg.LinearOperator
    # J = [ 2x - 2a , 2y - 2b ]
    def jac_array(params, *args, **kwargs):
        return np.column_stack((2 * params[0] - 2 * a, 2 * params[1] - 2 * b))

    def jac_sparse(params, *args, **kwargs):
        return bsr_matrix(jac_array(params, *args, **kwargs))

    def jac_operator(params, *args, **kwargs):
        return aslinearoperator(jac_array(params, *args, **kwargs))

    # Build parameters
    params = lmfit.Parameters()
    params.add('x', value=0)
params.add('y', value=0) # Solve model for numerical Jacobian and each analytic Jacobian function result = lmfit.minimize(f, params, method='least_squares') result_array = lmfit.minimize( f, params, method='least_squares', jac=jac_array) result_sparse = lmfit.minimize( f, params, method='least_squares', jac=jac_sparse) result_operator = lmfit.minimize( f, params, method='least_squares', jac=jac_operator) # Check that all have uncertainties assert result.errorbars assert result_array.errorbars assert result_sparse.errorbars assert result_operator.errorbars # Check that all have ~equal covariance matrix assert_allclose(result.covar, result_array.covar) assert_allclose(result.covar, result_sparse.covar) assert_allclose(result.covar, result_operator.covar) lmfit-py-1.0.0/tests/test_lineshapes_models.py000066400000000000000000000244541357751001700215410ustar00rootroot00000000000000"""Tests for lineshape functions and built-in models.""" import inspect import sys import numpy as np from numpy.testing import assert_allclose import pytest from scipy.optimize import fsolve from lmfit import lineshapes, models if sys.version_info[0] == 2: inspect_args = inspect.getargspec elif sys.version_info[0] == 3: inspect_args = inspect.getfullargspec def check_height_fwhm(x, y, lineshape, model): """Check height and fwhm parameters.""" pars = model.guess(y, x=x) out = model.fit(y, pars, x=x) # account for functions whose centers are not mu mu = out.params['center'].value if lineshape is lineshapes.lognormal: cen = np.exp(mu - out.params['sigma']**2) else: cen = mu # get arguments for lineshape args = {key: out.best_values[key] for key in inspect_args(lineshape)[0] if key != 'x'} # output format for assertion errors fmt = ("Program calculated values and real values do not match!\n" "{:^20s}{:^20s}{:^20s}{:^20s}\n" "{:^20s}{:^20f}{:^20f}{:^20f}") if 'height' in out.params: height_pro = out.params['height'].value height_act = lineshape(cen, **args) diff = height_act - height_pro assert abs(diff) < 0.001, fmt.format(model._name, 'Actual', 'program', 'Difference', 'Height', height_act, height_pro, diff) if 'fwhm' in out.params: fwhm_pro = out.params['fwhm'].value func = lambda x: lineshape(x, **args) - 0.5*height_act ret = fsolve(func, [cen - fwhm_pro/4, cen + fwhm_pro/2]) fwhm_act = ret[1] - ret[0] diff = fwhm_act - fwhm_pro assert abs(diff) < 0.5, fmt.format(model._name, 'Actual', 'program', 'Difference', 'FWHM', fwhm_act, fwhm_pro, diff) def test_height_fwhm_calculation(peakdata): """Test for correctness of height and FWHM calculation.""" # mu = 0 # variance = 1.0 # sigma = np.sqrt(variance) # x = np.linspace(mu - 20*sigma, mu + 20*sigma, 100.0) # y = norm.pdf(x, mu, 1) x = peakdata[0] y = peakdata[1] check_height_fwhm(x, y, lineshapes.voigt, models.VoigtModel()) check_height_fwhm(x, y, lineshapes.pvoigt, models.PseudoVoigtModel()) check_height_fwhm(x, y, lineshapes.pearson7, models.Pearson7Model()) check_height_fwhm(x, y, lineshapes.moffat, models.MoffatModel()) check_height_fwhm(x, y, lineshapes.students_t, models.StudentsTModel()) check_height_fwhm(x, y, lineshapes.breit_wigner, models.BreitWignerModel()) check_height_fwhm(x, y, lineshapes.damped_oscillator, models.DampedOscillatorModel()) check_height_fwhm(x, y, lineshapes.dho, models.DampedHarmonicOscillatorModel()) check_height_fwhm(x, y, lineshapes.expgaussian, models.ExponentialGaussianModel()) check_height_fwhm(x, y, lineshapes.skewed_gaussian, models.SkewedGaussianModel()) check_height_fwhm(x, y, lineshapes.donaich, models.DonaichModel()) x = x-9 # 
Lognormal will only fit peaks with centers < 1 check_height_fwhm(x, y, lineshapes.lognormal, models.LognormalModel()) @pytest.mark.parametrize("lineshape", lineshapes.functions) def test_finite_output_lineshape(lineshape): """Test for finite output of lineshape functions.""" x = np.linspace(0, 100) # no need to test the lineshapes below if lineshape in ['linear', 'exponential', 'sine', 'expsine', 'powerlaw', 'parabolic', 'erf', 'erfc', 'wofz', 'gamma', 'gammaln']: return None elif lineshape in ['gaussian', 'lorentzian', 'damped_oscillator', 'logistic', 'lognormal', 'students_t']: func_args = (x, 1.0, x.size/2.0, 0.0) elif lineshape in ['split_lorentzian', 'voigt', 'pvoigt', 'dho', 'expgaussian', 'donaich', 'skewed_gaussian']: func_args = (x, 1.0, x.size/2.0, 0.0, 0.0) elif lineshape in ['moffat', 'pearson7', 'breit_wigner']: func_args = (x, 1.0, x.size/2.0, 0.0, 1.0) elif lineshape in ['skewed_voigt']: func_args = (x, 1.0, x.size/2.0, 0.0, 0.0, 0.0) elif lineshape == 'step': func_args = (x, 1.0, x.size/2.0, 0.0, 'linear') elif lineshape == 'rectangle': func_args = (x, 1.0, x.size/2.0, 0.0, x.size/2.0, 0.0, 'linear') ls = getattr(lineshapes, lineshape) out = ls(*func_args) assert np.all(np.isfinite(out)) def test_height_and_fwhm_expression_evalution_in_builtin_models(): """Assert models do not throw an ZeroDivisionError.""" mod = models.GaussianModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9) params.update_constraints() mod = models.LorentzianModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9) params.update_constraints() mod = models.SplitLorentzianModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, sigma_r=1.0) params.update_constraints() mod = models.VoigtModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=1.0) params.update_constraints() mod = models.PseudoVoigtModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, fraction=0.5) params.update_constraints() mod = models.MoffatModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, beta=0.0) params.update_constraints() mod = models.Pearson7Model() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, expon=1.0) params.update_constraints() mod = models.StudentsTModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9) params.update_constraints() mod = models.BreitWignerModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, q=0.0) params.update_constraints() mod = models.LognormalModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9) params.update_constraints() mod = models.DampedOscillatorModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9) params.update_constraints() mod = models.DampedHarmonicOscillatorModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0) params.update_constraints() mod = models.ExponentialGaussianModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0) params.update_constraints() mod = models.SkewedGaussianModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0) params.update_constraints() mod = models.SkewedVoigtModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0, skew=0.0) params.update_constraints() mod = models.DonaichModel() params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0) params.update_constraints() mod = models.StepModel() for f in ('linear', 'arctan', 'erf', 'logistic'): params = 
mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, form=f) params.update_constraints() mod = models.RectangleModel() for f in ('linear', 'arctan', 'erf', 'logistic'): params = mod.make_params(amplitude=1.0, center1=0.0, sigma1=0.0, center2=0.0, sigma2=0.0, form=f) params.update_constraints() def test_guess_modelparams(): x = np.linspace(-10, 10, 501) mod = models.ConstantModel() y = 6.0 + x*0.005 pars = mod.guess(y) assert_allclose(pars['c'].value, 6.0, rtol=0.01) mod = models.ComplexConstantModel(prefix='f_') y = 6.0 + x*0.005 + (4.0 - 0.02*x)*1j pars = mod.guess(y) assert_allclose(pars['f_re'].value, 6.0, rtol=0.01) assert_allclose(pars['f_im'].value, 4.0, rtol=0.01) mod = models.QuadraticModel(prefix='g_') y = -0.2 + 3.0*x + 0.005*x**2 pars = mod.guess(y, x=x) assert_allclose(pars['g_a'].value, 0.005, rtol=0.01) assert_allclose(pars['g_b'].value, 3.0, rtol=0.01) assert_allclose(pars['g_c'].value, -0.2, rtol=0.01) mod = models.PolynomialModel(4, prefix='g_') y = -0.2 + 3.0*x + 0.005*x**2 - 3.3e-6*x**3 + 1.e-9*x**4 pars = mod.guess(y, x=x) assert_allclose(pars['g_c0'].value, -0.2, rtol=0.01) assert_allclose(pars['g_c1'].value, 3.0, rtol=0.01) assert_allclose(pars['g_c2'].value, 0.005, rtol=0.1) assert_allclose(pars['g_c3'].value, -3.3e-6, rtol=0.1) assert_allclose(pars['g_c4'].value, 1.e-9, rtol=0.1) mod = models.GaussianModel(prefix='g_') y = lineshapes.gaussian(x, amplitude=2.2, center=0.25, sigma=1.3) y += np.random.normal(size=len(x), scale=0.004) pars = mod.guess(y, x=x) assert_allclose(pars['g_amplitude'].value, 3, rtol=2) assert_allclose(pars['g_center'].value, 0.25, rtol=1) assert_allclose(pars['g_sigma'].value, 1.3, rtol=1) mod = models.LorentzianModel(prefix='l_') pars = mod.guess(y, x=x) assert_allclose(pars['l_amplitude'].value, 3, rtol=2) assert_allclose(pars['l_center'].value, 0.25, rtol=1) assert_allclose(pars['l_sigma'].value, 1.3, rtol=1) mod = models.SplitLorentzianModel(prefix='s_') pars = mod.guess(y, x=x) assert_allclose(pars['s_amplitude'].value, 3, rtol=2) assert_allclose(pars['s_center'].value, 0.25, rtol=1) assert_allclose(pars['s_sigma'].value, 1.3, rtol=1) assert_allclose(pars['s_sigma_r'].value, 1.3, rtol=1) mod = models.VoigtModel(prefix='l_') pars = mod.guess(y, x=x) assert_allclose(pars['l_amplitude'].value, 3, rtol=2) assert_allclose(pars['l_center'].value, 0.25, rtol=1) assert_allclose(pars['l_sigma'].value, 1.3, rtol=1) mod = models.SkewedVoigtModel(prefix='l_') pars = mod.guess(y, x=x) assert_allclose(pars['l_amplitude'].value, 3, rtol=2) assert_allclose(pars['l_center'].value, 0.25, rtol=1) assert_allclose(pars['l_sigma'].value, 1.3, rtol=1) def test_splitlorentzian_prefix(): mod1 = models.SplitLorentzianModel() par1 = mod1.make_params(amplitude=1.0, center=0.0, sigma=0.9, sigma_r=1.3) par1.update_constraints() mod2 = models.SplitLorentzianModel(prefix='prefix_') par2 = mod2.make_params(amplitude=1.0, center=0.0, sigma=0.9, sigma_r=1.3) par2.update_constraints() lmfit-py-1.0.0/tests/test_manypeaks_speed.py000066400000000000000000000013441357751001700212040ustar00rootroot00000000000000# # test speed of building complex model # from copy import deepcopy import sys import time import numpy as np from lmfit import Model from lmfit.lineshapes import gaussian sys.setrecursionlimit(2000) def test_manypeaks_speed(): model = None t0 = time.time() for i in np.arange(500): g = Model(gaussian, prefix='g%i_' % i) if model is None: model = g else: model += g t1 = time.time() pars = model.make_params() t2 = time.time() cpars = deepcopy(pars) # noqa: F841 t3 = 
time.time() # these are very conservative tests that # should be satisfied on nearly any machine assert((t3-t2) < 0.5) assert((t2-t1) < 0.5) assert((t1-t0) < 5.0) lmfit-py-1.0.0/tests/test_minimizer.py000066400000000000000000000010411357751001700200310ustar00rootroot00000000000000from lmfit import Minimizer, Parameters def test_scalar_minimize_neg_value(): x0 = 3.14 fmin = -1.1 xtol = 0.001 ftol = 2.0 * xtol def objective(pars): return (pars['x'] - x0) ** 2.0 + fmin params = Parameters() params.add('x', value=2*x0) minr = Minimizer(objective, params) result = minr.scalar_minimize(method='Nelder-Mead', options={'xatol': xtol, 'fatol': ftol}) assert abs(result.params['x'].value - x0) < xtol assert abs(result.fun - fmin) < ftol lmfit-py-1.0.0/tests/test_model.py000066400000000000000000000742321357751001700171420ustar00rootroot00000000000000import functools import sys import unittest import warnings import numpy as np from numpy.testing import assert_allclose import pytest from lmfit import Model, models from lmfit.lineshapes import gaussian from lmfit.models import PseudoVoigtModel def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03, err_msg='', verbose=True): for param_name, value in desired.items(): assert_allclose(actual[param_name], value, rtol, atol, err_msg, verbose) def _skip_if_no_pandas(): try: import pandas # noqa: F401 except ImportError: raise pytest.skip("Skipping tests that require pandas.") def firstarg_ndarray(func): """a simple wrapper used for testing that wrapped functions can be model functions""" @functools.wraps(func) def wrapper(x, *args, **kws): x = np.asarray(x) return func(x, *args, **kws) return wrapper @firstarg_ndarray def linear_func(x, a, b): "test wrapped model function" return a*x+b class CommonTests: # to be subclassed for testing predefined models def setUp(self): np.random.seed(1) self.noise = 0.0001*np.random.randn(*self.x.shape) # Some Models need args (e.g., polynomial order), and others don't. try: args = self.args except AttributeError: self.model = self.model_constructor() self.model_omit = self.model_constructor(nan_policy='omit') self.model_raise = self.model_constructor(nan_policy='raise') self.model_explicit_var = self.model_constructor(['x']) func = self.model.func else: self.model = self.model_constructor(*args) self.model_omit = self.model_constructor(*args, nan_policy='omit') self.model_raise = self.model_constructor(*args, nan_policy='raise') self.model_explicit_var = self.model_constructor( *args, independent_vars=['x']) func = self.model.func self.data = func(x=self.x, **self.true_values()) + self.noise @property def x(self): return np.linspace(1, 10, num=1000) def test_fit(self): model = self.model # Pass Parameters object. params = model.make_params(**self.guess()) result = model.fit(self.data, params, x=self.x) assert_results_close(result.values, self.true_values()) # Pass inidividual Parameter objects as kwargs. kwargs = {name: p for name, p in params.items()} result = self.model.fit(self.data, x=self.x, **kwargs) assert_results_close(result.values, self.true_values()) # Pass guess values (not Parameter objects) as kwargs. 
kwargs = {name: p.value for name, p in params.items()} result = self.model.fit(self.data, x=self.x, **kwargs) assert_results_close(result.values, self.true_values()) def test_explicit_independent_vars(self): self.check_skip_independent_vars() model = self.model_explicit_var pars = model.make_params(**self.guess()) result = model.fit(self.data, pars, x=self.x) assert_results_close(result.values, self.true_values()) def test_fit_with_weights(self): model = self.model # fit without weights params = model.make_params(**self.guess()) out1 = model.fit(self.data, params, x=self.x) # fit with weights weights = 1.0/(0.5 + self.x**2) out2 = model.fit(self.data, params, weights=weights, x=self.x) max_diff = 0.0 for parname, val1 in out1.values.items(): val2 = out2.values[parname] if max_diff < abs(val1-val2): max_diff = abs(val1-val2) assert max_diff > 1.e-8 def test_result_attributes(self): pars = self.model.make_params(**self.guess()) result = self.model.fit(self.data, pars, x=self.x) # result.init_values assert_results_close(result.values, self.true_values()) self.assertEqual(result.init_values, self.guess()) # result.init_params params = self.model.make_params() for param_name, value in self.guess().items(): params[param_name].value = value self.assertEqual(result.init_params, params) # result.best_fit assert_allclose(result.best_fit, self.data, atol=self.noise.max()) # result.init_fit init_fit = self.model.func(x=self.x, **self.guess()) assert_allclose(result.init_fit, init_fit) # result.model self.assertTrue(result.model is self.model) def test_result_eval(self): # Check eval() output against init_fit and best_fit. pars = self.model.make_params(**self.guess()) result = self.model.fit(self.data, pars, x=self.x) assert_allclose(result.eval(x=self.x, **result.values), result.best_fit) assert_allclose(result.eval(x=self.x, **result.init_values), result.init_fit) def test_result_eval_custom_x(self): self.check_skip_independent_vars() pars = self.model.make_params(**self.guess()) result = self.model.fit(self.data, pars, x=self.x) # Check that the independent variable is respected. short_eval = result.eval(x=np.array([0, 1, 2]), **result.values) if hasattr(short_eval, '__len__'): self.assertEqual(len(short_eval), 3) def test_result_report(self): pars = self.model.make_params(**self.guess()) result = self.model.fit(self.data, pars, x=self.x) report = result.fit_report() assert "[[Model]]" in report assert "[[Variables]]" in report assert "[[Fit Statistics]]" in report assert " # function evals =" in report assert " Akaike " in report assert " chi-square " in report def test_data_alignment(self): _skip_if_no_pandas() from pandas import Series # Align data and indep var of different lengths using pandas index. data = Series(self.data.copy()).iloc[10:-10] x = Series(self.x.copy()) model = self.model params = model.make_params(**self.guess()) result = model.fit(data, params, x=x) result = model.fit(data, params, x=x) assert_results_close(result.values, self.true_values()) # Skip over missing (NaN) values, aligning via pandas index. data.iloc[500:510] = np.nan result = self.model_omit.fit(data, params, x=x) assert_results_close(result.values, self.true_values()) # Raise if any NaN values are present. raises = lambda: self.model_raise.fit(data, params, x=x) self.assertRaises(ValueError, raises) def check_skip_independent_vars(self): # to be overridden for models that do not accept indep vars pass def test_aic(self): model = self.model # Pass Parameters object. 
params = model.make_params(**self.guess()) result = model.fit(self.data, params, x=self.x) aic = result.aic self.assertTrue(aic < 0) # aic must be negative # Pass extra unused Parameter. params.add("unused_param", value=1.0, vary=True) result = model.fit(self.data, params, x=self.x) aic_extra = result.aic self.assertTrue(aic_extra < 0) # aic must be negative self.assertTrue(aic < aic_extra) # extra param should lower the aic def test_bic(self): model = self.model # Pass Parameters object. params = model.make_params(**self.guess()) result = model.fit(self.data, params, x=self.x) bic = result.bic self.assertTrue(bic < 0) # aic must be negative # Compare to AIC aic = result.aic self.assertTrue(aic < bic) # aic should be lower than bic # Pass extra unused Parameter. params.add("unused_param", value=1.0, vary=True) result = model.fit(self.data, params, x=self.x) bic_extra = result.bic self.assertTrue(bic_extra < 0) # bic must be negative self.assertTrue(bic < bic_extra) # extra param should lower the bic class TestUserDefiniedModel(CommonTests, unittest.TestCase): # mainly aimed at checking that the API does what it says it does # and raises the right exceptions or warnings when things are not right import six if six.PY2: from six import assertRaisesRegex def setUp(self): self.true_values = lambda: dict(amplitude=7.1, center=1.1, sigma=2.40) self.guess = lambda: dict(amplitude=5, center=2, sigma=4) # return a fresh copy self.model_constructor = ( lambda *args, **kwargs: Model(gaussian, *args, **kwargs)) super().setUp() @property def x(self): return np.linspace(-10, 10, num=1000) def test_lists_become_arrays(self): # smoke test self.model.fit([1, 2, 3], x=[1, 2, 3], **self.guess()) pytest.raises(ValueError, self.model.fit, [1, 2, None, 3], x=[1, 2, 3, 4], **self.guess()) def test_missing_param_raises_error(self): # using keyword argument parameters guess_missing_sigma = self.guess() del guess_missing_sigma['sigma'] # f = lambda: self.model.fit(self.data, x=self.x, **guess_missing_sigma) # self.assertRaises(ValueError, f) # using Parameters params = self.model.make_params() for param_name, value in guess_missing_sigma.items(): params[param_name].value = value self.model.fit(self.data, params, x=self.x) def test_extra_param_issues_warning(self): # The function accepts extra params, Model will warn but not raise. 
def flexible_func(x, amplitude, center, sigma, **kwargs): return gaussian(x, amplitude, center, sigma) flexible_model = Model(flexible_func) pars = flexible_model.make_params(**self.guess()) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") flexible_model.fit(self.data, pars, x=self.x, extra=5) self.assertTrue(len(w) == 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) def test_missing_independent_variable_raises_error(self): pars = self.model.make_params(**self.guess()) f = lambda: self.model.fit(self.data, pars) self.assertRaises(KeyError, f) def test_bounding(self): true_values = self.true_values() true_values['center'] = 1.3 # as close as it's allowed to get pars = self.model.make_params(**self.guess()) pars['center'].set(value=2, min=1.3) result = self.model.fit(self.data, pars, x=self.x) assert_results_close(result.values, true_values, rtol=0.05) def test_vary_false(self): true_values = self.true_values() true_values['center'] = 1.3 pars = self.model.make_params(**self.guess()) pars['center'].set(value=1.3, vary=False) result = self.model.fit(self.data, pars, x=self.x) assert_results_close(result.values, true_values, rtol=0.05) # testing model addition... def test_user_defined_gaussian_plus_constant(self): data = self.data + 5.0 model = self.model + models.ConstantModel() guess = self.guess() pars = model.make_params(c=10.1, **guess) true_values = self.true_values() true_values['c'] = 5.0 result = model.fit(data, pars, x=self.x) assert_results_close(result.values, true_values, rtol=0.01, atol=0.01) def test_model_with_prefix(self): # model with prefix of 'a' and 'b' mod = models.GaussianModel(prefix='a') vals = {'center': 2.45, 'sigma': 0.8, 'amplitude': 3.15} data = gaussian(x=self.x, **vals) + self.noise/3.0 pars = mod.guess(data, x=self.x) self.assertTrue('aamplitude' in pars) self.assertTrue('asigma' in pars) out = mod.fit(data, pars, x=self.x) self.assertTrue(out.params['aamplitude'].value > 2.0) self.assertTrue(out.params['acenter'].value > 2.0) self.assertTrue(out.params['acenter'].value < 3.0) mod = models.GaussianModel(prefix='b') data = gaussian(x=self.x, **vals) + self.noise/3.0 pars = mod.guess(data, x=self.x) self.assertTrue('bamplitude' in pars) self.assertTrue('bsigma' in pars) def test_change_prefix(self): "should pass!" 
mod = models.GaussianModel(prefix='b') set_prefix_failed = None try: mod.prefix = 'c' set_prefix_failed = False except AttributeError: set_prefix_failed = True except: # noqa: E722 set_prefix_failed = None self.assertFalse(set_prefix_failed) new_expr = mod.param_hints['fwhm']['expr'] self.assertTrue('csigma' in new_expr) self.assertFalse('bsigma' in new_expr) def test_model_name(self): # test setting the name for built-in models mod = models.GaussianModel(name='user_name') self.assertEqual(mod.name, "Model(user_name)") def test_sum_of_two_gaussians(self): # two user-defined gaussians model1 = self.model f2 = lambda x, amp, cen, sig: gaussian(x, amplitude=amp, center=cen, sigma=sig) model2 = Model(f2) values1 = self.true_values() values2 = {'cen': 2.45, 'sig': 0.8, 'amp': 3.15} data = (gaussian(x=self.x, **values1) + f2(x=self.x, **values2) + self.noise/3.0) model = self.model + model2 pars = model.make_params() pars['sigma'].set(value=2, min=0) pars['center'].set(value=1, min=0.2, max=1.8) pars['amplitude'].set(value=3, min=0) pars['sig'].set(value=1, min=0) pars['cen'].set(value=2.4, min=2, max=3.5) pars['amp'].set(value=1, min=0) true_values = dict(list(values1.items()) + list(values2.items())) result = model.fit(data, pars, x=self.x) assert_results_close(result.values, true_values, rtol=0.01, atol=0.01) # user-defined models with common parameter names # cannot be added, and should raise f = lambda: model1 + model1 self.assertRaises(NameError, f) # two predefined_gaussians, using suffix to differentiate model1 = models.GaussianModel(prefix='g1_') model2 = models.GaussianModel(prefix='g2_') model = model1 + model2 true_values = {'g1_center': values1['center'], 'g1_amplitude': values1['amplitude'], 'g1_sigma': values1['sigma'], 'g2_center': values2['cen'], 'g2_amplitude': values2['amp'], 'g2_sigma': values2['sig']} pars = model.make_params() pars['g1_sigma'].set(2) pars['g1_center'].set(1) pars['g1_amplitude'].set(3) pars['g2_sigma'].set(1) pars['g2_center'].set(2.4) pars['g2_amplitude'].set(1) result = model.fit(data, pars, x=self.x) assert_results_close(result.values, true_values, rtol=0.01, atol=0.01) # without suffix, the names collide and Model should raise model1 = models.GaussianModel() model2 = models.GaussianModel() f = lambda: model1 + model2 self.assertRaises(NameError, f) def test_sum_composite_models(self): # test components of composite model created adding composite model model1 = models.GaussianModel(prefix='g1_') model2 = models.GaussianModel(prefix='g2_') model3 = models.GaussianModel(prefix='g3_') model4 = models.GaussianModel(prefix='g4_') model_total1 = (model1 + model2) + model3 for mod in [model1, model2, model3]: self.assertTrue(mod in model_total1.components) model_total2 = model1 + (model2 + model3) for mod in [model1, model2, model3]: self.assertTrue(mod in model_total2.components) model_total3 = (model1 + model2) + (model3 + model4) for mod in [model1, model2, model3, model4]: self.assertTrue(mod in model_total3.components) def test_eval_components(self): model1 = models.GaussianModel(prefix='g1_') model2 = models.GaussianModel(prefix='g2_') model3 = models.ConstantModel(prefix='bkg_') mod = model1 + model2 + model3 pars = mod.make_params() values1 = dict(amplitude=7.10, center=1.1, sigma=2.40) values2 = dict(amplitude=12.2, center=2.5, sigma=0.5) data = (1.01 + gaussian(x=self.x, **values1) + gaussian(x=self.x, **values2) + 0.05*self.noise) pars['g1_sigma'].set(2) pars['g1_center'].set(1, max=1.5) pars['g1_amplitude'].set(3) pars['g2_sigma'].set(1) 
pars['g2_center'].set(2.6, min=2.0) pars['g2_amplitude'].set(1) pars['bkg_c'].set(1.88) result = mod.fit(data, params=pars, x=self.x) self.assertTrue(abs(result.params['g1_amplitude'].value - 7.1) < 1.5) self.assertTrue(abs(result.params['g2_amplitude'].value - 12.2) < 1.5) self.assertTrue(abs(result.params['g1_center'].value - 1.1) < 0.2) self.assertTrue(abs(result.params['g2_center'].value - 2.5) < 0.2) self.assertTrue(abs(result.params['bkg_c'].value - 1.0) < 0.25) comps = mod.eval_components(x=self.x) assert 'bkg_' in comps def test_composite_has_bestvalues(self): # test that a composite model has non-empty best_values model1 = models.GaussianModel(prefix='g1_') model2 = models.GaussianModel(prefix='g2_') mod = model1 + model2 pars = mod.make_params() values1 = dict(amplitude=7.10, center=1.1, sigma=2.40) values2 = dict(amplitude=12.2, center=2.5, sigma=0.5) data = (gaussian(x=self.x, **values1) + gaussian(x=self.x, **values2) + 0.1*self.noise) pars['g1_sigma'].set(value=2) pars['g1_center'].set(value=1, max=1.5) pars['g1_amplitude'].set(value=3) pars['g2_sigma'].set(value=1) pars['g2_center'].set(value=2.6, min=2.0) pars['g2_amplitude'].set(value=1) result = mod.fit(data, params=pars, x=self.x) self.assertTrue(len(result.best_values) == 6) self.assertTrue(abs(result.params['g1_amplitude'].value - 7.1) < 0.5) self.assertTrue(abs(result.params['g2_amplitude'].value - 12.2) < 0.5) self.assertTrue(abs(result.params['g1_center'].value - 1.1) < 0.2) self.assertTrue(abs(result.params['g2_center'].value - 2.5) < 0.2) for name, par in pars.items(): assert len(repr(par)) > 5 def test_composite_plotting(self): # test that a composite model has non-empty best_values pytest.importorskip("matplotlib") import matplotlib matplotlib.use('Agg') model1 = models.GaussianModel(prefix='g1_') model2 = models.GaussianModel(prefix='g2_') mod = model1 + model2 pars = mod.make_params() values1 = dict(amplitude=7.10, center=1.1, sigma=2.40) values2 = dict(amplitude=12.2, center=2.5, sigma=0.5) data = (gaussian(x=self.x, **values1) + gaussian(x=self.x, **values2) + 0.1*self.noise) pars['g1_sigma'].set(2) pars['g1_center'].set(1, max=1.5) pars['g1_amplitude'].set(3) pars['g2_sigma'].set(1) pars['g2_center'].set(2.6, min=2.0) pars['g2_amplitude'].set(1) result = mod.fit(data, params=pars, x=self.x) fig, ax = result.plot(show_init=True) assert isinstance(fig, matplotlib.figure.Figure) assert isinstance(ax, matplotlib.axes.GridSpec) comps = result.eval_components(x=self.x) assert len(comps) == 2 assert 'g1_' in comps def test_hints_in_composite_models(self): # test propagation of hints from base models to composite model def func(x, amplitude): pass m1 = Model(func, prefix='p1_') m2 = Model(func, prefix='p2_') m1.set_param_hint('amplitude', value=1) m2.set_param_hint('amplitude', value=2) mx = (m1 + m2) params = mx.make_params() param_values = {name: p.value for name, p in params.items()} self.assertEqual(param_values['p1_amplitude'], 1) self.assertEqual(param_values['p2_amplitude'], 2) def test_hints_for_peakmodels(self): # test that height/fwhm do not cause asteval errors. x = np.linspace(-10, 10, 101) y = np.sin(x / 3) + x/100. 
m1 = models.LinearModel(prefix='m1_') params = m1.guess(y, x=x) m2 = models.GaussianModel(prefix='m2_') params.update(m2.make_params()) m = m1 + m2 # noqa: F841 param_values = {name: p.value for name, p in params.items()} self.assertTrue(param_values['m1_intercept'] < -0.0) self.assertEqual(param_values['m2_amplitude'], 1) def test_weird_param_hints(self): # tests Github Issue 312, a very weird way to access param_hints def func(x, amp): return amp*x m = Model(func) models = {} for i in range(2): m.set_param_hint('amp', value=1) m.set_param_hint('amp', value=25) models[i] = Model(func, prefix='mod%i_' % i) models[i].param_hints['amp'] = m.param_hints['amp'] self.assertEqual(models[0].param_hints['amp'], models[1].param_hints['amp']) def test_param_hint_explicit_value(self): # tests Github Issue 384 pmod = PseudoVoigtModel() params = pmod.make_params(sigma=2, fraction=0.77) assert_allclose(params['fraction'].value, 0.77, rtol=0.01) def test_composite_model_with_expr_constrains(self): """Smoke test for composite model fitting with expr constraints.""" y = [0, 0, 4, 2, 1, 8, 21, 21, 23, 35, 50, 54, 46, 70, 77, 87, 98, 113, 148, 136, 185, 195, 194, 168, 170, 139, 155, 115, 132, 109, 102, 85, 69, 81, 82, 80, 71, 64, 79, 88, 111, 97, 97, 73, 72, 62, 41, 30, 13, 3, 9, 7, 0, 0, 0] x = np.arange(-0.2, 1.2, 0.025)[:-1] + 0.5*0.025 def gauss(x, sigma, mu, A): return A*np.exp(-(x-mu)**2/(2*sigma**2)) # Initial values p1_mu = 0.2 p1_sigma = 0.1 p2_sigma = 0.1 peak1 = Model(gauss, prefix='p1_') peak2 = Model(gauss, prefix='p2_') model = peak1 + peak2 model.set_param_hint('p1_mu', value=p1_mu, min=-1, max=2) model.set_param_hint('p1_sigma', value=p1_sigma, min=0.01, max=0.2) model.set_param_hint('p2_sigma', value=p2_sigma, min=0.01, max=0.2) model.set_param_hint('p1_A', value=100, min=0.01) model.set_param_hint('p2_A', value=50, min=0.01) # Constrains the distance between peaks to be > 0 model.set_param_hint('pos_delta', value=0.3, min=0) model.set_param_hint('p2_mu', min=-1, expr='p1_mu + pos_delta') # Test fitting result = model.fit(y, x=x) self.assertTrue(result.params['pos_delta'].value > 0) def test_model_nan_policy(self): """Tests for nan_policy with NaN values in the input data.""" x = np.linspace(0, 10, 201) np.random.seed(0) y = gaussian(x, 10.0, 6.15, 0.8) y += gaussian(x, 8.0, 6.35, 1.1) y += gaussian(x, 0.25, 6.00, 7.5) y += np.random.normal(size=len(x), scale=0.5) # with NaN values in the input data y[55] = y[91] = np.nan mod = PseudoVoigtModel() params = mod.make_params(amplitude=20, center=5.5, sigma=1, fraction=0.25) params['fraction'].vary = False # with raise, should get a ValueError result = lambda: mod.fit(y, params, x=x, nan_policy='raise') msg = ('NaN values detected in your input data or the output of your ' 'objective/model function - fitting algorithms cannot handle this!') self.assertRaisesRegex(ValueError, msg, result) # with propagate, should get no error, but bad results result = mod.fit(y, params, x=x, nan_policy='propagate') self.assertTrue(result.success) self.assertTrue(np.isnan(result.chisqr)) self.assertTrue(np.isnan(result.aic)) self.assertFalse(result.errorbars) self.assertTrue(result.params['amplitude'].stderr is None) self.assertTrue(abs(result.params['amplitude'].value - 20.0) < 0.001) # with omit, should get good results result = mod.fit(y, params, x=x, nan_policy='omit') self.assertTrue(result.success) self.assertTrue(result.chisqr > 2.0) self.assertTrue(result.aic < -100) self.assertTrue(result.errorbars) self.assertTrue(result.params['amplitude'].stderr > 0.1) 
self.assertTrue(abs(result.params['amplitude'].value - 20.0) < 5.0) self.assertTrue(abs(result.params['center'].value - 6.0) < 0.5) # with 'wrong_argument', should get a ValueError err_msg = r"nan_policy must be 'propagate', 'omit', or 'raise'." with pytest.raises(ValueError, match=err_msg): mod.fit(y, params, x=x, nan_policy='wrong_argument') def test_model_nan_policy_NaNs_by_model(self): """Test for nan_policy with NaN values generated by the model function.""" def double_exp(x, a1, t1, a2, t2): return a1*np.exp(-x/t1) + a2*np.exp(-(x-0.1) / t2) model = Model(double_exp) truths = (3.0, 2.0, -5.0, 10.0) x = np.linspace(1, 10, 250) np.random.seed(0) y = double_exp(x, *truths) + 0.1*np.random.randn(x.size) p = model.make_params(a1=4, t1=3, a2=4, t2=3) result = lambda: model.fit(data=y, params=p, x=x, method='Nelder', nan_policy='raise') msg = 'The model function generated NaN values and the fit aborted!' self.assertRaisesRegex(ValueError, msg, result) @pytest.mark.skipif(sys.version_info.major == 2, reason="cannot use wrapped functions with Python 2") def test_wrapped_model_func(self): x = np.linspace(-1, 1, 51) y = 2.0*x + 3 + 0.0003 * x*x y += np.random.normal(size=len(x), scale=0.025) mod = Model(linear_func) pars = mod.make_params(a=1.5, b=2.5) tmp = mod.eval(pars, x=x) self.assertTrue(tmp.max() > 3) self.assertTrue(tmp.min() > -20) result = mod.fit(y, pars, x=x) self.assertTrue(result.chisqr < 0.05) self.assertTrue(result.aic < -350) self.assertTrue(result.errorbars) self.assertTrue(abs(result.params['a'].value - 2.0) < 0.05) self.assertTrue(abs(result.params['b'].value - 3.0) < 0.41) class TestLinear(CommonTests, unittest.TestCase): def setUp(self): self.true_values = lambda: dict(slope=5, intercept=2) self.guess = lambda: dict(slope=10, intercept=6) self.model_constructor = models.LinearModel super().setUp() class TestParabolic(CommonTests, unittest.TestCase): def setUp(self): self.true_values = lambda: dict(a=5, b=2, c=8) self.guess = lambda: dict(a=1, b=6, c=3) self.model_constructor = models.ParabolicModel super().setUp() class TestPolynomialOrder2(CommonTests, unittest.TestCase): # class Polynomial constructed with order=2 def setUp(self): self.true_values = lambda: dict(c2=5, c1=2, c0=8) self.guess = lambda: dict(c1=1, c2=6, c0=3) self.model_constructor = models.PolynomialModel self.args = (2,) super().setUp() class TestPolynomialOrder3(CommonTests, unittest.TestCase): # class Polynomial constructed with order=3 def setUp(self): self.true_values = lambda: dict(c3=2, c2=5, c1=2, c0=8) self.guess = lambda: dict(c3=1, c1=1, c2=6, c0=3) self.model_constructor = models.PolynomialModel self.args = (3,) super().setUp() class TestConstant(CommonTests, unittest.TestCase): def setUp(self): self.true_values = lambda: dict(c=5) self.guess = lambda: dict(c=2) self.model_constructor = models.ConstantModel super().setUp() def check_skip_independent_vars(self): raise pytest.skip("ConstantModel has not independent_vars.") class TestPowerlaw(CommonTests, unittest.TestCase): def setUp(self): self.true_values = lambda: dict(amplitude=5, exponent=3) self.guess = lambda: dict(amplitude=2, exponent=8) self.model_constructor = models.PowerLawModel super().setUp() class TestExponential(CommonTests, unittest.TestCase): def setUp(self): self.true_values = lambda: dict(amplitude=5, decay=3) self.guess = lambda: dict(amplitude=2, decay=8) self.model_constructor = models.ExponentialModel super().setUp() class TestComplexConstant(CommonTests, unittest.TestCase): def setUp(self): self.true_values = lambda: 
dict(re=5, im=5) self.guess = lambda: dict(re=2, im=2) self.model_constructor = models.ComplexConstantModel super().setUp() class TestExpression(CommonTests, unittest.TestCase): def setUp(self): self.true_values = lambda: dict(off_c=0.25, amp_c=1.0, x0=2.0) self.guess = lambda: dict(off_c=0.20, amp_c=1.5, x0=2.5) self.expression = "off_c + amp_c * exp(-x/x0)" self.model_constructor = ( lambda *args, **kwargs: models.ExpressionModel(self.expression, *args, **kwargs)) super().setUp() def test_composite_with_expression(self): expression_model = models.ExpressionModel("exp(-x/x0)", name='exp') amp_model = models.ConstantModel(prefix='amp_') off_model = models.ConstantModel(prefix='off_', name="off") comp_model = off_model + amp_model * expression_model x = self.x true_values = self.true_values() data = comp_model.eval(x=x, **true_values) + self.noise # data = 0.25 + 1 * np.exp(-x / 2.) params = comp_model.make_params(**self.guess()) result = comp_model.fit(data, x=x, params=params) assert_results_close(result.values, true_values, rtol=0.01, atol=0.01) data_components = comp_model.eval_components(x=x) self.assertIn('exp', data_components) # lmfit-py-1.0.0/tests/test_model_uncertainties.py000066400000000000000000000060301357751001700220660ustar00rootroot00000000000000"""Tests of ModelResult.eval_uncertainty()""" import numpy as np from numpy.testing import assert_allclose from lmfit.lineshapes import gaussian from lmfit.models import GaussianModel, LinearModel def get_linearmodel(slope=0.8, intercept=0.5, noise=1.5): # create data to be fitted np.random.seed(88) x = np.linspace(0, 10, 101) y = intercept + x*slope y = y + np.random.normal(size=len(x), scale=noise) model = LinearModel() params = model.make_params(intercept=intercept, slope=slope) return x, y, model, params def get_gaussianmodel(amplitude=1.0, center=5.0, sigma=1.0, noise=0.1): # create data to be fitted np.random.seed(7392) x = np.linspace(-20, 20, 201) y = gaussian(x, amplitude, center=center, sigma=sigma) y = y + np.random.normal(size=len(x), scale=noise) model = GaussianModel() params = model.make_params(amplitude=amplitude/5.0, center=center-1.0, sigma=sigma*2.0) return x, y, model, params def test_linear_constant_intercept(): x, y, model, params = get_linearmodel(slope=4, intercept=-10) params['intercept'].vary = False ret = model.fit(y, params, x=x) dely = ret.eval_uncertainty(sigma=1) slope_stderr = ret.params['slope'].stderr assert_allclose(dely.min(), 0, rtol=1.e-2) assert_allclose(dely.max(), slope_stderr*x.max(), rtol=1.e-2) assert_allclose(dely.mean(), slope_stderr*x.mean(), rtol=1.e-2) def test_linear_constant_slope(): x, y, model, params = get_linearmodel(slope=-4, intercept=2.3) params['slope'].vary = False ret = model.fit(y, params, x=x) dely = ret.eval_uncertainty(sigma=1) intercept_stderr = ret.params['intercept'].stderr assert_allclose(dely.min(), intercept_stderr, rtol=1.e-2) assert_allclose(dely.max(), intercept_stderr, rtol=1.e-2) def test_gauss_sigmalevel(): """Test that dely increases as sigma increases.""" x, y, model, params = get_gaussianmodel(amplitude=50.0, center=4.5, sigma=0.78, noise=0.1) ret = model.fit(y, params, x=x) dely_sigma1 = ret.eval_uncertainty(sigma=1) dely_sigma2 = ret.eval_uncertainty(sigma=2) dely_sigma3 = ret.eval_uncertainty(sigma=3) assert(dely_sigma3.mean() > 1.5*dely_sigma2.mean()) assert(dely_sigma2.mean() > 1.5*dely_sigma1.mean()) def test_gauss_noiselevel(): """Test that dely increases as expected with changing noise level.""" lonoise = 0.05 hinoise = 10*lonoise x, y, model, 
params = get_gaussianmodel(amplitude=20.0, center=2.1, sigma=1.0, noise=lonoise) ret1 = model.fit(y, params, x=x) dely_lonoise = ret1.eval_uncertainty(sigma=1) x, y, model, params = get_gaussianmodel(amplitude=20.0, center=2.1, sigma=1.0, noise=hinoise) ret2 = model.fit(y, params, x=x) dely_hinoise = ret2.eval_uncertainty(sigma=1) assert_allclose(dely_hinoise.mean(), 10*dely_lonoise.mean(), rtol=1.e-2) lmfit-py-1.0.0/tests/test_multidatasets.py000066400000000000000000000042501357751001700207160ustar00rootroot00000000000000"""Example fitting to multiple (simulated) data sets""" import numpy as np from lmfit import Parameters, minimize from lmfit.lineshapes import gaussian def gauss_dataset(params, i, x): """calc gaussian from params for data set i using simple, hardwired naming convention""" amp = params['amp_%i' % (i+1)] cen = params['cen_%i' % (i+1)] sig = params['sig_%i' % (i+1)] return gaussian(x, amp, cen, sig) def objective(params, x, data): """ calculate total residual for fits to several data sets held in a 2-D array, and modeled by Gaussian functions""" ndata, nx = data.shape resid = 0.0*data[:] # make residual per data set for i in range(ndata): resid[i, :] = data[i, :] - gauss_dataset(params, i, x) # now flatten this to a 1D array, as minimize() needs return resid.flatten() def test_multidatasets(): # create 5 datasets x = np.linspace(-1, 2, 151) data = [] for i in np.arange(5): amp = 2.60 + 1.50*np.random.rand() cen = -0.20 + 1.50*np.random.rand() sig = 0.25 + 0.03*np.random.rand() dat = gaussian(x, amp, cen, sig) + np.random.normal(size=len(x), scale=0.1) data.append(dat) # data has shape (5, 151) data = np.array(data) assert(data.shape) == (5, 151) # create 5 sets of parameters, one per data set pars = Parameters() for iy, y in enumerate(data): pars.add('amp_%i' % (iy+1), value=0.5, min=0.0, max=200) pars.add('cen_%i' % (iy+1), value=0.4, min=-2.0, max=2.0) pars.add('sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0) # but now constrain all values of sigma to have the same value # by assigning sig_2, sig_3, .. sig_5 to be equal to sig_1 for iy in (2, 3, 4, 5): pars['sig_%i' % iy].expr = 'sig_1' # run the global fit to all the data sets out = minimize(objective, pars, args=(x, data)) assert(len(pars) == 15) assert(out.nvarys == 11) assert(out.nfev > 15) assert(out.chisqr > 1.0) assert(pars['amp_1'].value > 0.1) assert(pars['sig_1'].value > 0.1) assert(pars['sig_2'].value == pars['sig_1'].value) lmfit-py-1.0.0/tests/test_nose.py000066400000000000000000000532761357751001700170130ustar00rootroot00000000000000import unittest import numpy as np from numpy import pi from numpy.testing import (assert_, assert_allclose, assert_almost_equal, assert_equal, dec) import pytest from uncertainties import ufloat from lmfit import Minimizer, Parameters, minimize from lmfit.lineshapes import gaussian from lmfit.minimizer import (HAS_EMCEE, SCALAR_METHODS, MinimizerResult, _nan_policy) def check(para, real_val, sig=3): err = abs(para.value - real_val) assert(err < sig * para.stderr) def check_wo_stderr(para, real_val, sig=0.1): err = abs(para.value - real_val) assert(err < sig) def check_paras(para_fit, para_real, sig=3): for i in para_fit: check(para_fit[i], para_real[i].value, sig=sig) def test_simple(): # create data to be fitted np.random.seed(1) x = np.linspace(0, 15, 301) data = (5. 
* np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) + np.random.normal(size=len(x), scale=0.2)) # define objective function: returns the array to be minimized def fcn2min(params, x, data): """model decaying sine wave, subtract data""" amp = params['amp'] shift = params['shift'] omega = params['omega'] decay = params['decay'] model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay) return model - data # create a set of Parameters params = Parameters() params.add('amp', value=10, min=0) params.add('decay', value=0.1) params.add('shift', value=0.0, min=-pi/2., max=pi/2) params.add('omega', value=3.0) # do fit, here with leastsq model result = minimize(fcn2min, params, args=(x, data)) # assert that the real parameters are found for para, val in zip(result.params.values(), [5, 0.025, -.1, 2]): check(para, val) def test_lbfgsb(): p_true = Parameters() p_true.add('amp', value=14.0) p_true.add('period', value=5.33) p_true.add('shift', value=0.123) p_true.add('decay', value=0.010) def residual(pars, x, data=None): amp = pars['amp'] per = pars['period'] shift = pars['shift'] decay = pars['decay'] if abs(shift) > pi/2: shift = shift - np.sign(shift) * pi model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay) if data is None: return model return (model - data) n = 2500 xmin = 0. xmax = 250.0 noise = np.random.normal(scale=0.7215, size=n) x = np.linspace(xmin, xmax, n) data = residual(p_true, x) + noise fit_params = Parameters() fit_params.add('amp', value=11.0, min=5, max=20) fit_params.add('period', value=5., min=1., max=7) fit_params.add('shift', value=.10, min=0.0, max=0.2) fit_params.add('decay', value=6.e-3, min=0, max=0.1) out = minimize(residual, fit_params, method='lbfgsb', args=(x,), kws={'data': data}) for para, true_para in zip(out.params.values(), p_true.values()): check_wo_stderr(para, true_para.value) def test_derive(): def func(pars, x, data=None): model = pars['a'] * np.exp(-pars['b'] * x) + pars['c'] if data is None: return model return model - data def dfunc(pars, x, data=None): v = np.exp(-pars['b']*x) return np.array([v, -pars['a']*x*v, np.ones(len(x))]) def f(var, x): return var[0] * np.exp(-var[1] * x) + var[2] params1 = Parameters() params1.add('a', value=10) params1.add('b', value=10) params1.add('c', value=10) params2 = Parameters() params2.add('a', value=10) params2.add('b', value=10) params2.add('c', value=10) a, b, c = 2.5, 1.3, 0.8 x = np.linspace(0, 4, 50) y = f([a, b, c], x) data = y + 0.15*np.random.normal(size=len(x)) # fit without analytic derivative min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data': data}) out1 = min1.leastsq() # fit with analytic derivative min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data': data}) out2 = min2.leastsq(Dfun=dfunc, col_deriv=1) check_wo_stderr(out1.params['a'], out2.params['a'].value, 0.00005) check_wo_stderr(out1.params['b'], out2.params['b'].value, 0.00005) check_wo_stderr(out1.params['c'], out2.params['c'].value, 0.00005) def test_peakfit(): def residual(pars, x, data=None): g1 = gaussian(x, pars['a1'], pars['c1'], pars['w1']) g2 = gaussian(x, pars['a2'], pars['c2'], pars['w2']) model = g1 + g2 if data is None: return model return (model - data) n = 601 xmin = 0. 
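# --- Illustrative sketch (editorial addition, not part of the original test
# suite): how an analytic Jacobian can be handed to Minimizer.leastsq() via
# the Dfun/col_deriv keywords, mirroring test_derive() above. The function
# and parameter names used here are hypothetical.
import numpy as np
from lmfit import Minimizer, Parameters

def _expdecay_resid(pars, x, data=None):
    model = pars['a'] * np.exp(-pars['b'] * x) + pars['c']
    return model if data is None else model - data

def _expdecay_jac(pars, x, data=None):
    # one row per parameter (hence col_deriv=1): d/da, d/db, d/dc
    v = np.exp(-pars['b'] * x)
    return np.array([v, -pars['a'] * x * v, np.ones(len(x))])

_jx = np.linspace(0, 4, 50)
_jdata = 2.5 * np.exp(-1.3 * _jx) + 0.8
_jpars = Parameters()
_jpars.add('a', value=10)
_jpars.add('b', value=10)
_jpars.add('c', value=10)
_jmini = Minimizer(_expdecay_resid, _jpars, fcn_args=(_jx,), fcn_kws={'data': _jdata})
_jout = _jmini.leastsq(Dfun=_expdecay_jac, col_deriv=1)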
xmax = 15.0 noise = np.random.normal(scale=.65, size=n) x = np.linspace(xmin, xmax, n) org_params = Parameters() org_params.add_many(('a1', 12.0, True, None, None, None), ('c1', 5.3, True, None, None, None), ('w1', 1.0, True, None, None, None), ('a2', 9.1, True, None, None, None), ('c2', 8.1, True, None, None, None), ('w2', 2.5, True, None, None, None)) data = residual(org_params, x) + noise fit_params = Parameters() fit_params.add_many(('a1', 8.0, True, None, 14., None), ('c1', 5.0, True, None, None, None), ('w1', 0.7, True, None, None, None), ('a2', 3.1, True, None, None, None), ('c2', 8.8, True, None, None, None)) fit_params.add('w2', expr='2.5*w1') myfit = Minimizer(residual, fit_params, fcn_args=(x,), fcn_kws={'data': data}) myfit.prepare_fit() out = myfit.leastsq() check_paras(out.params, org_params) def test_scalar_minimize_has_no_uncertainties(): # scalar_minimize doesn't calculate uncertainties. # when a scalar_minimize is run the stderr and correl for each parameter # should be None. (stderr and correl are set to None when a Parameter is # initialised). # This requires a reset after a leastsq fit has been done. # Only when scalar_minimize calculates stderr and correl can this test # be removed. np.random.seed(1) x = np.linspace(0, 15, 301) data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) + np.random.normal(size=len(x), scale=0.2)) # define objective function: returns the array to be minimized def fcn2min(params, x, data): """model decaying sine wave, subtract data""" amp = params['amp'] shift = params['shift'] omega = params['omega'] decay = params['decay'] model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay) return model - data # create a set of Parameters params = Parameters() params.add('amp', value=10, min=0) params.add('decay', value=0.1) params.add('shift', value=0.0, min=-pi/2., max=pi/2) params.add('omega', value=3.0) mini = Minimizer(fcn2min, params, fcn_args=(x, data)) out = mini.minimize() assert_(np.isfinite(out.params['amp'].stderr)) assert out.errorbars out2 = mini.minimize(method='nelder-mead') assert_(out2.params['amp'].stderr is None) assert_(out2.params['decay'].stderr is None) assert_(out2.params['shift'].stderr is None) assert_(out2.params['omega'].stderr is None) assert_(out2.params['amp'].correl is None) assert_(out2.params['decay'].correl is None) assert_(out2.params['shift'].correl is None) assert_(out2.params['omega'].correl is None) assert not out2.errorbars def test_scalar_minimize_reduce_fcn(): # test that the reduce_fcn option for scalar_minimize # gives different and improved results with outliers np.random.seed(2) x = np.linspace(0, 10, 101) yo = 1.0 + 2.0*np.sin(4*x) * np.exp(-x / 5) y = yo + np.random.normal(size=len(yo), scale=0.250) outliers = np.random.randint(int(len(x)/3.0), len(x), int(len(x)/12)) y[outliers] += 5*np.random.random(len(outliers)) # define objective function: returns the array to be minimized def objfunc(pars, x, data): decay = pars['decay'] offset = pars['offset'] omega = pars['omega'] amp = pars['amp'] model = offset + amp * np.sin(x*omega) * np.exp(-x/decay) return model - data # create a set of Parameters params = Parameters() params.add('offset', 2.0) params.add('omega', 3.3) params.add('amp', 2.5) params.add('decay', 1.0) method = 'L-BFGS-B' out1 = minimize(objfunc, params, args=(x, y), method=method) out2 = minimize(objfunc, params, args=(x, y), method=method, reduce_fcn='neglogcauchy') assert_allclose(out1.params['omega'].value, 4.0, rtol=0.01) assert_allclose(out1.params['decay'].value, 7.6, rtol=0.01) 
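# --- Illustrative sketch (editorial addition): the reduce_fcn option used in
# test_scalar_minimize_reduce_fcn lets a scalar method minimize a robust
# Cauchy log-likelihood instead of the default sum of squares, which
# down-weights outliers. Data and parameter names here are hypothetical.
import numpy as np
from lmfit import Parameters, minimize

def _line_resid(pars, x, data):
    return pars['slope'] * x + pars['offset'] - data

_lx = np.linspace(0, 10, 101)
_ly = 1.5 * _lx + 0.3
_ly[::10] += 8.0                     # a few gross outliers
_lp = Parameters()
_lp.add('slope', value=1.0)
_lp.add('offset', value=0.0)
_robust = minimize(_line_resid, _lp, args=(_lx, _ly), method='L-BFGS-B',
                   reduce_fcn='neglogcauchy')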
assert_allclose(out2.params['omega'].value, 4.0, rtol=0.01) assert_allclose(out2.params['decay'].value, 5.8, rtol=0.01) def test_multidimensional_fit_GH205(): # test that you don't need to flatten the output from the objective # function. Tests regression for GH205. pos = np.linspace(0, 99, 100) xv, yv = np.meshgrid(pos, pos) f = lambda xv, yv, lambda1, lambda2: (np.sin(xv * lambda1) + np.cos(yv * lambda2)) data = f(xv, yv, 0.3, 3) assert_(data.ndim, 2) def fcn2min(params, xv, yv, data): """model decaying sine wave, subtract data""" model = f(xv, yv, params['lambda1'], params['lambda2']) return model - data # create a set of Parameters params = Parameters() params.add('lambda1', value=0.4) params.add('lambda2', value=3.2) mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data)) mini.minimize() def test_ufloat(): """Test of ufloat from uncertainties.""" x = ufloat(1, 0.1) assert_allclose(x.nominal_value, 1.0, rtol=1.e-7) assert_allclose(x.std_dev, 0.1, rtol=1.e-7) y = x*x assert_allclose(y.nominal_value, 1.0, rtol=1.e-7) assert_allclose(y.std_dev, 0.2, rtol=1.e-7) y = x - x assert_allclose(y.nominal_value, 0.0, rtol=1.e-7) assert_allclose(y.std_dev, 0.0, rtol=1.e-7) class CommonMinimizerTest(unittest.TestCase): def setUp(self): """ test scale minimizers except newton-cg (needs jacobian) and anneal (doesn't work out of the box). """ p_true = Parameters() p_true.add('amp', value=14.0) p_true.add('period', value=5.33) p_true.add('shift', value=0.123) p_true.add('decay', value=0.010) self.p_true = p_true n = 2500 xmin = 0. xmax = 250.0 noise = np.random.normal(scale=0.7215, size=n) self.x = np.linspace(xmin, xmax, n) self.data = self.residual(p_true, self.x) + noise fit_params = Parameters() fit_params.add('amp', value=11.0, min=5, max=20) fit_params.add('period', value=5., min=1., max=7) fit_params.add('shift', value=.10, min=0.0, max=0.2) fit_params.add('decay', value=6.e-3, min=0, max=0.1) self.fit_params = fit_params self.mini = Minimizer(self.residual, fit_params, [self.x, self.data]) def residual(self, pars, x, data=None): amp = pars['amp'] per = pars['period'] shift = pars['shift'] decay = pars['decay'] if abs(shift) > pi/2: shift = shift - np.sign(shift) * pi model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay) if data is None: return model return model - data def test_diffev_bounds_check(self): # You need finite (min, max) for each parameter if you're using # differential_evolution. 
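# --- Illustrative sketch (editorial addition): as the comment above notes,
# differential_evolution requires finite (min, max) bounds on every varying
# parameter. Names are hypothetical; maxiter is kept small only to make the
# sketch cheap to run.
import numpy as np
from lmfit import Parameters, minimize

def _sine_resid(pars, x, data):
    return pars['amp'] * np.sin(x * pars['freq']) - data

_dx = np.linspace(0, 10, 201)
_dy = 2.0 * np.sin(3.0 * _dx)
_dp = Parameters()
_dp.add('amp', value=1.0, min=0.0, max=10.0)     # finite bounds required
_dp.add('freq', value=2.0, min=0.1, max=5.0)
_de_out = minimize(_sine_resid, _dp, args=(_dx, _dy),
                   method='differential_evolution', maxiter=10)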
self.fit_params['decay'].min = -np.inf self.fit_params['decay'].vary = True self.minimizer = 'differential_evolution' pytest.raises(ValueError, self.scalar_minimizer) # but only if a parameter is not fixed self.fit_params['decay'].vary = False self.mini.scalar_minimize(method='differential_evolution', maxiter=1) def test_scalar_minimizers(self): # test all the scalar minimizers for method in SCALAR_METHODS: if method in ['newton', 'dogleg', 'trust-ncg', 'cg', 'trust-exact', 'trust-krylov', 'trust-constr']: continue self.minimizer = SCALAR_METHODS[method] if method == 'Nelder-Mead': sig = 0.2 else: sig = 0.15 self.scalar_minimizer(sig=sig) def scalar_minimizer(self, sig=0.15): out = self.mini.scalar_minimize(method=self.minimizer) self.residual(out.params, self.x) for para, true_para in zip(out.params.values(), self.p_true.values()): check_wo_stderr(para, true_para.value, sig=sig) def test_nan_policy(self): # check that an error is raised if there are nan in # the data returned by userfcn self.data[0] = np.nan for method in SCALAR_METHODS: if method == 'differential_evolution': pytest.raises(RuntimeError, self.mini.scalar_minimize, SCALAR_METHODS[method]) else: pytest.raises(ValueError, self.mini.scalar_minimize, SCALAR_METHODS[method]) pytest.raises(ValueError, self.mini.minimize) # now check that the fit proceeds if nan_policy is 'omit' self.mini.nan_policy = 'omit' res = self.mini.minimize() assert_equal(res.ndata, np.size(self.data, 0) - 1) for para, true_para in zip(res.params.values(), self.p_true.values()): check_wo_stderr(para, true_para.value, sig=0.15) def test_nan_policy_function(self): a = np.array([0, 1, 2, 3, np.nan]) pytest.raises(ValueError, _nan_policy, a) assert_(np.isnan(_nan_policy(a, nan_policy='propagate')[-1])) assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3]) a[-1] = np.inf pytest.raises(ValueError, _nan_policy, a) assert_(np.isposinf(_nan_policy(a, nan_policy='propagate')[-1])) assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3]) assert_equal(_nan_policy(a, handle_inf=False), a) @dec.slow def test_emcee(self): # test emcee if not HAS_EMCEE: return True np.random.seed(123456) out = self.mini.emcee(nwalkers=100, steps=200, burn=50, thin=10) check_paras(out.params, self.p_true, sig=3) @dec.slow def test_emcee_method_kwarg(self): # test with emcee as method keyword argument if not HAS_EMCEE: return True np.random.seed(123456) out = self.mini.minimize(method='emcee', nwalkers=50, steps=200, burn=50, thin=10) assert out.method == 'emcee' assert out.nfev == 50*200 check_paras(out.params, self.p_true, sig=3) out_unweighted = self.mini.minimize(method='emcee', nwalkers=50, steps=200, burn=50, thin=10, is_weighted=False) assert out_unweighted.method == 'emcee' @dec.slow def test_emcee_multiprocessing(self): # test multiprocessing runs raise pytest.skip("Pytest fails with multiprocessing") pytest.importorskip("dill") if not HAS_EMCEE: return True self.mini.emcee(steps=50, workers=4, nwalkers=20) def test_emcee_bounds_length(self): # the log-probability functions check if the parameters are # inside the bounds. Check that the bounds and parameters # are the right lengths for comparison. 
This can be done # if nvarys != nparams if not HAS_EMCEE: return True self.mini.params['amp'].vary = False self.mini.params['period'].vary = False self.mini.params['shift'].vary = False self.mini.emcee(steps=10) @dec.slow def test_emcee_partial_bounds(self): # mcmc with partial bounds if not HAS_EMCEE: return True np.random.seed(123456) # test mcmc output vs lm, some parameters not bounded self.fit_params['amp'].max = np.inf # self.fit_params['amp'].min = -np.inf out = self.mini.emcee(nwalkers=100, steps=300, burn=100, thin=10) check_paras(out.params, self.p_true, sig=3) def test_emcee_init_with_chain(self): # can you initialise with a previous chain if not HAS_EMCEE: return True out = self.mini.emcee(nwalkers=100, steps=5) # can initialise with a chain self.mini.emcee(nwalkers=100, steps=1, pos=out.chain) # can initialise with a correct subset of a chain self.mini.emcee(nwalkers=100, steps=1, pos=out.chain[-1, ...]) # but you can't initialise if the shape is wrong. pytest.raises(ValueError, self.mini.emcee, nwalkers=100, steps=1, pos=out.chain[-1, :-1, ...]) def test_emcee_reuse_sampler(self): if not HAS_EMCEE: return True self.mini.emcee(nwalkers=20, steps=25) # if you've run the sampler the Minimizer object should have a _lastpos # attribute assert_(hasattr(self.mini, '_lastpos')) # now try and re-use sampler out2 = self.mini.emcee(steps=10, reuse_sampler=True) assert_(out2.chain.shape == (35, 20, 4)) # you shouldn't be able to reuse the sampler if nvarys has changed. self.mini.params['amp'].vary = False pytest.raises(ValueError, self.mini.emcee, reuse_sampler=True) def test_emcee_lnpost(self): # check ln likelihood is calculated correctly. It should be # -0.5 * chi**2. result = self.mini.minimize() # obtain the numeric values # note - in this example all the parameters are varied fvars = np.array([par.value for par in result.params.values()]) # calculate the cost function with scaled values (parameters all have # lower and upper bounds. 
scaled_fvars = [] for par, fvar in zip(result.params.values(), fvars): par.value = fvar scaled_fvars.append(par.setup_bounds()) val = self.mini.penalty(np.array(scaled_fvars)) # calculate the log-likelihood value bounds = np.array([(par.min, par.max) for par in result.params.values()]) val2 = self.mini._lnprob(fvars, self.residual, result.params, result.var_names, bounds, userargs=(self.x, self.data)) assert_almost_equal(-0.5 * val, val2) def test_emcee_output(self): # test mcmc output if not HAS_EMCEE: return True try: from pandas import DataFrame except ImportError: return True out = self.mini.emcee(nwalkers=10, steps=20, burn=5, thin=2) assert_(isinstance(out, MinimizerResult)) assert_(isinstance(out.flatchain, DataFrame)) # check that we can access the chains via parameter name # print( out.flatchain['amp'].shape[0], 200) assert_(out.flatchain['amp'].shape[0] == 70) assert out.errorbars assert_(np.isfinite(out.params['amp'].correl['period'])) # the lnprob array should be the same as the chain size assert_(np.size(out.chain)//out.nvarys == np.size(out.lnprob)) # test chain output shapes print(out.lnprob.shape, out.chain.shape, out.flatchain.shape) assert_(out.lnprob.shape == (7, 10)) assert_(out.chain.shape == (7, 10, 4)) assert_(out.flatchain.shape == (70, 4)) @dec.slow def test_emcee_float(self): # test that it works if the residuals returns a float, not a vector if not HAS_EMCEE: return True def resid(pars, x, data=None): return -0.5 * np.sum(self.residual(pars, x, data=data)**2) # just return chi2 def resid2(pars, x, data=None): return np.sum(self.residual(pars, x, data=data)**2) self.mini.userfcn = resid np.random.seed(123456) out = self.mini.emcee(nwalkers=100, steps=200, burn=50, thin=10) check_paras(out.params, self.p_true, sig=3) self.mini.userfcn = resid2 np.random.seed(123456) out = self.mini.emcee(nwalkers=100, steps=200, burn=50, thin=10, float_behavior='chi2') check_paras(out.params, self.p_true, sig=3) @dec.slow def test_emcee_seed(self): # test emcee seeding can reproduce a sampling run if not HAS_EMCEE: return True out = self.mini.emcee(params=self.fit_params, nwalkers=100, steps=1, seed=1) out2 = self.mini.emcee(params=self.fit_params, nwalkers=100, steps=1, seed=1) assert_almost_equal(out.chain, out2.chain) def test_emcee_ntemps(self): # check for DeprecationWarning when using ntemps > 1 if not HAS_EMCEE: return True with pytest.raises(DeprecationWarning): _ = self.mini.emcee(params=self.fit_params, ntemps=5) def residual_for_multiprocessing(pars, x, data=None): # a residual function defined in the top level is needed for # multiprocessing. bound methods don't work. 
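# --- Illustrative sketch (editorial addition): the emcee-based tests above
# sample the posterior with Minimizer.emcee() after an initial least-squares
# fit. This needs the optional 'emcee' package, so the sampling call is left
# commented out; names and settings are hypothetical.
import numpy as np
from lmfit import Minimizer, Parameters

def _decay_resid(pars, x, data):
    return pars['amp'] * np.exp(-x / pars['tau']) - data

_ex = np.linspace(0, 5, 101)
_ey = 3.0 * np.exp(-_ex / 1.5) + np.random.normal(scale=0.05, size=_ex.size)
_ep = Parameters()
_ep.add('amp', value=2.0, min=0, max=10)
_ep.add('tau', value=1.0, min=0.1, max=5)
_emini = Minimizer(_decay_resid, _ep, fcn_args=(_ex, _ey))
_els = _emini.minimize()   # leastsq result used as the starting point
# _mcmc = _emini.emcee(params=_els.params, nwalkers=20, steps=100, burn=20, thin=5)
# _mcmc.flatchain would then hold the posterior samples for each varying parameter.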
amp = pars['amp'] per = pars['period'] shift = pars['shift'] decay = pars['decay'] if abs(shift) > pi/2: shift = shift - np.sign(shift) * pi model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay) if data is None: return model return (model - data) lmfit-py-1.0.0/tests/test_parameter.py000066400000000000000000000402431357751001700200150ustar00rootroot00000000000000"""Tests for the Parameter class.""" from math import trunc import numpy as np from numpy.testing import assert_allclose import pytest import uncertainties as un import lmfit @pytest.fixture def parameter(): """Initialize parameter for tests.""" param = lmfit.Parameter(name='a', value=10.0, vary=True, min=-100.0, max=100.0, expr=None, brute_step=5.0, user_data=1) expected_attribute_values = ('a', 10.0, True, -100.0, 100.0, None, 5.0, 1) assert_parameter_attributes(param, expected_attribute_values) return param, expected_attribute_values def assert_parameter_attributes(par, expected): """Assert that parameter attributes have the expected values.""" par_attr_values = (par.name, par._val, par.vary, par.min, par.max, par._expr, par.brute_step, par.user_data) assert par_attr_values == expected in_out = [(lmfit.Parameter(name='a'), # set name ('a', -np.inf, True, -np.inf, np.inf, None, None, None)), (lmfit.Parameter(name='a', value=10.0), # set value ('a', 10.0, True, -np.inf, np.inf, None, None, None)), (lmfit.Parameter(name='a', vary=False), # fix parameter, set vary to False ('a', -np.inf, False, -np.inf, np.inf, None, None, None)), (lmfit.Parameter(name='a', min=-10.0), # set lower bound, value reset to min ('a', -10.0, True, -10.0, np.inf, None, None, None)), (lmfit.Parameter(name='a', value=-5.0, min=-10.0), # set lower bound ('a', -5.0, True, -10.0, np.inf, None, None, None)), (lmfit.Parameter(name='a', max=10.0), # set upper bound ('a', -np.inf, True, -np.inf, 10.0, None, None, None)), (lmfit.Parameter(name='a', value=25.0, max=10.0), # set upper bound, value reset ('a', 10.0, True, -np.inf, 10.0, None, None, None)), (lmfit.Parameter(name='a', expr="2.0*10.0"), # set expression, vary becomes False ('a', -np.inf, True, -np.inf, np.inf, '2.0*10.0', None, None)), (lmfit.Parameter(name='a', brute_step=0.1), # set brute_step ('a', -np.inf, True, -np.inf, np.inf, None, 0.1, None)), (lmfit.Parameter(name='a', user_data={'b': {}}), # set user_data ('a', -np.inf, True, -np.inf, np.inf, None, None, {'b': {}}))] @pytest.mark.parametrize('par, attr_values', in_out) def test_initialize_Parameter(par, attr_values): """Test the initialization of the Parameter class.""" assert_parameter_attributes(par, attr_values) # check for other default attributes for attribute in ['_expr', '_expr_ast', '_expr_eval', '_expr_deps', '_delay_asteval', 'stderr', 'correl', 'from_internal', '_val']: assert hasattr(par, attribute) def test_Parameter_no_name(): """Test for Parameter name, now required positional argument.""" msg = r"missing 1 required positional argument: 'name'" with pytest.raises(TypeError, match=msg): lmfit.Parameter() def test_init_bounds(): """Tests to make sure that initial bounds are consistent. Only for specific cases not tested above with the initializations of the Parameter class. 
""" # test 1: min > max; should swap min and max par = lmfit.Parameter(name='a', value=0.0, min=10.0, max=-10.0) assert par.min == -10.0 assert par.max == 10.0 # test 2: min == max; should raise a ValueError msg = r"Parameter 'a' has min == max" with pytest.raises(ValueError, match=msg): par = lmfit.Parameter(name='a', value=0.0, min=10.0, max=10.0) # FIXME: ideally this should be impossible to happen ever.... # perhaps we should add a setter method for MIN and MAX as well? # test 3: max or min is equal to None par.min = None par._init_bounds() assert par.min == -np.inf par.max = None par._init_bounds() assert par.max == np.inf def test_parameter_set_value(parameter): """Test the Parameter.set() function with value.""" par, initial_attribute_values = parameter par.set(value=None) # nothing should change assert_parameter_attributes(par, initial_attribute_values) par.set(value=5.0) changed_attribute_values = ('a', 5.0, True, -100.0, 100.0, None, 5.0, 1) assert_parameter_attributes(par, changed_attribute_values) def test_parameter_set_vary(parameter): """Test the Parameter.set() function with vary.""" par, initial_attribute_values = parameter par.set(vary=None) # nothing should change assert_parameter_attributes(par, initial_attribute_values) par.set(vary=False) changed_attribute_values = ('a', 10.0, False, -100.0, 100.0, None, 5.0, 1) assert_parameter_attributes(par, changed_attribute_values) def test_parameter_set_min(parameter): """Test the Parameter.set() function with min.""" par, initial_attribute_values = parameter par.set(min=None) # nothing should change assert_parameter_attributes(par, initial_attribute_values) par.set(min=-50.0) changed_attribute_values = ('a', 10.0, True, -50.0, 100.0, None, 5.0, 1) assert_parameter_attributes(par, changed_attribute_values) def test_parameter_set_max(parameter): """Test the Parameter.set() function with max.""" par, initial_attribute_values = parameter par.set(max=None) # nothing should change assert_parameter_attributes(par, initial_attribute_values) par.set(max=50.0) changed_attribute_values = ('a', 10.0, True, -100.0, 50.0, None, 5.0, 1) assert_parameter_attributes(par, changed_attribute_values) def test_parameter_set_expr(parameter): """Test the Parameter.set() function with expr. Of note, this only tests for setting/removal of the expression; nothing else gets evaluated here.... More specific tests will be present in the Parameters class. 
""" par, _ = parameter par.set(expr='2.0*50.0') # setting an expression, vary --> False changed_attribute_values = ('a', 10.0, False, -100.0, 100.0, '2.0*50.0', 5.0, 1) assert_parameter_attributes(par, changed_attribute_values) par.set(expr=None) # nothing should change assert_parameter_attributes(par, changed_attribute_values) par.set(expr='') # should remove the expression changed_attribute_values = ('a', 10.0, False, -100.0, 100.0, None, 5.0, 1) assert_parameter_attributes(par, changed_attribute_values) def test_parameter_set_brute_step(parameter): """Test the Parameter.set() function with brute_step.""" par, initial_attribute_values = parameter par.set(brute_step=None) # nothing should change assert_parameter_attributes(par, initial_attribute_values) par.set(brute_step=0.0) # brute_step set to None changed_attribute_values = ('a', 10.0, True, -100.0, 100.0, None, None, 1) assert_parameter_attributes(par, changed_attribute_values) par.set(brute_step=1.0) changed_attribute_values = ('a', 10.0, True, -100.0, 100.0, None, 1.0, 1) assert_parameter_attributes(par, changed_attribute_values) def test_getstate(parameter): """Test for the __getstate__ method.""" par, _ = parameter assert par.__getstate__() == ('a', 10.0, True, None, -100.0, 100.0, 5.0, None, None, 10, 1) def test_setstate(parameter): """Test for the __setstate__ method.""" par, initial_attribute_values = parameter state = par.__getstate__() par_new = lmfit.Parameter('new') attributes_new = ('new', -np.inf, True, -np.inf, np.inf, None, None, None) assert_parameter_attributes(par_new, attributes_new) par_new.__setstate__(state) assert_parameter_attributes(par_new, initial_attribute_values) def test_repr(): """Tests for the __repr__ method.""" par = lmfit.Parameter(name='test', value=10.0, min=0.0, max=20.0) assert par.__repr__() == "" par = lmfit.Parameter(name='test', value=10.0, vary=False) assert par.__repr__() == "" par.set(vary=True) par.stderr = 0.1 assert par.__repr__() == "" par = lmfit.Parameter(name='test', expr='10.0*2.5') assert par.__repr__() == "" par = lmfit.Parameter(name='test', brute_step=0.1) assert par.__repr__() == "" def test_setup_bounds_and_scale_gradient_methods(): """Tests for the setup_bounds and scale_gradient methods. Make use of the MINUIT-style transformation to obtain the the Parameter values and scaling factor for the gradient. 
See: https://lmfit.github.io/lmfit-py/bounds.html """ # situation 1: no bounds par_no_bounds = lmfit.Parameter('no_bounds', value=10.0) assert_allclose(par_no_bounds.setup_bounds(), 10.0) assert_allclose(par_no_bounds.scale_gradient(par_no_bounds.value), 1.0) # situation 2: no bounds, min/max set to None after creating the parameter # TODO: ideally this should never happen; perhaps use a setter here par_no_bounds = lmfit.Parameter('no_bounds', value=10.0) par_no_bounds.min = None par_no_bounds.max = None assert_allclose(par_no_bounds.setup_bounds(), 10.0) assert_allclose(par_no_bounds.scale_gradient(par_no_bounds.value), 1.0) # situation 3: upper bound par_upper_bound = lmfit.Parameter('upper_bound', value=10.0, max=25.0) assert_allclose(par_upper_bound.setup_bounds(), 15.968719422671311) assert_allclose(par_upper_bound.scale_gradient(par_upper_bound.value), -0.99503719, rtol=1.e-6) # situation 4: lower bound par_lower_bound = lmfit.Parameter('upper_bound', value=10.0, min=-25.0) assert_allclose(par_lower_bound.setup_bounds(), 35.98610843) assert_allclose(par_lower_bound.scale_gradient(par_lower_bound.value), 0.995037, rtol=1.e-6) # situation 5: both lower and upper bounds par_both_bounds = lmfit.Parameter('both_bounds', value=10.0, min=-25.0, max=25.0) assert_allclose(par_both_bounds.setup_bounds(), 0.4115168460674879) assert_allclose(par_both_bounds.scale_gradient(par_both_bounds.value), -20.976788, rtol=1.e-6) def test__getval(parameter): """Test _getval function.""" par, _ = parameter # test uncertainties.core.Variable in _getval [deprecated] par.set(value=un.ufloat(5.0, 0.2)) with pytest.warns(FutureWarning, match='removed in the next release'): val = par.value assert_allclose(val, 5.0) def test_value_setter(parameter): """Tests for the value setter.""" par, initial_attribute_values = parameter assert_parameter_attributes(par, initial_attribute_values) par.set(value=200.0) # above maximum assert_allclose(par.value, 100.0) par.set(value=-200.0) # below minimum assert_allclose(par.value, -100.0) # TODO: add tests for setter/getter methods for VALUE, EXPR # Tests for magic methods of the Parameter class def test__array__(parameter): """Test the __array__ magic method.""" par, _ = parameter assert np.array(par) == np.array(10.0) def test__str__(parameter): """Test the __str__ magic method.""" par, _ = parameter assert str(par) == "" def test__abs__(parameter): """Test the __abs__ magic method.""" par, _ = parameter assert_allclose(abs(par), 10.0) par.set(value=-10.0) assert_allclose(abs(par), 10.0) def test__neg__(parameter): """Test the __neg__ magic method.""" par, _ = parameter assert_allclose(-par, -10.0) par.set(value=-10.0) assert_allclose(-par, 10.0) def test__pos__(parameter): """Test the __pos__ magic method.""" par, _ = parameter assert_allclose(+par, 10.0) par.set(value=-10.0) assert_allclose(+par, -10.0) def test__bool__(parameter): """Test the __bool__ magic method.""" par, _ = parameter assert bool(par) def test__int__(parameter): """Test the __int__ magic method.""" par, _ = parameter assert isinstance(int(par), int) assert_allclose(int(par), 10) def test__float__(parameter): """Test the __float__ magic method.""" par, _ = parameter par.set(value=5) assert isinstance(float(par), float) assert_allclose(float(par), 5.0) def test__trunc__(parameter): """Test the __trunc__ magic method.""" par, _ = parameter par.set(value=10.5) assert isinstance(trunc(par), int) assert_allclose(trunc(par), 10) def test__add__(parameter): """Test the __add__ magic method.""" par, _ = parameter 
assert_allclose(par + 5.25, 15.25) def test__sub__(parameter): """Test the __sub__ magic method.""" par, _ = parameter assert_allclose(par - 5.25, 4.75) def test__truediv__(parameter): """Test the __truediv__ magic method.""" par, _ = parameter assert_allclose(par / 1.25, 8.0) def test__floordiv__(parameter): """Test the __floordiv__ magic method.""" par, _ = parameter par.set(value=5) assert_allclose(par // 2, 2) def test__divmod__(parameter): """Test the __divmod__ magic method.""" par, _ = parameter assert_allclose(divmod(par, 3), (3, 1)) def test__mod__(parameter): """Test the __mod__ magic method.""" par, _ = parameter assert_allclose(par % 2, 0) assert_allclose(par % 3, 1) def test__mul__(parameter): """Test the __mul__ magic method.""" par, _ = parameter assert_allclose(par * 2.5, 25.0) assert_allclose(par * -0.1, -1.0) def test__pow__(parameter): """Test the __pow__ magic method.""" par, _ = parameter assert_allclose(par ** 0.5, 3.16227766) assert_allclose(par ** 4, 1e4) def test__gt__(parameter): """Test the __gt__ magic method.""" par, _ = parameter assert 11 > par assert not 10 > par def test__ge__(parameter): """Test the __ge__ magic method.""" par, _ = parameter assert 11 >= par assert 10 >= par assert not 9 >= par def test__le__(parameter): """Test the __le__ magic method.""" par, _ = parameter assert 9 <= par assert 10 <= par assert not 11 <= par def test__lt__(parameter): """Test the __lt__ magic method.""" par, _ = parameter assert 9 < par assert not 10 < par def test__eq__(parameter): """Test the __eq__ magic method.""" par, _ = parameter assert 10 == par assert not 9 == par def test__ne__(parameter): """Test the __ne__ magic method.""" par, _ = parameter assert 9 != par assert not 10 != par def test__radd__(parameter): """Test the __radd__ magic method.""" par, _ = parameter assert_allclose(5.25 + par, 15.25) def test__rtruediv__(parameter): """Test the __rtruediv__ magic method.""" par, _ = parameter assert_allclose(1.25 / par, 0.125) def test__rdivmod__(parameter): """Test the __rdivmod__ magic method.""" par, _ = parameter assert_allclose(divmod(3, par), (0, 3)) def test__rfloordiv__(parameter): """Test the __rfloordiv__ magic method.""" par, _ = parameter assert_allclose(2 // par, 0) assert_allclose(20 // par, 2) def test__rmod__(parameter): """Test the __rmod__ magic method.""" par, _ = parameter assert_allclose(2 % par, 2) assert_allclose(25 % par, 5) def test__rmul__(parameter): """Test the __rmul__ magic method.""" par, _ = parameter assert_allclose(2.5 * par, 25.0) assert_allclose(-0.1 * par, -1.0) def test__rpow__(parameter): """Test the __rpow__ magic method.""" par, _ = parameter assert_allclose(0.5 ** par, 0.0009765625) assert_allclose(4 ** par, 1048576) def test__rsub__(parameter): """Test the __rsub__ magic method.""" par, _ = parameter assert_allclose(5.25 - par, -4.75) def test_isParameter(parameter): """Test function to check whether something is a Paramter [deprecated].""" # TODO: this function isn't used anywhere in the codebase; useful at all? 
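# --- Illustrative sketch (editorial addition): the magic-method tests above
# mean a Parameter can be used directly in arithmetic and comparisons, much
# like a plain float. The name 'scale' is hypothetical.
from lmfit import Parameter

_sp = Parameter('scale', value=10.0)
assert float(_sp) == 10.0
assert _sp + 5.25 == 15.25
assert 2.5 * _sp == 25.0
assert 9 < _sp and not 11 < _sp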
par, _ = parameter assert lmfit.parameter.isParameter(par) assert not lmfit.parameter.isParameter('test') with pytest.warns(FutureWarning, match='removed in the next release'): lmfit.parameter.isParameter(par) lmfit-py-1.0.0/tests/test_parameters.py000066400000000000000000000244471357751001700202100ustar00rootroot00000000000000from copy import copy, deepcopy import pickle import unittest import numpy as np from numpy.testing import assert_, assert_almost_equal, assert_equal from lmfit import Model, Parameter, Parameters from lmfit.printfuncs import params_html_table class TestParameters(unittest.TestCase): def setUp(self): self.params = Parameters() self.params.add_many(('a', 1., True, None, None, None), ('b', 2., True, None, None, None), ('c', 3., True, None, None, '2. * a')) def test_expr_was_evaluated(self): self.params.update_constraints() assert_almost_equal(self.params['c'].value, 2 * self.params['a'].value) def test_copy(self): # check simple Parameters.copy() does not fail # on non-trivial Parameters p1 = Parameters() p1.add('t', 2.0, min=0.0, max=5.0) p1.add('x', 10.0) p1.add('y', expr='x*t + sqrt(t)/3.0') p2 = p1.copy() assert isinstance(p2, Parameters) assert 't' in p2 assert 'y' in p2 assert p2['t'].max < 6.0 assert np.isinf(p2['x'].max) and p2['x'].max > 0 assert np.isinf(p2['x'].min) and p2['x'].min < 0 assert 'sqrt(t)' in p2['y'].expr assert p2._asteval is not None assert p2._asteval.symtable is not None assert (p2['y'].value > 20) and (p2['y'].value < 21) def test_copy_function(self): # check copy(Parameters) does not fail p1 = Parameters() p1.add('t', 2.0, min=0.0, max=5.0) p1.add('x', 10.0) p1.add('y', expr='x*t + sqrt(t)/3.0') p2 = copy(p1) assert isinstance(p2, Parameters) # change the 'x' value in the original p1['x'].value = 4.0 assert p2['x'].value > 9.8 assert p2['x'].value < 10.2 assert np.isinf(p2['x'].max) and p2['x'].max > 0 assert 't' in p2 assert 'y' in p2 assert p2['t'].max < 6.0 assert np.isinf(p2['x'].min) and p2['x'].min < 0 assert 'sqrt(t)' in p2['y'].expr assert p2._asteval is not None assert p2._asteval.symtable is not None assert (p2['y'].value > 20) and (p2['y'].value < 21) assert p1['y'].value < 10 def test_deepcopy(self): # check that a simple copy works b = deepcopy(self.params) assert_(self.params == b) # check that we can add a symbol to the interpreter self.params['b'].expr = 'sin(1)' self.params['b'].value = 10 assert_almost_equal(self.params['b'].value, np.sin(1)) assert_almost_equal(self.params._asteval.symtable['b'], np.sin(1)) # check that the symbols in the interpreter are still the same after # deepcopying b = deepcopy(self.params) unique_symbols_params = self.params._asteval.user_defined_symbols() unique_symbols_b = self.params._asteval.user_defined_symbols() assert_(unique_symbols_b == unique_symbols_params) for unique_symbol in unique_symbols_b: if self.params._asteval.symtable[unique_symbol] is np.nan: continue assert_(self.params._asteval.symtable[unique_symbol] == b._asteval.symtable[unique_symbol]) def test_add_many_params(self): # test that we can add many parameters, but only parameters are added. 
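# --- Illustrative sketch (editorial addition): as the copy/deepcopy tests
# above verify, copying a Parameters object keeps constraint expressions and
# a working asteval interpreter. The names mirror those tests.
from copy import deepcopy
from lmfit import Parameters

_src = Parameters()
_src.add('t', 2.0, min=0.0, max=5.0)
_src.add('x', 10.0)
_src.add('y', expr='x*t + sqrt(t)/3.0')
_dup = deepcopy(_src)
assert _dup['y'].expr == 'x*t + sqrt(t)/3.0'
assert 20 < _dup['y'].value < 21        # 10*2 + sqrt(2)/3 ~= 20.47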
a = Parameter('a', 1) b = Parameter('b', 2) p = Parameters() p.add_many(a, b) assert_(list(p.keys()) == ['a', 'b']) def test_expr_and_constraints_GH265(self): # test that parameters are reevaluated if they have bounds and expr # see GH265 p = Parameters() p['a'] = Parameter('a', 10, True) p['b'] = Parameter('b', 10, True, 0, 20) assert_equal(p['b'].min, 0) assert_equal(p['b'].max, 20) p['a'].expr = '2 * b' assert_almost_equal(p['a'].value, 20) p['b'].value = 15 assert_almost_equal(p['b'].value, 15) assert_almost_equal(p['a'].value, 30) p['b'].value = 30 assert_almost_equal(p['b'].value, 20) assert_almost_equal(p['a'].value, 40) def test_pickle_parameter(self): # test that we can pickle a Parameter p = Parameter('a', 10, True, 0, 1) pkl = pickle.dumps(p) q = pickle.loads(pkl) assert_(p == q) def test_pickle_parameters(self): # test that we can pickle a Parameters object p = Parameters() p.add('a', 10, True, 0, 100) p.add('b', 10, True, 0, 100, 'a * sin(1)') p.update_constraints() p._asteval.symtable['abc'] = '2 * 3.142' pkl = pickle.dumps(p, -1) q = pickle.loads(pkl) q.update_constraints() assert_(p == q) assert_(p is not q) # now test if the asteval machinery survived assert_(q._asteval.symtable['abc'] == '2 * 3.142') # check that unpickling of Parameters is not affected by expr that # refer to Parameter that are added later on. In the following # example var_0.expr refers to var_1, which is a Parameter later # on in the Parameters OrderedDict. p = Parameters() p.add('var_0', value=1) p.add('var_1', value=2) p['var_0'].expr = 'var_1' pkl = pickle.dumps(p) q = pickle.loads(pkl) def test_params_usersyms(self): # test passing usersymes to Parameters() def myfun(x): return x**3 params = Parameters(usersyms={"myfun": myfun}) params.add("a", value=2.3) params.add("b", expr="myfun(a)") xx = np.linspace(0, 1, 10) yy = 3 * xx + np.random.normal(scale=0.002, size=len(xx)) model = Model(lambda x, a: a * x) result = model.fit(yy, params=params, x=xx) assert_(np.isclose(result.params['a'].value, 3.0, rtol=0.025)) assert_(result.nfev > 3) assert_(result.nfev < 300) def test_set_symtable(self): # test that we use Parameter.set(value=XXX) and have # that new value be used in constraint expressions pars = Parameters() pars.add('x', value=1.0) pars.add('y', expr='x + 1') assert_(np.isclose(pars['y'].value, 2.0)) pars['x'].set(value=3.0) assert_(np.isclose(pars['y'].value, 4.0)) def test_dumps_loads_parameters(self): # test that we can dumps() and then loads() a Parameters pars = Parameters() pars.add('x', value=1.0) pars.add('y', value=2.0) pars['x'].expr = 'y / 2.0' dumps = pars.dumps() newpars = Parameters().loads(dumps) newpars['y'].value = 100.0 assert_(np.isclose(newpars['x'].value, 50.0)) def test_isclose(self): assert_(np.isclose(1., 1+1e-5, atol=1e-4, rtol=0)) assert_(not np.isclose(1., 1+1e-5, atol=1e-6, rtol=0)) assert_(np.isclose(1e10, 1.00001e10, rtol=1e-5, atol=1e-8)) assert_(not np.isclose(0, np.inf)) assert_(not np.isclose(-np.inf, np.inf)) assert_(np.isclose(np.inf, np.inf)) assert_(not np.isclose(np.nan, np.nan)) def test_expr_with_bounds(self): "test an expression with bounds, without value" pars = Parameters() pars.add('c1', value=0.2) pars.add('c2', value=0.2) pars.add('c3', value=0.2) pars.add('csum', value=0.8) # this should not raise TypeError: pars.add('c4', expr='csum-c1-c2-c3', min=0, max=1) assert_(np.isclose(pars['c4'].value, 0.2)) def test_invalid_expr_exceptions(self): "test if an exception is raised for invalid expressions (GH486)""" p1 = Parameters() p1.add('t', 2.0, min=0.0, 
max=5.0) p1.add('x', 10.0) with self.assertRaises(SyntaxError): p1.add('y', expr='x*t + sqrt(t)/') assert len(p1['y']._expr_eval.error) > 0 p1.add('y', expr='x*t + sqrt(t)/3.0') p1['y'].set(expr='x*3.0 + t**2') assert 'x*3' in p1['y'].expr assert len(p1['y']._expr_eval.error) == 0 with self.assertRaises(SyntaxError): p1['y'].set(expr='t+') assert len(p1['y']._expr_eval.error) > 0 assert_almost_equal(p1['y'].value, 34.0) def test_eval(self): # check that eval() works with usersyms and parameter values def myfun(x): return 2.0 * x p = Parameters(usersyms={"myfun": myfun}) p.add("a", value=4.0) p.add("b", value=3.0) assert_almost_equal(p.eval("myfun(2.0) * a"), 16) assert_almost_equal(p.eval("b / myfun(3.0)"), 0.5) def test_params_html_table(self): p1 = Parameters() p1.add('t', 2.0, min=0.0, max=5.0) p1.add('x', 0.0, ) html = params_html_table(p1) self.assertIsInstance(html, str) def test_add_params_expr_outoforder(self): params1 = Parameters() params1.add("a", value=1.0) params2 = Parameters() params2.add("b", value=1.0) params2.add("c", value=2.0) params2['b'].expr = 'c/2' params = params1 + params2 assert 'b' in params assert_almost_equal(params['b'].value, 1.0) def test_params_prints(self): params = Parameters() params.add("a", value=1.0, vary=True) params.add("b", value=8.5, min=0, vary=True) params.add("c", expr='a + sqrt(b)') repr_full = params.pretty_repr() repr_one = params.pretty_repr(oneline=True) out = [] for key, val in params.items(): out.append("%s: %s" % (key, repr(val))) out = '\n'.join(out) assert repr_full.count('\n') > 4 assert repr_one.count('\n') < 2 assert len(repr_full) > 150 assert len(repr_one) > 150 assert len(out) > 150 def test_add_with_symtable(self): pars1 = Parameters() pars1.add("a", value=1.0, vary=True) def half(x): return 0.5*x pars2 = Parameters(usersyms={"half": half}) pars2.add("b", value=3.0) pars2.add("c", expr="half(b)") params = pars1 + pars2 assert_almost_equal(params['c'].value, 1.5) params = pars2 + pars1 assert_almost_equal(params['c'].value, 1.5) params = deepcopy(pars1) params.update(pars2) assert_almost_equal(params['c'].value, 1.5) lmfit-py-1.0.0/tests/test_params_set.py000066400000000000000000000207601357751001700201750ustar00rootroot00000000000000import numpy as np from numpy.testing import assert_allclose from lmfit.lineshapes import gaussian from lmfit.models import VoigtModel def test_param_set(): np.random.seed(2015) x = np.arange(0, 20, 0.05) y = gaussian(x, amplitude=15.43, center=4.5, sigma=2.13) y = y + 0.05 - 0.01*x + np.random.normal(scale=0.03, size=len(x)) model = VoigtModel() params = model.guess(y, x=x) # test #1: gamma is constrained to equal sigma assert(params['gamma'].expr == 'sigma') params.update_constraints() sigval = params['sigma'].value assert_allclose(params['gamma'].value, sigval, 1e-4, 1e-4, '', True) # test #2: explicitly setting a param value should work, even when # it had been an expression. The value will be left as fixed gamval = 0.87543 params['gamma'].set(value=gamval) assert(params['gamma'].expr is None) assert(not params['gamma'].vary) assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True) # test #3: explicitly setting an expression should work # Note, the only way to ensure that **ALL** constraints are up to date # is to call params.update_constraints(). This is because the constraint # may have multiple dependencies. 
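# --- Illustrative sketch (editorial addition): as the note above says, after
# Parameter.set(expr=...) a call to Parameters.update_constraints() makes
# sure every dependent constraint is re-evaluated. Names are hypothetical.
from lmfit import Parameters

_cp = Parameters()
_cp.add('sigma', value=2.0)
_cp.add('gamma', value=1.0)
_cp['gamma'].set(expr='sigma/2.0')      # ties gamma to sigma; vary becomes False
_cp.update_constraints()
assert abs(_cp['gamma'].value - 1.0) < 1e-12
assert not _cp['gamma'].vary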
params['gamma'].set(expr='sigma/2.0') assert(params['gamma'].expr is not None) assert(not params['gamma'].vary) params.update_constraints() assert_allclose(params['gamma'].value, sigval/2.0, 1e-4, 1e-4, '', True) # test #4: explicitly setting a param value WITH vary=True # will set it to be variable gamval = 0.7777 params['gamma'].set(value=gamval, vary=True) assert(params['gamma'].expr is None) assert(params['gamma'].vary) assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True) # test 5: make sure issue #389 is fixed: set boundaries and make sure # they are kept when changing the value amplitude_vary = params['amplitude'].vary amplitude_expr = params['amplitude'].expr params['amplitude'].set(min=0.0, max=100.0) params.update_constraints() assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True) params['amplitude'].set(value=40.0) params.update_constraints() assert_allclose(params['amplitude'].value, 40.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True) assert(params['amplitude'].expr == amplitude_expr) assert(params['amplitude'].vary == amplitude_vary) assert(not params['amplitude'].brute_step) # test for possible regressions of this fix (without 'expr'): # the set function should only change the requested attribute(s) params['amplitude'].set(value=35.0) params.update_constraints() assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True) assert(params['amplitude'].vary == amplitude_vary) assert(params['amplitude'].expr == amplitude_expr) assert(not params['amplitude'].brute_step) # set minimum params['amplitude'].set(min=10.0) params.update_constraints() assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True) assert(params['amplitude'].vary == amplitude_vary) assert(params['amplitude'].expr == amplitude_expr) assert(not params['amplitude'].brute_step) # set maximum params['amplitude'].set(max=110.0) params.update_constraints() assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True) assert(params['amplitude'].vary == amplitude_vary) assert(params['amplitude'].expr == amplitude_expr) assert(not params['amplitude'].brute_step) # set vary params['amplitude'].set(vary=False) params.update_constraints() assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True) assert(params['amplitude'].vary is False) assert(params['amplitude'].expr == amplitude_expr) assert(not params['amplitude'].brute_step) # set brute_step params['amplitude'].set(brute_step=0.1) params.update_constraints() assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True) assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True) assert(params['amplitude'].vary is False) assert(params['amplitude'].expr == amplitude_expr) 
assert_allclose(params['amplitude'].brute_step, 0.1, 1e-4, 1e-4, '', True) # test for possible regressions of this fix for variables WITH 'expr': height_value = params['height'].value height_min = params['height'].min height_max = params['height'].max height_vary = params['height'].vary height_expr = params['height'].expr height_brute_step = params['height'].brute_step # set vary=True should remove expression params['height'].set(vary=True) params.update_constraints() assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True) assert_allclose(params['height'].min, height_min, 1e-4, 1e-4, '', True) assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True) assert(params['height'].vary is True) assert(params['height'].expr is None) assert(params['height'].brute_step == height_brute_step) # setting an expression should set vary=False params['height'].set(expr=height_expr) params.update_constraints() assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True) assert_allclose(params['height'].min, height_min, 1e-4, 1e-4, '', True) assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True) assert(params['height'].vary is False) assert(params['height'].expr == height_expr) assert(params['height'].brute_step == height_brute_step) # changing min/max should not remove expression params['height'].set(min=0) params.update_constraints() assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True) assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True) assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True) assert(params['height'].vary == height_vary) assert(params['height'].expr == height_expr) assert(params['height'].brute_step == height_brute_step) # changing brute_step should not remove expression params['height'].set(brute_step=0.1) params.update_constraints() assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True) assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True) assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True) assert(params['height'].vary == height_vary) assert(params['height'].expr == height_expr) assert_allclose(params['amplitude'].brute_step, 0.1, 1e-4, 1e-4, '', True) # changing the value should remove expression and keep vary=False params['height'].set(brute_step=0) params['height'].set(value=10.0) params.update_constraints() assert_allclose(params['height'].value, 10.0, 1e-4, 1e-4, '', True) assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True) assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True) assert(params['height'].vary is False) assert(params['height'].expr is None) assert(params['height'].brute_step == height_brute_step) # passing expr='' should only remove the expression params['height'].set(expr=height_expr) # first restore the original expr params.update_constraints() params['height'].set(expr='') params.update_constraints() assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True) assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True) assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True) assert(params['height'].vary is False) assert(params['height'].expr is None) assert(params['height'].brute_step == height_brute_step) lmfit-py-1.0.0/tests/test_printfuncs.py000066400000000000000000000334631357751001700202360ustar00rootroot00000000000000"""Tests for the print/report functions.""" import numpy as np import pytest import lmfit from lmfit import 
(Minimizer, Parameters, ci_report, conf_interval, fit_report, report_ci, report_errors, report_fit) from lmfit.lineshapes import gaussian from lmfit.models import GaussianModel from lmfit.printfuncs import alphanumeric_sort, getfloat_attr, gformat np.random.seed(0) @pytest.fixture def params(): """Return a lmfit.Parameters class with initial values.""" pars = Parameters() pars.add_many(('a1', 4), ('b', -20.0), ('c1', 3), ('a', 10.0), ('a2', 5), ('b10', 6), ('d', None), ('b01', 8), ('e', 9), ('aa1', 10)) return pars @pytest.fixture def fitresult(): """Return a ModelResult after fitting a randomized Gaussian data set.""" x = np.linspace(0, 12, 601) data = gaussian(x, amplitude=36.4, center=6.70, sigma=0.88) data = data + np.random.normal(x.size, scale=3.2) model = GaussianModel() params = model.make_params(amplitude=50, center=5, sigma=2) params['amplitude'].min = 1 params['amplitude'].max = 100.0 params['sigma'].min = 0 params['sigma'].brute_step = 0.001 result = model.fit(data, params, x=x) return result @pytest.fixture def confidence_interval(): """Return the result of the confidence interval (ci) calculation.""" def residual(pars, x, data=None): argu = (x*pars['decay'])**2 shift = pars['shift'] if abs(shift) > np.pi/2: shift = shift - np.sign(shift)*np.pi model = pars['amp']*np.sin(shift + x/pars['period']) * np.exp(-argu) if data is None: return model return model - data p_true = Parameters() p_true.add_many(('amp', 14.0), ('period', 5.33), ('shift', 0.123), ('decay', 0.010)) x = np.linspace(0.0, 250.0, 2500) data = residual(p_true, x) + np.random.normal(scale=0.7215, size=x.size) fit_params = Parameters() fit_params.add_many(('amp', 13.0), ('period', 2), ('shift', 0.0), ('decay', 0.02)) mini = Minimizer(residual, fit_params, fcn_args=(x,), fcn_kws={'data': data}) out = mini.leastsq() ci = conf_interval(mini, out) return ci def test_alphanumeric_sort(params): """Test alphanumeric sort of the parameters.""" sorted_params = sorted(params, key=alphanumeric_sort) expected = ['a', 'a1', 'a2', 'aa1', 'b', 'b01', 'b10', 'c1', 'd', 'e'] assert sorted_params == expected test_data_getfloat_attr = [('a', 'value', '10.0000000'), ('b', 'value', '-20.0000000'), ('c1', 'value', '3'), ('d', 'value', '-inf'), ('e', 'non_existent_attr', 'unknown'), ('aa1', 'test', '(20+5j)')] @pytest.mark.parametrize("par, attr, expected", test_data_getfloat_attr) def test_getfloat_attr(params, par, attr, expected): """Test getfloat_attr function.""" if par == 'aa1': # add an attribute that is not None, float, or int # This will never occur for Parameter values, but this function is # also used on the MinimizerResult/ModelResult where it could happen. 
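# --- Illustrative sketch (editorial addition): gformat(), exercised by the
# parametrized test below, returns fixed-width strings for report columns,
# switching to exponent notation for large magnitudes.
from lmfit.printfuncs import gformat

assert gformat(1.25) == ' 1.25000000'
assert gformat(-1234567890.1234567890) == '-1.2346e+09'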
params['aa1'].test = 20+5j output = getfloat_attr(params[par], attr) assert output == expected if par == 'a': assert len(output) == 10 # leading blank for pos values is stripped elif par == 'b': assert len(output) == 11 elif par == 'c1': assert len(output) == 1 test_data_gformat = [(-1.25, '-1.25000000'), (1.25, ' 1.25000000'), (-1234567890.1234567890, '-1.2346e+09'), (1234567890.1234567890, ' 1.2346e+09'), (12345.67890e150, ' 1.235e+154')] @pytest.mark.parametrize("test_input, expected", test_data_gformat) def test_gformat(test_input, expected): """Test gformat function.""" output = gformat(test_input) assert output == expected def test_reports_created(fitresult): """Verify that the fit reports are created and all headers are present.""" report_headers = ['[[Model]]', '[[Fit Statistics]]', '[[Variables]]', '[[Correlations]] (unreported correlations are < 0.100)'] report = fitresult.fit_report() assert len(report) > 500 for header in report_headers: assert header in report report1 = fit_report(fitresult) for header in report_headers[1:]: assert header in report1 html_params = fitresult.params._repr_html_() assert len(html_params) > 500 assert 'brute' in html_params assert 'standard error' in html_params assert 'relative error' in html_params html_report = fitresult._repr_html_() assert len(html_report) > 1000 for header in report_headers: header_title = header.replace('[', '').replace(']', '').strip() assert header_title in html_report def test_fitreports_init_values(fitresult): """Verify that initial values are displayed as expected.""" fitresult.params['sigma'].init_value = None report_split = fitresult.fit_report().split('\n') indx = [i for i, val in enumerate(report_split) if 'sigma' in val][0] assert '(init = ?)' in report_split[indx] indx_center = [i for i, val in enumerate(report_split) if 'center:' in val][0] indx_amplitude = [i for i, val in enumerate(report_split) if 'amplitude:' in val][0] for indx, init_val in zip([indx_center, indx_amplitude], [5, 50]): assert '(init = {})'.format(init_val) in report_split[indx] def test_fitreports_min_correl(fitresult): """Verify that only correlation >= min_correl are displayed.""" report = fitresult.fit_report(min_correl=0.6) assert '[[Correlation]]' not in report html_report = fitresult._repr_html_(min_correl=0.6) assert 'Correlation' not in html_report def test_fitreports_show_corre(fitresult): """Verify that correlation are not shown when show_correl=False.""" report = fitresult.fit_report(show_correl=False) assert '[[Correlation]]' not in report html_report = fitresult._repr_html_(show_correl=False) assert 'Correlation' not in html_report def test_fitreports_sort_pars(fitresult): """Test sorting of parameters in the fit report.""" # not sorted report_split = fitresult.fit_report(sort_pars=False).split('\n') indx_vars = report_split.index('[[Variables]]') first_par = list(fitresult.params.keys())[0] assert first_par in report_split[indx_vars+1] # sorted using default alphanumeric sort report_split = fitresult.fit_report(sort_pars=True).split('\n') indx_vars = report_split.index('[[Variables]]') assert 'amplitude' in report_split[indx_vars+1] # sorted using custom sorting algorithm: length of variable name def sort_length(s): return len(s) report_split = fitresult.fit_report(sort_pars=sort_length).split('\n') indx_vars = report_split.index('[[Variables]]') assert 'fwhm' in report_split[indx_vars+1] def test_report_fit(fitresult, capsys): """Verify that the fit report is printed when using report_fit.""" # report_fit with 
MinimizerResult/ModelResult as argument gives full # output of fitting results (except for [[Model]]) report_fit(fitresult) report_headers = ['[[Fit Statistics]]', '[[Variables]]', '[[Correlations]] (unreported correlations are < 0.100)'] captured = capsys.readouterr() for header in report_headers: assert header in captured.out # report_fit with a Parameters object as argument gives [[Variables]] and # [[Correlations]] report_fit(fitresult.params) report_headers = ['[[Variables]]', '[[Correlations]] (unreported correlations are < 0.100)'] captured = capsys.readouterr() for header in report_headers: assert header in captured.out def test_report_errors_deprecated(fitresult): """Verify that a DeprecationWarning is shown when calling report_errors.""" with pytest.deprecated_call(): report_errors(params=fitresult.params) def test_report_leastsq_no_errorbars(fitresult): """Verify correct message when uncertainties could not be estimated.""" # general warning is shown fitresult.errorbars = False report = fitresult.fit_report() assert 'Warning: uncertainties could not be estimated:' in report # parameter is at initial value fitresult.params['amplitude'].value = 50.0 report = fitresult.fit_report() assert 'amplitude: at initial value' in report # parameter is at boundary max/min fitresult.params['amplitude'].value = 100.0 report = fitresult.fit_report() assert 'amplitude: at boundary' in report fitresult.params['amplitude'].value = 1.0 report = fitresult.fit_report() assert 'amplitude: at boundary' in report def test_report_no_errorbars_no_numdifftools(fitresult): """Verify message without numdifftools and not using leastsq/least_squares.""" fitresult.fit(method='nelder') lmfit.printfuncs.HAS_NUMDIFFTOOLS = False fitresult.errorbars = False report = fitresult.fit_report() msg = 'this fitting method does not natively calculate uncertainties' assert msg in report assert 'numdifftools' in report def test_report_no_errorbars_with_numdifftools_no_init_value(fitresult): """No TypeError for parameters without initial value when no errorbars. Verify that for parameters without an init_value the fit_report() function does not raise a TypeError when comparing if a parameter is at its initial value (if HAS_NUMDIFFTOOLS is True and result.errorbars is False).
See GitHub Issue 578: https://github.com/lmfit/lmfit-py/issues/578 """ fitresult.fit(method='nelder') lmfit.printfuncs.HAS_NUMDIFFTOOLS = True fitresult.errorbars = False fitresult.params['amplitude'].init_value = None report = fitresult.fit_report() assert 'Warning: uncertainties could not be estimated:' in report def test_report_fixed_parameter(fitresult): """Verify that a fixed parameter is shown correctly.""" fitresult.params['center'].vary = False report_split = fitresult.fit_report().split('\n') indx = [i for i, val in enumerate(report_split) if 'center' in val][0] assert '(fixed)' in report_split[indx] def test_report_expression_parameter(fitresult): """Verify that a parameter with expression is shown correctly.""" report_split = fitresult.fit_report().split('\n') indices = [i for i, val in enumerate(report_split) if 'fwhm' in val or 'height' in val] for indx in indices: assert '==' in report_split[indx] html_params = fitresult.params._repr_html_() assert 'expression' in html_params def test_report_modelpars(fitresult): """Verify that model_values are shown when modelpars are given.""" model = GaussianModel() params = model.make_params(amplitude=35, center=7, sigma=0.9) report_split = fitresult.fit_report(modelpars=params).split('\n') indices = [i for i, val in enumerate(report_split) if ('sigma:' in val or 'center:' in val or 'amplitude:' in val)] for indx in indices: assert 'model_value' in report_split[indx] def test_report_parvalue_non_numeric(fitresult): """Verify that a non-numeric value is caught (can this ever happen?).""" fitresult.params['center'].value = None fitresult.params['center'].stderr = None report = fitresult.fit_report() assert 'center: Non Numeric Value?' in report def test_report_zero_value_spercent(fitresult): """Verify that a ZeroDivisionError in the spercent calculation
gives an empty string.""" fitresult.params['center'].value = 0 fitresult.params['center'].stderr = 0.1 report_split = fitresult.fit_report().split('\n') indx = [i for i, val in enumerate(report_split) if 'center:' in val][0] assert '%' not in report_split[indx] assert '%' in report_split[indx+1] html_params_split = fitresult.params._repr_html_().split('<tr>') indx = [i for i, val in enumerate(html_params_split) if 'center' in val][0] assert '%' not in html_params_split[indx] assert '%' in html_params_split[indx+1] def test_ci_report(confidence_interval): """Verify that the CI report is created when using ci_report.""" report = ci_report(confidence_interval) assert len(report) > 250 for par in confidence_interval.keys(): assert par in report for interval in ['99.73', '95.45', '68.27', '_BEST_']: assert interval in report def test_report_ci(confidence_interval, capsys): """Verify that the CI report is printed when using report_ci.""" report_ci(confidence_interval) captured = capsys.readouterr() assert len(captured.out) > 250 for par in confidence_interval.keys(): assert par in captured.out for interval in ['99.73', '95.45', '68.27', '_BEST_']: assert interval in captured.out def test_ci_report_with_offset(confidence_interval): """Verify output of CI report when using with_offset.""" report_split = ci_report(confidence_interval, with_offset=True).split('\n') # default amp_values = [abs(float(val)) for val in report_split[1].split()[2:]] assert np.all(np.less(np.delete(amp_values, 3), 0.2)) report_split = ci_report(confidence_interval, with_offset=False).split('\n') amp_values = [float(val) for val in report_split[1].split()[2:]] assert np.all(np.greater(amp_values, 13)) @pytest.mark.parametrize("ndigits", [3, 5, 7]) def test_ci_report_with_ndigits(confidence_interval, ndigits): """Verify output of CI report when specifying ndigits.""" report_split = ci_report(confidence_interval, ndigits=ndigits).split('\n') period_values = [val for val in report_split[2].split()[2:]] length = [len(val.split('.')[-1]) for val in period_values] assert np.all(np.equal(length, ndigits)) lmfit-py-1.0.0/tests/test_saveload.py """Tests for saving/loading Models and ModelResults.""" import os import time import numpy as np from numpy.testing import assert_allclose import pytest from lmfit import Parameters import lmfit.jsonutils from lmfit.lineshapes import gaussian, lorentzian from lmfit.model import (Model, ModelResult, load_model, load_modelresult, save_model, save_modelresult) from lmfit.models import ExponentialModel, GaussianModel, VoigtModel from lmfit_testutils import assert_between, assert_param_between y, x = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples', 'NIST_Gauss2.dat')).T SAVE_MODEL = 'model_1.sav' SAVE_MODELRESULT = 'modelresult_1.sav' def clear_savefile(fname): """Remove save files so that tests start fresh.""" try: os.unlink(fname) except OSError: pass def wait_for_file(fname, timeout=10): """Check whether the file is created within a certain amount of time.""" end_time = time.time() + timeout while time.time() < end_time: if os.path.exists(fname): return True time.sleep(0.05) return False def create_model_params(x, y): """Create the model and parameters.""" exp_mod = ExponentialModel(prefix='exp_') params = exp_mod.guess(y, x=x) gauss1 = GaussianModel(prefix='g1_') params.update(gauss1.make_params()) gauss2 = GaussianModel(prefix='g2_') params.update(gauss2.make_params()) params['g1_center'].set(value=105,
min=75, max=125) params['g1_sigma'].set(value=15, min=3) params['g1_amplitude'].set(value=2000, min=10) params['g2_center'].set(value=155, min=125, max=175) params['g2_sigma'].set(value=15, min=3) params['g2_amplitude'].set(value=2000, min=10) model = gauss1 + gauss2 + exp_mod return model, params def check_fit_results(result): """Check the result of optimization.""" assert result.nvarys == 8 assert_between(result.chisqr, 1000, 1500) assert_between(result.aic, 400, 450) pars = result.params assert_param_between(pars['exp_decay'], 90, 92) assert_param_between(pars['exp_amplitude'], 98, 101) assert_param_between(pars['g1_sigma'], 16, 17) assert_param_between(pars['g1_center'], 106, 109) assert_param_between(pars['g1_amplitude'], 4100, 4500) assert_param_between(pars['g1_fwhm'], 38, 42) assert_param_between(pars['g1_height'], 100, 103) assert_param_between(pars['g2_sigma'], 10, 15) assert_param_between(pars['g2_center'], 150, 160) assert_param_between(pars['g2_amplitude'], 2100, 2900) assert_param_between(pars['g2_fwhm'], 30, 34) assert_param_between(pars['g2_height'], 70, 75) @pytest.mark.parametrize("dill", [False, True]) def test_save_load_model(dill): """Save/load Model with/without dill.""" if dill: pytest.importorskip("dill") else: lmfit.jsonutils.HAS_DILL = False # create/save Model and perform some tests model, _pars = create_model_params(x, y) save_model(model, SAVE_MODEL) file_exists = wait_for_file(SAVE_MODEL, timeout=10) assert file_exists with open(SAVE_MODEL, 'r') as fh: text = fh.read() assert 1000 < len(text) < 2500 # load the Model, perform fit and assert results saved_model = load_model(SAVE_MODEL) params = saved_model.make_params() params['exp_decay'].set(100) params['exp_amplitude'].set(100) params['g1_center'].set(105, min=75, max=125) params['g1_sigma'].set(15, min=3) params['g1_amplitude'].set(2000, min=10) params['g2_center'].set(155, min=125, max=175) params['g2_sigma'].set(15, min=3) params['g2_amplitude'].set(2000, min=10) result = saved_model.fit(y, params, x=x) check_fit_results(result) clear_savefile(SAVE_MODEL) @pytest.mark.parametrize("dill", [False, True]) def test_save_load_modelresult(dill): """Save/load ModelResult with/without dill.""" if dill: pytest.importorskip("dill") else: lmfit.jsonutils.HAS_DILL = False # create model, perform fit, save ModelResult and perform some tests model, params = create_model_params(x, y) result = model.fit(y, params, x=x) save_modelresult(result, SAVE_MODELRESULT) file_exists = wait_for_file(SAVE_MODELRESULT, timeout=10) assert file_exists text = '' with open(SAVE_MODELRESULT, 'r') as fh: text = fh.read() assert_between(len(text), 8000, 25000) # load the saved ModelResult from file and compare results result_saved = load_modelresult(SAVE_MODELRESULT) check_fit_results(result_saved) clear_savefile(SAVE_MODEL) def test_saveload_modelresult_attributes(): """Test for restoring all attributes of the ModelResult.""" model, params = create_model_params(x, y) result = model.fit(y, params, x=x) save_modelresult(result, SAVE_MODELRESULT) time.sleep(0.25) file_exists = wait_for_file(SAVE_MODELRESULT, timeout=10) assert file_exists time.sleep(0.25) loaded = load_modelresult(SAVE_MODELRESULT) assert len(result.data) == len(loaded.data) assert_allclose(result.data, loaded.data) for pname in result.params.keys(): assert_allclose(result.init_params[pname].value, loaded.init_params[pname].value) clear_savefile(SAVE_MODELRESULT) def test_saveload_modelresult_exception(): """Make sure the proper exceptions are raised when needed.""" model, 
_pars = create_model_params(x, y) save_model(model, SAVE_MODEL) with pytest.raises(AttributeError, match=r'needs saved ModelResult'): load_modelresult(SAVE_MODEL) clear_savefile(SAVE_MODEL) def test_saveload_modelresult_roundtrip(): """Test for modelresult.loads()/dumps() and repeating that""" def mfunc(x, a, b): return a * (x-b) model = Model(mfunc) params = model.make_params(a=0.0, b=3.0) xx = np.linspace(-5, 5, 201) yy = 0.5 * (xx - 0.22) + np.random.normal(scale=0.01, size=len(xx)) result1 = model.fit(yy, params, x=xx) result2 = ModelResult(model, Parameters()) result2.loads(result1.dumps(), funcdefs={'mfunc': mfunc}) result3 = ModelResult(model, Parameters()) result3.loads(result2.dumps(), funcdefs={'mfunc': mfunc}) assert result3 is not None assert_param_between(result2.params['a'], 0.48, 0.52) assert_param_between(result2.params['b'], 0.20, 0.25) assert_param_between(result3.params['a'], 0.48, 0.52) assert_param_between(result3.params['b'], 0.20, 0.25) def test_saveload_usersyms(): """Test save/load of modelresult with non-trivial user symbols, this example uses a VoigtModel, wheree `wofz()` is used in a constraint expression""" x = np.linspace(0, 20, 501) y = gaussian(x, 1.1, 8.5, 2) + lorentzian(x, 1.7, 8.5, 1.5) np.random.seed(20) y = y + np.random.normal(size=len(x), scale=0.025) model = VoigtModel() pars = model.guess(y, x=x) result = model.fit(y, pars, x=x) savefile = 'tmpvoigt_modelresult.sav' save_modelresult(result, savefile) assert_param_between(result.params['sigma'], 0.7, 2.1) assert_param_between(result.params['center'], 8.4, 8.6) assert_param_between(result.params['height'], 0.2, 1.0) time.sleep(0.25) result2 = load_modelresult(savefile) assert_param_between(result2.params['sigma'], 0.7, 2.1) assert_param_between(result2.params['center'], 8.4, 8.6) assert_param_between(result2.params['height'], 0.2, 1.0) lmfit-py-1.0.0/tests/test_shgo.py000066400000000000000000000100751357751001700167750ustar00rootroot00000000000000"""Tests for the SHGO global minimization algorithm.""" import numpy as np from numpy.testing import assert_allclose import pytest import scipy import lmfit # SHGO algorithm is present in SciPy >= 1.2 pytest.importorskip("scipy", minversion="1.2") def eggholder(x): return (-(x[1] + 47.0) * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0)))) - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0))))) def eggholder_lmfit(params): x0 = params['x0'].value x1 = params['x1'].value return (-(x1 + 47.0) * np.sin(np.sqrt(abs(x0/2.0 + (x1 + 47.0)))) - x0 * np.sin(np.sqrt(abs(x0 - (x1 + 47.0))))) def test_shgo_scipy_vs_lmfit(): """Test SHGO algorithm in lmfit versus SciPy.""" bounds = [(-512, 512), (-512, 512)] result_scipy = scipy.optimize.shgo(eggholder, bounds, n=30, sampling_method='sobol') assert len(result_scipy.xl) == 13 pars = lmfit.Parameters() pars.add_many(('x0', 0, True, -512, 512), ('x1', 0, True, -512, 512)) mini = lmfit.Minimizer(eggholder_lmfit, pars) result = mini.minimize(method='shgo', n=30, sampling_method='sobol') out_x = np.array([result.params['x0'].value, result.params['x1'].value]) assert_allclose(result_scipy.fun, result.residual) assert_allclose(result_scipy.funl, result.shgo_funl) assert_allclose(result_scipy.xl, result.shgo_xl) assert_allclose(result.shgo_x, out_x) def test_shgo_scipy_vs_lmfit_2(): """Test SHGO algorithm in lmfit versus SciPy.""" bounds = [(-512, 512), (-512, 512)] result_scipy = scipy.optimize.shgo(eggholder, bounds, n=60, iters=5, sampling_method='sobol') assert len(result_scipy.xl) == 39 pars = lmfit.Parameters() 
pars.add_many(('x0', 0, True, -512, 512), ('x1', 0, True, -512, 512)) mini = lmfit.Minimizer(eggholder_lmfit, pars) result = mini.minimize(method='shgo', n=60, iters=5, sampling_method='sobol') assert_allclose(result_scipy.fun, result.residual) assert_allclose(result_scipy.xl, result.shgo_xl) assert_allclose(result_scipy.funl, result.shgo_funl) # correct result for Alpine02 function global_optimum = [7.91705268, 4.81584232] fglob = -6.12950 def test_shgo_simplicial_Alpine02(minimizer_Alpine02): """Test SHGO algorithm on Alpine02 function.""" # sampling_method 'simplicial' fails with iters=1 out = minimizer_Alpine02.minimize(method='shgo', iters=5) out_x = np.array([out.params['x0'].value, out.params['x1'].value]) assert_allclose(out.residual, fglob, rtol=1e-5) assert_allclose(min(out_x), min(global_optimum), rtol=1e-3) assert_allclose(max(out_x), max(global_optimum), rtol=1e-3) assert out.method == 'shgo' def test_shgo_sobol_Alpine02(minimizer_Alpine02): """Test SHGO algorithm on Alpine02 function.""" out = minimizer_Alpine02.minimize(method='shgo', sampling_method='sobol') out_x = np.array([out.params['x0'].value, out.params['x1'].value]) assert_allclose(out.residual, fglob, rtol=1e-5) assert_allclose(min(out_x), min(global_optimum), rtol=1e-3) assert_allclose(max(out_x), max(global_optimum), rtol=1e-3) def test_shgo_bounds(minimizer_Alpine02): """Test SHGO algorithm with bounds.""" pars_bounds = lmfit.Parameters() pars_bounds.add_many(('x0', 1., True, 5.0, 15.0), ('x1', 1., True, 2.5, 7.5)) out = minimizer_Alpine02.minimize(params=pars_bounds, method='shgo') assert 5.0 <= out.params['x0'].value <= 15.0 assert 2.5 <= out.params['x1'].value <= 7.5 def test_shgo_disp_true(minimizer_Alpine02, capsys): """Test SHGO algorithm with disp is True.""" kws = {'disp': True} minimizer_Alpine02.minimize(method='shgo', options=kws) captured = capsys.readouterr() assert 'Splitting first generation' in captured.out def test_shgo_local_solver(minimizer_Alpine02): """Test SHGO algorithm with local solver.""" min_kws = {'method': 'unknown'} with pytest.raises(KeyError, match=r'unknown'): minimizer_Alpine02.minimize(method='shgo', minimizer_kwargs=min_kws) lmfit-py-1.0.0/tests/test_stepmodel.py000066400000000000000000000027301357751001700200300ustar00rootroot00000000000000import numpy as np from lmfit.models import ConstantModel, StepModel def get_data(): x = np.linspace(0, 10, 201) dat = np.ones_like(x) dat[:48] = 0.0 dat[48:77] = np.arange(77-48)/(77.0-48) dat = dat + 5e-2*np.random.randn(len(x)) dat = 110.2 * dat + 12.0 return x, dat def test_stepmodel_linear(): x, y = get_data() stepmod = StepModel(form='linear') const = ConstantModel() pars = stepmod.guess(y, x) pars = pars + const.make_params(c=3*y.min()) mod = stepmod + const out = mod.fit(y, pars, x=x) assert(out.nfev > 5) assert(out.nvarys == 4) assert(out.chisqr > 1) assert(out.params['c'].value > 3) assert(out.params['center'].value > 1) assert(out.params['center'].value < 4) assert(out.params['sigma'].value > 0.5) assert(out.params['sigma'].value < 3.5) assert(out.params['amplitude'].value > 50) def test_stepmodel_erf(): x, y = get_data() stepmod = StepModel(form='linear') const = ConstantModel() pars = stepmod.guess(y, x) pars = pars + const.make_params(c=3*y.min()) mod = stepmod + const out = mod.fit(y, pars, x=x) assert(out.nfev > 5) assert(out.nvarys == 4) assert(out.chisqr > 1) assert(out.params['c'].value > 3) assert(out.params['center'].value > 1) assert(out.params['center'].value < 4) assert(out.params['amplitude'].value > 50) 
assert(out.params['sigma'].value > 0.2) assert(out.params['sigma'].value < 1.5) lmfit-py-1.0.0/versioneer.py000066400000000000000000002060031357751001700160130ustar00rootroot00000000000000 # Version: 0.18 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. 
As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. 
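As a rough illustration (derived from the `render_*` helpers later in this file, not part of the original style descriptions), a checkout that is two commits past the "0.11" tag at commit 1076c97 with uncommitted changes would render as:

* `pep440`: "0.11+2.g1076c97.dirty"
* `pep440-post`: "0.11.post2.dev0+g1076c97"
* `pep440-old`: "0.11.post2.dev0"
* `git-describe`: "0.11-2-g1076c97-dirty"

while `pep440-pre` ignores the dirty flag and yields "0.11.post.dev2".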
## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/warner/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other langauges) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/warner/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out. Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/warner/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. 
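For reference, the `[versioneer]` section of `setup.cfg` that `get_config_from_root()` below parses looks roughly like this (the values are illustrative, not taken from any particular project):

    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/mypackage/_version.py
    versionfile_build = mypackage/_version.py
    tag_prefix = ''
    parentdir_prefix = mypackage-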
### Unicode version strings While Versioneer works (and is continually tested) with both Python 2 and Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. Newer releases probably generate unicode version strings on py2. It's not clear that this is wrong, but it may be surprising for applications when then write these strings to a network connection or include them in bytes-oriented APIs like cryptographic checksums. [Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates this question. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . """ from __future__ import print_function try: import configparser except ImportError: import ConfigParser as configparser import errno import json import os import re import subprocess import sys class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_root(): """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . """ root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. 
So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. me = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() with open(setup_cfg, "r") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode LONG_VERSION_PY['git'] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. 
Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
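For example, with a parentdir_prefix of "myproject-", an unpacked directory named "myproject-1.2" yields version "1.2".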
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." 
return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
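    # Illustrative note: the lines matched here are the keyword assignments that the
    # LONG_VERSION_PY template above writes into _version.py, e.g.
    #     git_refnames = "$Format:%d$"
    #     git_full = "$Format:%H$"
    #     git_date = "$Format:%ci$"
    # in a plain checkout, or their expanded values (such as
    # git_full = "1076c978a8d3cfc70f408fe5974aa6c092c949ac") in a 'git archive'
    # tarball; the regexps below simply pull out whatever sits inside the quotes.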
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 
    0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered


def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered


def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered


def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags.
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose=False): """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. """ if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} def get_version(): """Get the short version string for this project.""" return get_versions()["version"] def get_cmdclass(): """Get the custom setuptools/distutils subclasses used by Versioneer.""" if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to it's pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. # Also see https://github.com/warner/python-versioneer/issues/52 cmds = {} # we add "version" to both distutils and setuptools from distutils.core import Command class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) print(" dirty: %s" % vers.get("dirty")) print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->.. # setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? # pip install: # copies source tree to a tempdir before running egg_info/etc # if .git isn't copied too, 'git describe' will fail # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? 
# we override different "build_py" commands for both environments if "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py else: from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: from py2exe.build_exe import py2exe as _py2exe # py2 class cmd_py2exe(_py2exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _py2exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments if "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. 
You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ def do_setup(): """Main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(INIT_PY_SNIPPET) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with open(manifest_in, "r") as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with open(manifest_in, "a") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. 
do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1)
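
# ---------------------------------------------------------------------------
# Illustration only (not part of upstream Versioneer): a minimal sketch of
# how git_pieces_from_vcs() above splits a 'git describe' string of the form
# TAG-NUM-gHEX[-dirty] into the "pieces" dict.  The helper name, the sample
# describe string and the tag prefix "v" are hypothetical values chosen for
# this example; the regex and the dict keys are the ones used above.
def _example_parse_describe(describe_out="v1.0.0-3-gabc1234-dirty",
                            tag_prefix="v"):
    """Sketch: turn TAG-NUM-gHEX[-dirty] into closest-tag/distance/short."""
    import re
    pieces = {"dirty": describe_out.endswith("-dirty")}
    if pieces["dirty"]:
        describe_out = describe_out[:describe_out.rindex("-dirty")]
    mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', describe_out)
    if mo:
        pieces["closest-tag"] = mo.group(1)[len(tag_prefix):]  # "1.0.0"
        pieces["distance"] = int(mo.group(2))                  # commits past tag
        pieces["short"] = mo.group(3)                          # "abc1234"
    else:
        # no reachable tag: 'git describe --always' printed a bare hash
        pieces["closest-tag"] = None
        pieces["short"] = describe_out
    return pieces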
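
# ---------------------------------------------------------------------------
# Illustration only (not part of upstream Versioneer): how the render_* styles
# defined above turn one commit state into version strings.  The helper name
# and the sample values in "pieces" are made up for the example; render() and
# the style names are the ones dispatched above.
def _example_render_styles():
    """Sketch: print each style's output for a dirty tree 3 commits past 1.0.0."""
    pieces = {"closest-tag": "1.0.0", "distance": 3, "short": "abc1234",
              "long": "abc1234" + "0" * 33, "dirty": True,
              "error": None, "date": "2019-12-04T12:00:00+0000"}
    for style in ("pep440", "pep440-pre", "pep440-post", "pep440-old",
                  "git-describe", "git-describe-long"):
        print("%-18s %s" % (style, render(pieces, style)["version"]))
    # expected output, given the code above, e.g.:
    #   pep440             1.0.0+3.gabc1234.dirty
    #   pep440-post        1.0.0.post3.dev0+gabc1234
    #   git-describe       1.0.0-3-gabc1234-dirty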