pax_global_header00006660000000000000000000000064127512272410014515gustar00rootroot0000000000000052 comment=9e8076dfbc51ef380030e834918486cdba8747d4 bumps-0.7.6/000077500000000000000000000000001275122724100126555ustar00rootroot00000000000000bumps-0.7.6/.gitignore000066400000000000000000000006131275122724100146450ustar00rootroot00000000000000# Eclipse/pycharm settings files .idea .project .pydevproject # editor backup files *.swp *~ *.bak # build/test .settings .coverage /build/ /dist/ /bumps.egg-info/ bumps.iss-include iss-version # doc targets /doc/_build/ /doc/api/ /doc/tutorial/ /doc/dream/ # python droppings from running in place __pycache__/ *.pyc *.pyo *.so *.pyd *.dll *.dyld # run in place sets .mplconfig .mplconfig bumps-0.7.6/LICENSE.txt000077500000000000000000000066571275122724100145210ustar00rootroot00000000000000Bumps is in the public domain. Code in individual files has copyright and license set by individual authors. Bumps GUI --------- Copyright (C) 2006-2011, University of Maryland Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. DREAM ----- Copyright (c) 2008, Los Alamos National Security, LLC All rights reserved. Copyright 2008. Los Alamos National Security, LLC. This software was produced under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National Laboratory (LANL), which is operated by Los Alamos National Security, LLC for the U.S. Department of Energy. The U.S. Government has rights to use, reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES A NY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified to produce derivative works, such modified software should be clearly marked, so as not to confuse it with the version available from LANL. Additionally, redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Los Alamos National Security, LLC, Los Alamos National Laboratory, LANL the U.S. Government, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOS ALAMOS NATIONAL SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. bumps-0.7.6/MANIFEST.in000077500000000000000000000014251275122724100144200ustar00rootroot00000000000000# The purpose of this file is to modify the list of files to include/exclude in # the source archive created by the 'python setup.py sdist' command. Executing # setup.py in the top level directory creates a default list (or manifest) and # the directives in this file add or subtract files from the resulting MANIFEST # file that drives the creation of the archive. # # Note: apparently due to a bug in setup, you cannot include a file whose name # starts with 'build' as in 'build_everything.py'. # Add files to the archive in addition to those that are installed by running # 'python setup.py install'. Typically these extra files are build related. include MANIFEST.in # this file include master_builder.py include bumps.iss include setup_py2exe.py # Delete files #prune this that bumps-0.7.6/README.rst000077500000000000000000000070211275122724100143470ustar00rootroot00000000000000============================================== Bumps: data fitting and uncertainty estimation ============================================== Bumps provides data fitting and Bayesian uncertainty modeling for inverse problems. 
It has a variety of optimization algorithms available for locating the most like value for function parameters given data, and for exploring the uncertainty around the minimum. Installation is with the usual python installation command:: pip install bumps Once the system is installed, you can verify that it is working with:: bumps doc/examples/peaks/model.py --chisq Documentation is available at `readthedocs `_ .. image:: https://zenodo.org/badge/18489/bumps/bumps.svg :target: https://zenodo.org/badge/latestdoi/18489/bumps/bumps Release notes ============= v0.7.6 2016-08-05 ----------------- * add --view option to command line which gets propagated to the model plotter * add support for probability p(x) for vector x using VectorPDF(f,x0) * rename DirectPDF to DirectProblem, and allow it to run in GUI * data reader supports multi-part files, with parts separated by blank lines * add gaussian mixture and laplace examples * bug fix: plots were failing if model name contains a '.' * miscellaneous code cleanup v0.7.5.10 2016-05-04 -------------------- * gui: undo code cleaning operation which broke the user interface v0.7.5.9 2016-04-22 ------------------- * population initializers allow indefinite bounds * use single precision criterion for levenberg-marquardt and bfgs * implement simple, faster, less accurate Hessian & Jacobian * compute uncertainty estimate from Jacobian if problem is sum of squares * gui: fit selection window acts like a dialog v0.7.5.8 2016-04-18 ------------------- * accept model.par output from a different model * show residuals with curve fit output * only show correlations for selected variables * show tics on correlations if small number * improve handling of uncertainty estimate from curvature * tweak dream algorithm -- maybe improve the acceptance ratio? 
* allow model to set visible variables in output * improve handling of arbitrary probability density functions * simplify loading of pymc models * update to numdifftools 0.9.14 * bug fix: improved handling of ill-conditioned fits * bug fix: avoid copying mcmc chain during run * bug fix: more robust handling of --time limit * bug fix: support newer versions of matplotlib and numpy * miscellaneous tweaks and fixes v0.7.5.7 2015-09-21 ------------------- * add entropy calculator (still unreliable for high dimensional problems) * adjust scaling of likelihood (the green line) to match histogram area * use --samples to specify the number of samples from the distribution * mark this and future releases with a DOI at zenodo.org v0.7.5.6 2015-06-03 ------------------- * tweak uncertainty calculations so they don't fail on bad models v0.7.5.5 2015-05-07 ------------------- * documentation updates v0.7.5.4 2014-12-05 ------------------- * use relative rather than absolute noise in dream, which lets us fit target values in the order of 1e-6 or less. * fix covariance population initializer v0.7.5.3 2014-11-21 ------------------- * use --time to stop after a given number of hours * Levenberg-Marquardt: fix "must be 1-d or 2-d" bug * improve curvefit interface v0.7.5.2 2014-09-26 ------------------- * pull numdifftools dependency into the repository v0.7.5.1 2014-09-25 ------------------- * improve the load_model interface v0.7.5 2014-09-10 ----------------- * Pure python release bumps-0.7.6/bin/000077500000000000000000000000001275122724100134255ustar00rootroot00000000000000bumps-0.7.6/bin/bumps000077500000000000000000000013221275122724100144770ustar00rootroot00000000000000#!/usr/bin/env python # This program is in the public domain. # Authors: Paul Kienzle and James Krycka """ This script starts the command line interface of the Bumps Modeler application to process the command just entered. 
""" # ========================== Start program ================================== # Process the command line that has been entered. if __name__ == "__main__": # This is necessary when running the application from a frozen image and # using the --parallel option. Note that freeze_support() has no effect # when running from a python script (i.e., in a non-frozen environment). import multiprocessing multiprocessing.freeze_support() import bumps.cli bumps.cli.main() bumps-0.7.6/bin/bumps_gui000077500000000000000000000013421275122724100153450ustar00rootroot00000000000000#!/usr/bin/env python # This program is in the public domain. # Authors: Paul Kienzle and James Krycka """ This script starts the command line interface of the Bumps Modeler application to process the command just entered. """ # ========================== Start program ================================== # Process the command line that has been entered. if __name__ == "__main__": # This is necessary when running the application from a frozen image and # using the --parallel option. Note that freeze_support() has no effect # when running from a python script (i.e., in a non-frozen environment). import multiprocessing multiprocessing.freeze_support() import bumps.gui.gui_app bumps.gui.gui_app.main() bumps-0.7.6/bin/bumps_serve000077500000000000000000000001711275122724100157040ustar00rootroot00000000000000#!/usr/bin/env python """ Debug server for the bumps fit infrastructure. 
""" from jobqueue.serve import serve serve() bumps-0.7.6/bin/bumps_workerd000077500000000000000000000015111275122724100162340ustar00rootroot00000000000000#!/usr/bin/env python import os import sys import logging from jobqueue import daemon, worker LOGDIR='/var/log/bumps' LOGFILE=os.path.join(LOGDIR,'.bumps-worker.log') PIDFILE=os.path.join(LOGDIR,'.bumps-worker.pid') ERRFILE=os.path.join(LOGDIR,'.bumps-worker.out') def startup(): if not os.path.exists(LOGDIR): os.makedirs(LOGDIR) if len(sys.argv) > 1 and sys.argv[1] == 'debug': loglevel, logfile = logging.DEBUG, None else: loglevel, logfile = logging.ERROR, LOGFILE daemon.startstop(pidfile=PIDFILE, stdout=ERRFILE) logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)-8s %(message)s', datefmt = '%y-%m-%d %H:%M:%S', filename = logfile, filemode='a') worker.main() if __name__ == "__main__": startup() bumps-0.7.6/bin/launch.bat000077500000000000000000000002251275122724100153710ustar00rootroot00000000000000@echo off rem Add location of executing batch file to path for duration of command window. SET BATLOC=%~dp0 PATH %BATLOC%;%PATH% cmd /k bumps --help bumps-0.7.6/bumps.iss000077500000000000000000000223761275122724100145400ustar00rootroot00000000000000; -- bumps.iss -- an Inno Setup Script for Bumps ; This script is used by the Inno Setup Compiler to build a Windows XP ; installer/uninstaller. ; The script is written to explicitly allow multiple versions of the ; application to be installed simulaneously in separate subdirectories such ; as "Bumps 0.5.0", "Bumps 0.7.2", and "Bumps 1.0" under a group directory. ; NOTE: In order to support more than one version of the application ; installed simultaneously, the AppName, Desktop shortcut name, and Quick ; Start shortcut name must be unique among versions. This is in addition to ; having unique names (in the more obvious places) for DefaultDirNam, ; DefaultGroupName, and output file name. 
; By default, when installing: ; - The destination folder will be "C:\Program Files\DANSE\Bumps x.y.z" ; - A desktop icon will be created with the label "Bumps x.y.z" ; - A quickstart icon is optional ; - A start menu folder will be created with the name DANSE -> Bumps x.y.z ; By default, when uninstalling Bumps x.y.z ; - The uninstall can be initiated from either the: ; * Start menu via DANSE -> Bumps x.y.z -> Uninstall Bumps ; * Start menu via Control Panel - > Add or Remove Programs -> Bumps x.y.z ; - It will not delete the C:\Program Files\DANSE\Bumps x.y.z folder if it ; contains any user created files ; - It will delete any desktop or quickstart icons for Bumps that were ; created on installation ; NOTE: The Quick Start Pack for the Inno Setup Compiler needs to be installed ; with the Preprocessor add-on selected to support use of #define statements. #define MyAppName "Bumps" #define MyAppNameLowercase "bumps" #define MyGroupFolderName "DANSE" #define MyAppPublisher "NIST & University of Maryland" #define MyAppURL "http://www.reflectometry.org/danse/" ; Use a batch file to launch bumps.exe to setup a custom environment. #define MyAppCLIFileName "launch.bat" #define MyAppGUIFileName "bumps.exe" #define MyIconFileName "bumps.ico" #define MyIconPath = "bumps-data/bumps.ico" #define MyReadmeFileName "README.txt" #define MyLicenseFileName "LICENSE.txt" #define Space " " ; Use updated version string if present in the include file. It is expected that the Bumps ; build script will create this file using the application's internal version string to create ; a define statement in the format shown below. #define MyAppVersion "0.0.0" #ifexist "iss-version" #include "iss-version" #endif [Setup] ; Make the AppName string unique so that other versions of the program can be installed simultaneously. ; This is done by using the name and version of the application together as the AppName. 
AppName={#MyAppName}{#Space}{#MyAppVersion} AppVerName={#MyAppName}{#Space}{#MyAppVersion} AppPublisher={#MyAppPublisher} ChangesAssociations=yes ; If you do not want a space in folder names, omit {#Space} or replace it with a hyphen char, etc. DefaultDirName={pf}\{#MyGroupFolderName}\{#MyAppName}{#Space}{#MyAppVersion} DefaultGroupName={#MyGroupFolderName}\{#MyAppName}{#Space}{#MyAppVersion} Compression=lzma/max SolidCompression=yes DisableProgramGroupPage=yes ; A file extension of .exe will be appended to OutputBaseFilename. OutputBaseFilename={#MyAppNameLowercase}-{#MyAppVersion}-win32 OutputManifestFile={#MyAppNameLowercase}-{#MyAppVersion}-win32-manifest.txt ; Note that the icon file is in the bin subdirectory, not in the top-level directory. SetupIconFile=bumps\gui\resources\{#MyIconFileName} LicenseFile={#MyLicenseFileName} SourceDir=. OutputDir=. PrivilegesRequired=none ;;;InfoBeforeFile=display_before_install.txt ;;;InfoAfterFile=display_after_install.txt ; The App*URL directives are for display in the Add/Remove Programs control panel and are all optional AppPublisherURL={#MyAppURL} AppSupportURL={#MyAppURL} AppUpdatesURL={#MyAppURL} [Languages] Name: "english"; MessagesFile: "compiler:Default.isl" [Files] ; This script assumes that the output from the previously run py2exe packaging process is in .\dist\... ; NOTE: Don't use "Flags: ignoreversion" on any shared system files Source: "dist\*"; Excludes: "examples,doc"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs ;Source: "dist\examples\*"; DestDir: "{userdocs}\{#MyAppName}\examples"; Flags: ignoreversion recursesubdirs createallsubdirs Source: "doc\tutorial\*"; DestDir: "{userdocs}\{#MyAppName}\examples"; Flags: ignoreversion recursesubdirs createallsubdirs ; The following Pascal function checks for the presence of the VC++ 2008 DLL folder on the target system ; to determine if the VC++ 2008 Redistributable kit needs to be installed. 
[Code] function InstallVC90CRT(): Boolean; begin Result := not DirExists('C:\WINDOWS\WinSxS\x86_Microsoft.VC90.CRT_1fc8b3b9a1e18e3b_9.0.21022.8_x-ww_d08d0375'); end; [Tasks] Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}" Name: "quicklaunchicon"; Description: "{cm:CreateQuickLaunchIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked [Icons] ; This section creates shortcuts. ; - {group} refers to shortcuts in the Start Menu. ; - {commondesktop} refers to shortcuts on the desktop. ; - {userappdata} refers to shortcuts in the Quick Start menu on the tool bar. ; ; When running the application in command line mode, we want to keep the command window open when it ; exits so that the user can run it again from the window. Unfortunately, this section does not have ; a flag for keeping the command window open on exit. To accomplish this, a batch file is run that ; creates the command window and starts the Windows command interpreter. This provides the same ; environment as starting a command window using the run dialog box from the Windows start menu and ; entering a command such as "cmd" or "cmd /k ". ; ; When running the application in GUI mode, we simply run the executable without a console window. 
Name: "{group}\Launch {#MyAppName} GUI"; Filename: "{app}\{#MyAppGUIFileName}"; IconFilename: "{app}\{#MyIconPath}"; WorkingDir: "{userdocs}\{#MyAppName}" Name: "{group}\Launch {#MyAppName} CLI"; Filename: "{app}\{#MyAppCLIFileName}"; IconFilename: "{app}\{#MyIconPath}"; WorkingDir: "{userdocs}\{#MyAppName}"; Flags: runmaximized Name: "{group}\{cm:ProgramOnTheWeb,{#MyAppName}}"; Filename: "{#MyAppURL}" Name: "{group}\{cm:UninstallProgram,{#MyAppName}}"; Filename: "{uninstallexe}" Name: "{commondesktop}\{#MyAppName} GUI{#Space}{#MyAppVersion}"; Filename: "{app}\{#MyAppGUIFileName}"; Tasks: desktopicon; WorkingDir: "{userdocs}\{#MyAppName}"; IconFilename: "{app}\{#MyIconPath}" Name: "{commondesktop}\{#MyAppName} CLI{#Space}{#MyAppVersion}"; Filename: "{app}\{#MyAppCLIFileName}"; Tasks: desktopicon; WorkingDir: "{userdocs}\{#MyAppName}"; IconFilename: "{app}\{#MyIconPath}"; Flags: runmaximized Name: "{userappdata}\Microsoft\Internet Explorer\Quick Launch\{#MyAppName} GUI{#Space}{#MyAppVersion}"; Filename: "{app}\{#MyAppGUIFileName}"; Tasks: quicklaunchicon; WorkingDir: "{userdocs}\{#MyAppName}"; IconFilename: "{app}\{#MyIconPath}" Name: "{userappdata}\Microsoft\Internet Explorer\Quick Launch\{#MyAppName} CLI{#Space}{#MyAppVersion}"; Filename: "{app}\{#MyAppCLIFileName}"; Tasks: quicklaunchicon; WorkingDir: "{userdocs}\{#MyAppName}"; IconFilename: "{app}\{#MyIconPath}"; Flags: runmaximized [Run] ;;;Filename: "{app}\{#MyAppGUIFileName}"; Description: "{cm:LaunchProgram,{#MyAppName} GUI}"; WorkingDir: "{userdocs}\{#MyAppName}"; Flags: nowait postinstall skipifsilent ;;;Filename: "{app}\{#MyAppCLIFileName}"; Description: "{cm:LaunchProgram,{#MyAppName} CLI}"; WorkingDir: "{userdocs}\{#MyAppName}"; Flags: nowait postinstall skipifsilent runmaximized unchecked Filename: "{app}\{#MyReadmeFileName}"; Description: "Read Release Notes"; Verb: "open"; Flags: shellexec skipifdoesntexist waituntilterminated postinstall skipifsilent unchecked ; Install the Microsoft C++ DLL 
redistributable package if it is provided and the DLLs are not present on the target system. ; Note that the redistributable package is included if the app was built using Python 2.6 or 2.7, but not with 2.5. ; Parameter options: ; - for silent install use: "/q" ; - for silent install with progress bar use: "/qb" ; - for silent install with progress bar but disallow cancellation of operation use: "/qb!" ; Note that we do not use the postinstall flag as this would display a checkbox and thus require the user to decide what to do. Filename: "{app}\vcredist_x86.exe"; Parameters: "/qb!"; WorkingDir: "{tmp}"; StatusMsg: "Installing Microsoft Visual C++ 2008 Redistributable Package ..."; Check: InstallVC90CRT(); Flags: skipifdoesntexist waituntilterminated [UninstallDelete] ; Delete directories and files that are dynamically created by the application (i.e. at runtime). Type: filesandordirs; Name: "{localappdata}\bumps-{#MyAppVersion}" Type: files; Name: "{app}\*.exe.log" ; The following is a workaround for the case where the application is installed and uninstalled but the ;{app} directory is not deleted because it has user files. Then the application is installed into the ; existing directory, user files are deleted, and the application is un-installed again. Without the ; directive below, {app} will not be deleted because Inno Setup did not create it during the previous ; installation. Type: dirifempty; Name: "{app}" bumps-0.7.6/bumps/000077500000000000000000000000001275122724100140035ustar00rootroot00000000000000bumps-0.7.6/bumps/__init__.py000066400000000000000000000023071275122724100161160ustar00rootroot00000000000000# This program is in the public domain # Author: Paul Kienzle """ Bumps: curve fitter with uncertainty estimation This package provides tools for modeling parametric systems in a Bayesian context, with routines for finding the maximum likelihood and the posterior probability density function. 
A graphical interface allows direct manipulation of the model parameters. See http://www.reflectometry.org/danse/reflectometry for online manuals. """ __version__ = "0.7.6" def data_files(): """ Return the data files associated with the package for setup_py2exe.py. The format is a list of (directory, [files...]) pairs which can be used directly in the py2exe setup script as:: setup(..., data_files=data_files(), ...) """ from .gui.utilities import data_files return data_files() def package_data(): """ Return the data files associated with the package for setup.py. The format is a dictionary of {'fully.qualified.module', [files...]} used directly in the setup script as:: setup(..., package_data=package_data(), ...) """ from .gui.utilities import package_data return package_data() bumps-0.7.6/bumps/bounds.py000066400000000000000000000555741275122724100156670ustar00rootroot00000000000000# This program is in the public domain # Author: Paul Kienzle """ Parameter bounds and prior probabilities. Parameter bounds encompass several features of our optimizers. First and most trivially they allow for bounded constraints on parameter values. Secondly, for parameter values known to follow some distribution, the bounds encodes a penalty function as the value strays from its nominal value. Using a negative log likelihood cost function on the fit, then this value naturally contributes to the overall likelihood measure. Predefined bounds are:: Unbounded range (-inf, inf) BoundedBelow range (base, inf) BoundedAbove range (-inf, base) Bounded range (low, high) Normal range (-inf, inf) with gaussian probability BoundedNormal range (low, high) with gaussian probability within SoftBounded range (low, high) with gaussian probability outside New bounds can be defined following the abstract base class interface defined in :class:`Bounds`, or using Distribution(rv) where rv is a scipy.stats continuous distribution. 
For generating bounds given a value, we provide a few helper functions:: v +/- d: pm(x,dx) or pm(x,-dm,+dp) or pm(x,+dp,-dm) return (x-dm,x+dm) limited to 2 significant digits v +/- p%: pmp(x,p) or pmp(x,-pm,+pp) or pmp(x,+pp,-pm) return (x-pm*x/100, x+pp*x/100) limited to 2 sig. digits pm_raw(x,dx) or raw_pm(x,-dm,+dp) or raw_pm(x,+dp,-dm) return (x-dm,x+dm) pmp_raw(x,p) or raw_pmp(x,-pm,+pp) or raw_pmp(x,+pp,-pm) return (x-pm*x/100, x+pp*x/100) nice_range(lo,hi) return (lo,hi) limited to 2 significant digits """ from __future__ import division __all__ = ['pm', 'pmp', 'pm_raw', 'pmp_raw', 'nice_range', 'init_bounds', 'Bounds', 'Unbounded', 'Bounded', 'BoundedAbove', 'BoundedBelow', 'Distribution', 'Normal', 'BoundedNormal', 'SoftBounded'] import math from math import log, log10, sqrt, pi, ceil, floor from numpy import inf, isinf, isfinite, clip import numpy.random as RNG try: from scipy.stats import norm as normal_distribution except ImportError: def normal_distribution(*args, **kw): raise RuntimeError("scipy.stats unavailable") def pm(v, *args): """ Return the tuple (~v-dv,~v+dv), where ~expr is a 'nice' number near to to the value of expr. For example:: >>> r = pm(0.78421, 0.0023145) >>> print("%g - %g"%r) 0.7818 - 0.7866 If called as pm(value, +dp, -dm) or pm(value, -dm, +dp), return (~v-dm, ~v+dp). """ return nice_range(pm_raw(v, *args)) def pmp(v, *args): """ Return the tuple (~v-%v,~v+%v), where ~expr is a 'nice' number near to the value of expr. For example:: >>> r = pmp(0.78421, 10) >>> print("%g - %g"%r) 0.7 - 0.87 >>> r = pmp(0.78421, 0.1) >>> print("%g - %g"%r) 0.7834 - 0.785 If called as pmp(value, +pp, -pm) or pmp(value, -pm, +pp), return (~v-pm%v, ~v+pp%v). """ return nice_range(pmp_raw(v, *args)) # Generate ranges using x +/- dx or x +/- p%*x def pm_raw(v, *args): """ Return the tuple [v-dv,v+dv]. If called as pm_raw(value, +dp, -dm) or pm_raw(value, -dm, +dp), return (v-dm, v+dp). 
""" if len(args) == 1: dv = args[0] return v - dv, v + dv elif len(args) == 2: plus, minus = args if plus < minus: plus, minus = minus, plus # if minus > 0 or plus < 0: # raise TypeError("pm(value, p1, p2) requires both + and - values") return v + minus, v + plus else: raise TypeError("pm(value, delta) or pm(value, -p1, +p2)") def pmp_raw(v, *args): """ Return the tuple [v-%v,v+%v] If called as pmp_raw(value, +pp, -pm) or pmp_raw(value, -pm, +pp), return (v-pm%v, v+pp%v). """ if len(args) == 1: percent = args[0] b1, b2 = v * (1 - 0.01 * percent), v * (1 + 0.01 * percent) elif len(args) == 2: plus, minus = args if plus < minus: plus, minus = minus, plus # if minus > 0 or plus < 0: # raise TypeError("pmp(value, p1, p2) requires both + and - values") b1, b2 = v * (1 + 0.01 * minus), v * (1 + 0.01 * plus) else: raise TypeError("pmp(value, delta) or pmp(value, -p1, +p2)") return (b1, b2) if v > 0 else (b2, b1) def nice_range(bounds): """ Given a range, return an enclosing range accurate to two digits. """ step = bounds[1] - bounds[0] if step > 0: d = 10 ** (floor(log10(step)) - 1) return floor(bounds[0]/d)*d, ceil(bounds[1]/d)*d else: return bounds def init_bounds(v): """ Returns a bounds object of the appropriate type given the arguments. This is a helper factory to simplify the user interface to parameter objects. """ # if it is none, then it is unbounded if v is None: return Unbounded() # if it isn't a tuple, assume it is a bounds type. try: lo, hi = v except TypeError: return v # if it is a tuple, then determine what kind of bounds we have if lo is None: lo = -inf if hi is None: hi = inf # TODO: consider issuing a warning instead of correcting reversed bounds if lo >= hi: lo, hi = hi, lo if isinf(lo) and isinf(hi): return Unbounded() elif isinf(lo): return BoundedAbove(hi) elif isinf(hi): return BoundedBelow(lo) else: return Bounded(lo, hi) class Bounds(object): """ Bounds abstract base class. A range is used for several purposes. 
One is that it transforms parameters between unbounded and bounded forms depending on the needs of the optimizer. Another is that it generates random values in the range for stochastic optimizers, and for initialization. A third is that it returns the likelihood of seeing that particular value for optimizers which use soft constraints. Assuming the cost function that is being optimized is also a probability, then this is an easy way to incorporate information from other sorts of measurements into the model. """ limits = (-inf, inf) # TODO: need derivatives wrt bounds transforms def get01(self, x): """ Convert value into [0,1] for optimizers which are bounds constrained. This can also be used as a scale bar to show approximately how close to the end of the range the value is. """ def put01(self, v): """ Convert [0,1] into value for optimizers which are bounds constrained. """ def getfull(self, x): """ Convert value into (-inf,inf) for optimizers which are unconstrained. """ def putfull(self, v): """ Convert (-inf,inf) into value for optimizers which are unconstrained. """ def random(self, n=1, target=1.0): """ Return a randomly generated valid value. *target* gives some scale independence to the random number generator, allowing the initial value of the parameter to influence the randomly generated value. Otherwise fits without bounds have too large a space to search through. """ def nllf(self, value): """ Return the negative log likelihood of seeing this value, with likelihood scaled so that the maximum probability is one. For uniform bounds, this either returns zero or inf. For bounds based on a probability distribution, this returns values between zero and inf. The scaling is necessary so that indefinite and semi-definite ranges return a sensible value. The scaling does not affect the likelihood maximization process, though the resulting likelihood is not easily interpreted. 
""" def residual(self, value): """ Return the parameter 'residual' in a way that is consistent with residuals in the normal distribution. The primary purpose is to graphically display exceptional values in a way that is familiar to the user. For fitting, the scaled likelihood should be used. To do this, we will match the cumulative density function value with that for N(0,1) and find the corresponding percent point function from the N(0,1) distribution. In this way, for example, a value to the right of 2.275% of the distribution would correspond to a residual of -2, or 2 standard deviations below the mean. For uniform distributions, with all values equally probable, we use a value of +/-4 for values outside the range, and 0 for values inside the range. """ def start_value(self): """ Return a default starting value if none given. """ return self.put01(0.5) def __contains__(self, v): return self.limits[0] <= v <= self.limits[1] def __str__(self): limits = tuple(num_format(v) for v in self.limits) return "(%s,%s)" % limits # CRUFT: python 2.5 doesn't format indefinite numbers properly on windows def num_format(v): """ Number formating which supports inf/nan on windows. """ if isfinite(v): return "%g" % v elif isinf(v): return "inf" if v > 0 else "-inf" else: return "NaN" class Unbounded(Bounds): """ Unbounded parameter. The random initial condition is assumed to be between 0 and 1 The probability is uniformly 1/inf everywhere, which means the negative log likelihood of P is inf everywhere. A value inf will interfere with optimization routines, and so we instead choose P == 1 everywhere. """ def random(self, n=1, target=1.0): scale = target + (target==0.) return RNG.randn(n)*scale def nllf(self, value): return 0 def residual(self, value): return 0 def get01(self, x): return _get01_inf(x) def put01(self, v): return _put01_inf(v) def getfull(self, x): return x def putfull(self, v): return v class BoundedBelow(Bounds): """ Semidefinite range bounded below. 
The random initial condition is assumed to be within 1 of the maximum. [base,inf] <-> (-inf,inf) is direct above base+1, -1/(x-base) below [base,inf] <-> [0,1] uses logarithmic compression. Logarithmic compression works by converting sign*m*2^e+base to sign*(e+1023+m), yielding a value in [0,2048]. This can then be converted to a value in [0,1]. Note that the likelihood function is problematic: the true probability of seeing any particular value in the range is infinitesimal, and that is indistinguishable from values outside the range. Instead we say that P = 1 in range, and 0 outside. """ def __init__(self, base): self.limits = (base, inf) self._base = base def start_value(self): return self._base + 1 def random(self, n=1, target=1.): target = max(abs(target), abs(self._base)) scale = target + (target==0.) return self._base + abs(RNG.randn(n)*scale) def nllf(self, value): return 0 if value >= self._base else inf def residual(self, value): return 0 if value >= self._base else -4 def get01(self, x): m, e = math.frexp(x - self._base) if m >= 0 and e <= _E_MAX: v = (e + m) / (2. * _E_MAX) return v else: return 0 if m < 0 else 1 def put01(self, v): v = v * 2 * _E_MAX e = int(v) m = v - e x = math.ldexp(m, e) + self._base return x def getfull(self, x): v = x - self._base return v if v >= 1 else 2 - 1. / v def putfull(self, v): x = v if v >= 1 else 1. / (2 - v) return x + self._base class BoundedAbove(Bounds): """ Semidefinite range bounded above. [-inf,base] <-> [0,1] uses logarithmic compression [-inf,base] <-> (-inf,inf) is direct below base-1, 1/(base-x) above Logarithmic compression works by converting sign*m*2^e+base to sign*(e+1023+m), yielding a value in [0,2048]. This can then be converted to a value in [0,1]. Note that the likelihood function is problematic: the true probability of seeing any particular value in the range is infinitesimal, and that is indistinguishable from values outside the range. Instead we say that P = 1 in range, and 0 outside. 
""" def __init__(self, base): self.limits = (-inf, base) self._base = base def start_value(self): return self._base - 1 def random(self, n=1, target=1.0): target = max(abs(self._base), abs(target)) scale = target + (target==0.) return self._base - abs(RNG.randn(n)*scale) def nllf(self, value): return 0 if value <= self._base else inf def residual(self, value): return 0 if value <= self._base else 4 def get01(self, x): m, e = math.frexp(self._base - x) if m >= 0 and e <= _E_MAX: v = (e + m) / (2. * _E_MAX) return 1 - v else: return 1 if m < 0 else 0 def put01(self, v): v = (1 - v) * 2 * _E_MAX e = int(v) m = v - e x = -(math.ldexp(m, e) - self._base) return x def getfull(self, x): v = x - self._base return v if v <= -1 else -2 - 1. / v def putfull(self, v): x = v if v <= -1 else -1. / (v + 2) return x + self._base class Bounded(Bounds): """ Bounded range. [lo,hi] <-> [0,1] scale is simple linear [lo,hi] <-> (-inf,inf) scale uses exponential expansion While technically the probability of seeing any value within the range is 1/range, for consistency with the semi-infinite ranges and for a more natural mapping between nllf and chisq, we instead set the probability to 0. This choice will not affect the fits. 
""" def __init__(self, lo, hi): self.limits = (lo, hi) self._nllf_scale = log(hi - lo) def random(self, n=1, target=1.0): lo, hi = self.limits #print("= uniform",lo,hi) return RNG.uniform(lo, hi, size=n) def nllf(self, value): lo, hi = self.limits return 0 if lo <= value <= hi else inf # return self._nllf_scale if lo<=value<=hi else inf def residual(self, value): lo, hi = self.limits return -4 if lo > value else (4 if hi < value else 0) def get01(self, x): lo, hi = self.limits return float(x - lo) / (hi - lo) if hi - lo > 0 else 0 def put01(self, v): lo, hi = self.limits return (hi - lo) * v + lo def getfull(self, x): return _put01_inf(self.get01(x)) def putfull(self, v): return self.put01(_get01_inf(v)) class Distribution(Bounds): """ Parameter is pulled from a distribution. *dist* must implement the distribution interface from scipy.stats. In particular, it should define methods rvs, nnlf, cdf and ppf and attributes args and dist.name. """ def __init__(self, dist): self.dist = dist def random(self, n=1, target=1.0): return self.dist.rvs(n) def nllf(self, value): return -log(self.dist.pdf(value)) def residual(self, value): return normal_distribution.ppf(self.dist.cdf(value)) def get01(self, x): return self.dist.cdf(x) def put01(self, v): return self.dist.ppf(v) def getfull(self, x): return x def putfull(self, v): return v def __getstate__(self): # WARNING: does not preserve and restore seed return self.dist.__class__, self.dist.args, self.dist.kwds def __setstate__(self, state): cls, args, kwds = state self.dist = cls(*args, **kwds) def __str__(self): return "%s(%s)" % (self.dist.dist.name, ",".join(str(s) for s in self.dist.args)) class Normal(Distribution): """ Parameter is pulled from a normal distribution. 
If you have measured a parameter value with some uncertainty (e.g., the film thickness is 35+/-5 according to TEM), then you can use this measurement to restrict the values given to the search, and to penalize choices of this fitting parameter which are different from this value. *mean* is the expected value of the parameter and *std* is the 1-sigma standard deviation. """ def __init__(self, mean=0, std=1): Distribution.__init__(self, normal_distribution(mean, std)) self._nllf_scale = log(sqrt(2 * pi * std ** 2)) def nllf(self, value): # P(v) = exp(-0.5*(v-mean)**2/std**2)/sqrt(2*pi*std**2) # -log(P(v)) = -(-0.5*(v-mean)**2/std**2 - log( (2*pi*std**2) ** 0.5)) # = 0.5*(v-mean)**2/std**2 + log(2*pi*std**2)/2 mean, std = self.dist.args return 0.5 * ((value-mean)/std)**2 + self._nllf_scale def residual(self, value): mean, std = self.dist.args return (value-mean)/std def __getstate__(self): return self.dist.args # args is mean,std def __setstate__(self, state): mean, std = state self.__init__(mean=mean, std=std) class BoundedNormal(Bounds): """ truncated normal bounds """ def __init__(self, sigma=1, mu=0, limits=(-inf, inf)): self.limits = limits self.sigma, self.mu = sigma, mu self._left = normal_distribution.cdf((limits[0]-mu)/sigma) self._delta = normal_distribution.cdf((limits[1]-mu)/sigma) - self._left self._nllf_scale = log(sqrt(2 * pi * sigma ** 2)) + log(self._delta) def get01(self, x): """ Convert value into [0,1] for optimizers which are bounds constrained. This can also be used as a scale bar to show approximately how close to the end of the range the value is. """ v = ((normal_distribution.cdf((x-self.mu)/self.sigma) - self._left) / self._delta) return clip(v, 0, 1) def put01(self, v): """ Convert [0,1] into value for optimizers which are bounds constrained. """ x = v * self._delta + self._left return normal_distribution.ppf(x) * self.sigma + self.mu def getfull(self, x): """ Convert value into (-inf,inf) for optimizers which are unconstrained. 
""" raise NotImplementedError def putfull(self, v): """ Convert (-inf,inf) into value for optimizers which are unconstrained. """ raise NotImplementedError def random(self, n=1, target=1.0): """ Return a randomly generated valid value, or an array of values """ return self.get01(RNG.rand(n)) def nllf(self, value): """ Return the negative log likelihood of seeing this value, with likelihood scaled so that the maximum probability is one. """ if value in self: return 0.5 * ((value-self.mu)/self.sigma)**2 + self._nllf_scale else: return inf def residual(self, value): """ Return the parameter 'residual' in a way that is consistent with residuals in the normal distribution. The primary purpose is to graphically display exceptional values in a way that is familiar to the user. For fitting, the scaled likelihood should be used. For the truncated normal distribution, we can just use the normal residuals. """ return (value - self.mu) / self.sigma def start_value(self): """ Return a default starting value if none given. """ return self.put01(0.5) def __contains__(self, v): return self.limits[0] <= v <= self.limits[1] def __str__(self): vals = ( self.limits[0], self.limits[1], self.mu, self.sigma, ) return "(%s,%s), norm(%s,%s)" % tuple(num_format(v) for v in vals) class SoftBounded(Bounds): """ Parameter is pulled from a stretched normal distribution. This is like a rectangular distribution, but with gaussian tails. The intent of this distribution is for soft constraints on the values. As such, the random generator will return values like the rectangular distribution, but the likelihood will return finite values based on the distance from the from the bounds rather than returning infinity. Note that for bounds constrained optimizers which force the value into the range [0,1] for each parameter we don't need to use soft constraints, and this acts just like the rectangular distribution. 
""" def __init__(self, lo, hi, std=None): self._lo, self._hi, self._std = lo, hi, std self._nllf_scale = log(hi - lo + sqrt(2 * pi * std)) def random(self, n=1, target=1.0): return RNG.uniform(self._lo, self._hi, size=n) def nllf(self, value): # To turn f(x) = 1 if x in [lo,hi] else G(tail) # into a probability p, we need to normalize by \int{f(x)dx}, # which is just hi-lo + sqrt(2*pi*std**2). if value < self._lo: z = self._lo - value elif value > self._hi: z = value - self._hi else: z = 0 return (z / self._std) ** 2 / 2 + self._nllf_scale def residual(self, value): if value < self._lo: z = self._lo - value elif value > self._hi: z = value - self._hi else: z = 0 return z / self._std def get01(self, x): v = float(x - self._lo) / (self._hi - self._lo) return v if 0 <= v <= 1 else (0 if v < 0 else 1) def put01(self, v): return v * (self._hi - self._lo) + self._lo def getfull(self, x): return x def putfull(self, v): return v def __str__(self): return "box_norm(%g,%g,sigma=%g)" % (self._lo, self._hi, self._std) _E_MIN = -1023 _E_MAX = 1024 def _get01_inf(x): """ Convert a floating point number to a value in [0,1]. The value sign*m*2^e to sign*(e+1023+m), yielding a value in [-2048,2048]. This can then be converted to a value in [0,1]. Sort order is preserved. At least 14 bits of precision are lost from the 53 bit mantissa. """ # Arctan alternative # Arctan is approximately linear in (-0.5, 0.5), but the # transform is only useful up to (-10**15,10**15). # return atan(x)/pi + 0.5 m, e = math.frexp(x) s = math.copysign(1.0, m) v = (e - _E_MIN + m * s) * s v = v / (4 * _E_MAX) + 0.5 v = 0 if _E_MIN > e else (1 if _E_MAX < e else v) return v def _put01_inf(v): """ Convert a value in [0,1] to a full floating point number. Sort order is preserved. Reverses :func:`_get01_inf`, but with fewer bits of precision. 
""" # Arctan alternative # return tan(pi*(v-0.5)) v = (v - 0.5) * 4 * _E_MAX s = math.copysign(1., v) v *= s e = int(v) m = v - e x = math.ldexp(s * m, e + _E_MIN) # print "< x,e,m,s,v",x,e+_e_min,s*m,s,v return x bumps-0.7.6/bumps/bspline.py000066400000000000000000000314271275122724100160200ustar00rootroot00000000000000# This program is public domain """ BSpline calculator. Given a set of knots, compute the cubic B-spline interpolation. """ from __future__ import division, print_function __all__ = ['bspline', 'pbs'] import numpy as np from numpy import maximum as max, minimum as min def pbs(x, y, t, clamp=True, parametric=True): """ Evaluate the parametric B-spline px(t),py(t). *x* and *y* are the control points, and *t* are the points in [0,1] at which they are evaluated. The *x* values are sorted so that the spline describes a function. The spline goes through the control points at the ends. If *clamp* is True, the derivative of the spline at both ends is zero. If *clamp* is False, the derivative at the ends is equal to the slope connecting the final pair of control points. If *parametric* is False, then parametric points t' are chosen such that x(t') = *t*. The B-spline knots are chosen to be equally spaced within [0,1]. 
""" x = list(sorted(x)) knot = np.hstack((0, 0, np.linspace(0, 1, len(y)), 1, 1)) cx = np.hstack((x[0], x[0], x[0], (2 * x[0] + x[1]) / 3, x[1:-1], (2 * x[-1] + x[-2]) / 3, x[-1])) if clamp: cy = np.hstack((y[0], y[0], y[0], y, y[-1])) else: cy = np.hstack((y[0], y[0], y[0], y[0] + (y[1] - y[0]) / 3, y[1:-1], y[-1] + (y[-2] - y[-1]) / 3, y[-1])) if parametric: return _bspline3(knot, cx, t), _bspline3(knot, cy, t) # Find parametric t values corresponding to given z values # First try a few newton steps xt = np.interp(t, x, np.linspace(0, 1, len(x))) with np.errstate(all='ignore'): for _ in range(6): pt, dpt = _bspline3(knot, cx, xt, nderiv=1) xt -= (pt - t) / dpt idx = np.isnan(xt) | (abs(_bspline3(knot, cx, xt) - t) > 1e-9) # Use bisection when newton fails if idx.any(): missing = t[idx] # print missing t_lo, t_hi = 0 * missing, 1 * missing for _ in range(30): # bisection with about 1e-9 tolerance trial = (t_lo + t_hi) / 2 ptrial = _bspline3(knot, cx, trial) tidx = ptrial < missing t_lo[tidx] = trial[tidx] t_hi[~tidx] = trial[~tidx] xt[idx] = (t_lo + t_hi) / 2 # print "err",np.max(abs(_bspline3(knot,cx,t)-xt)) # Return y evaluated at the interpolation points return _bspline3(knot, cx, xt), _bspline3(knot, cy, xt) def bspline(y, xt, clamp=True): """ Evaluate the B-spline with control points *y* at positions *xt* in [0,1]. The spline goes through the control points at the ends. If *clamp* is True, the derivative of the spline at both ends is zero. If *clamp* is False, the derivative at the ends is equal to the slope connecting the final pair of control points. B-spline knots are chosen to be equally spaced within [0,1]. 
""" knot = np.hstack((0, 0, np.linspace(0, 1, len(y)), 1, 1)) if clamp: cy = np.hstack(([y[0]] * 3, y, y[-1])) else: cy = np.hstack((y[0], y[0], y[0], y[0] + (y[1] - y[0]) / 3, y[1:-1], y[-1] + (y[-2] - y[-1]) / 3, y[-1])) return _bspline3(knot, cy, xt) def _bspline3(knot, control, t, nderiv=0): """ Evaluate the B-spline specified by the given *knot* sequence and *control* values at the parametric points *t*. *nderiv* selects the function or derivative to evaluate. """ knot, control, t = [np.asarray(v) for v in (knot, control, t)] # Deal with values outside the range valid = (t > knot[0]) & (t <= knot[-1]) tv = t[valid] f = np.zeros(t.shape) f[t <= knot[0]] = control[0] f[t >= knot[-1]] = control[-1] # Find B-Spline parameters for the individual segments end = len(knot) - 1 segment = knot.searchsorted(tv) - 1 tm2 = knot[max(segment - 2, 0)] tm1 = knot[max(segment - 1, 0)] tm0 = knot[max(segment - 0, 0)] tp1 = knot[min(segment + 1, end)] tp2 = knot[min(segment + 2, end)] tp3 = knot[min(segment + 3, end)] p4 = control[min(segment + 3, end)] p3 = control[min(segment + 2, end)] p2 = control[min(segment + 1, end)] p1 = control[min(segment + 0, end)] # Compute second and third derivatives. if nderiv > 1: # Normally we require a recursion for Q, R and S to compute # df, d2f and d3f respectively, however Q can be computed directly # from intermediate values of P, S has a recursion of depth 0, # which leaves only the R recursion of depth 1 in the calculation # below. 
        # First-derivative control values Q and second-derivative values R,
        # computed from successive divided differences of the P control
        # values (de Boor style recursion).
        q4 = (p4 - p3) * 3 / (tp3 - tm0)
        q3 = (p3 - p2) * 3 / (tp2 - tm1)
        q2 = (p2 - p1) * 3 / (tp1 - tm2)
        r4 = (q4 - q3) * 2 / (tp2 - tm0)
        r3 = (q3 - q2) * 2 / (tp1 - tm1)
        if nderiv > 2:
            # Third derivative S is piecewise constant per segment.
            s4 = (r4 - r3) / (tp1 - tm0)
            d3f = np.zeros(t.shape)
            d3f[valid] = s4
        r4 = ((tv - tm0) * r4 + (tp1 - tv) * r3) / (tp1 - tm0)
        d2f = np.zeros(t.shape)
        d2f[valid] = r4

    # Compute function value and first derivative
    p4 = ((tv - tm0) * p4 + (tp3 - tv) * p3) / (tp3 - tm0)
    p3 = ((tv - tm1) * p3 + (tp2 - tv) * p2) / (tp2 - tm1)
    p2 = ((tv - tm2) * p2 + (tp1 - tv) * p1) / (tp1 - tm2)
    p4 = ((tv - tm0) * p4 + (tp2 - tv) * p3) / (tp2 - tm0)
    p3 = ((tv - tm1) * p3 + (tp1 - tv) * p2) / (tp1 - tm1)
    if nderiv >= 1:
        df = np.zeros(t.shape)
        df[valid] = (p4 - p3) * 3 / (tp1 - tm0)
    p4 = ((tv - tm0) * p4 + (tp1 - tv) * p3) / (tp1 - tm0)
    f[valid] = p4

    # Return f plus as many derivatives as requested.
    if nderiv == 0:
        return f
    elif nderiv == 1:
        return f, df
    elif nderiv == 2:
        return f, df, d2f
    else:
        return f, df, d2f, d3f


def bspline_control(y, clamp=True):
    # Control points for a B-spline interpolating *y*; delegates to
    # _find_control, which is not yet implemented.
    return _find_control(y, clamp=clamp)


def pbs_control(x, y, clamp=True):
    # Control points for the parametric B-spline through (x, y); delegates
    # to _find_control, which is not yet implemented.
    return _find_control(x, clamp=clamp), _find_control(y, clamp=clamp)


def _find_control(v, clamp=True):
    # NOTE: everything after this raise is intentionally unreachable --
    # it is a draft banded-solve implementation kept for future work.
    raise NotImplementedError("B-spline interpolation doesn't work yet")
    from scipy.linalg import solve_banded
    n = len(v)
    # Tridiagonal system relating control points to interpolated values.
    udiag = np.hstack([0, 0, 0, [1 / 6] * (n - 3), 0.25, 0.3])
    ldiag = np.hstack([-0.3, 0.25, [1 / 6] * (n - 3), 0, 0, 0])
    mdiag = np.hstack([1, 0.3, 7 / 12, [2 / 3] * (n - 4), 7 / 12, -0.3, 1])
    A = np.vstack([ldiag, mdiag, udiag])
    if clamp:
        # First derivative is zero at ends
        bl, br = 0, 0
    else:
        # First derivative at ends follows line between final control points
        bl, br = (v[1] - v[0]) * n, (v[-1] - v[-2]) * n
    b = np.hstack([v[0], bl, v[1:n - 1], br, v[-1]])
    x = solve_banded((1, 1), A, b)
    return x  # x[1:-1]

# ===========================================================================
# test code


def speed_check():
    """
    Print the time to evaluate 400 points on a 7 knot spline.
""" import time x = np.linspace(0, 1, 7) x[1], x[-2] = x[2], x[-3] y = [9, 11, 2, 3, 8, 0, 2] t = np.linspace(0, 1, 400) t0 = time.time() for _ in range(1000): bspline(y, t, clamp=True) print("bspline (ms)", (time.time() - t0) / 1000) def _check(expected, got, tol): """ Check that value matches expected within tolerance. If *expected* is never zero, use relative error for tolerance. """ relative = (np.isscalar(expected) and expected != 0) \ or (not np.isscalar(expected) and all(expected != 0)) if relative: norm = np.linalg.norm((expected - got) / expected) else: norm = np.linalg.norm(expected - got) if norm >= tol: msg = [ "expected %s"%str(expected), "got %s"%str(got), "tol %s norm %s"%(tol, norm), ] raise ValueError("\n".join(msg)) def _derivs(x, y): """ Compute numerical derivative for a function evaluated on a fine grid. """ # difference formula return (y[1] - y[0]) / (x[1] - x[0]), (y[-1] - y[-2]) / (x[-1] - x[-2]) # 5-point difference formula #left = (y[0]-8*y[1]+8*y[3]-y[4]) / 12 / (x[1]-x[0]) #right = (y[-5]-8*y[-4]+8*y[-2]-y[-1]) / 12 / (x[-1]-x[-2]) # return left,right def test(): h = 1e-10 t = np.linspace(0, 1, 100) dt = np.array([0, h, 2 * h, 3 * h, 4 * h, 1 - 4 * h, 1 - 3 * h, 1 - 2 * h, 1 - h, 1]) y = [9, 11, 2, 3, 8, 0, 2] n = len(y) xeq = np.linspace(0, 1, n) x = xeq + 0 x[0], x[-1] = (x[0] + x[1]) / 2, (x[-2] + x[-1]) / 2 dx = np.array([x[0], x[0] + h, x[0] + 2*h, x[0] + 3*h, x[0] + 4*h, x[-1] - 4*h, x[-1] - 3*h, x[-1] - 2*h, x[-1] - h, x[-1]]) # ==== Check that bspline matches pbs with equally spaced x yt = bspline(y, t, clamp=True) xtp, ytp = pbs(xeq, y, t, clamp=True, parametric=False) _check(t, xtp, 1e-8) _check(yt, ytp, 1e-8) xtp, ytp = pbs(xeq, y, t, clamp=True, parametric=True) _check(t, xtp, 1e-8) _check(yt, ytp, 1e-8) yt = bspline(y, t, clamp=False) xtp, ytp = pbs(xeq, y, t, clamp=False, parametric=False) _check(t, xtp, 1e-8) _check(yt, ytp, 1e-8) xtp, ytp = pbs(xeq, y, t, clamp=False, parametric=True) _check(t, xtp, 1e-8) _check(yt, ytp, 
1e-8) # ==== Check bspline f at end points yt = bspline(y, t, clamp=True) _check(y[0], yt[0], 1e-12) _check(y[-1], yt[-1], 1e-12) yt = bspline(y, t, clamp=False) _check(y[0], yt[0], 1e-12) _check(y[-1], yt[-1], 1e-12) xt, yt = pbs(x, y, t, clamp=True, parametric=False) _check(x[0], xt[0], 1e-8) _check(x[-1], xt[-1], 1e-8) _check(y[0], yt[0], 1e-8) _check(y[-1], yt[-1], 1e-8) xt, yt = pbs(x, y, t, clamp=True, parametric=True) _check(x[0], xt[0], 1e-8) _check(x[-1], xt[-1], 1e-8) _check(y[0], yt[0], 1e-8) _check(y[-1], yt[-1], 1e-8) xt, yt = pbs(x, y, t, clamp=False, parametric=False) _check(x[0], xt[0], 1e-8) _check(x[-1], xt[-1], 1e-8) _check(y[0], yt[0], 1e-8) _check(y[-1], yt[-1], 1e-8) xt, yt = pbs(x, y, t, clamp=False, parametric=True) _check(x[0], xt[0], 1e-8) _check(x[-1], xt[-1], 1e-8) _check(y[0], yt[0], 1e-8) _check(y[-1], yt[-1], 1e-8) # ==== Check f' at end points yt = bspline(y, dt, clamp=True) left, right = _derivs(dt, yt) _check(0, left, 1e-8) _check(0, right, 1e-8) xt, yt = pbs(x, y, dx, clamp=True, parametric=False) left, right = _derivs(xt, yt) _check(0, left, 1e-8) _check(0, right, 1e-8) xt, yt = pbs(x, y, dt, clamp=True, parametric=True) left, right = _derivs(xt, yt) _check(0, left, 1e-8) _check(0, right, 1e-8) yt = bspline(y, dt, clamp=False) left, right = _derivs(dt, yt) _check((y[1] - y[0]) * (n - 1), left, 5e-4) _check((y[-1] - y[-2]) * (n - 1), right, 5e-4) xt, yt = pbs(x, y, dx, clamp=False, parametric=False) left, right = _derivs(xt, yt) _check((y[1] - y[0]) / (x[1] - x[0]), left, 5e-4) _check((y[-1] - y[-2]) / (x[-1] - x[-2]), right, 5e-4) xt, yt = pbs(x, y, dt, clamp=False, parametric=True) left, right = _derivs(xt, yt) _check((y[1] - y[0]) / (x[1] - x[0]), left, 5e-4) _check((y[-1] - y[-2]) / (x[-1] - x[-2]), right, 5e-4) # ==== Check interpolator #yc = bspline_control(y) # print("y",y) # print("p(yc)",bspline(yc,xeq)) def demo(): from pylab import hold, linspace, subplot, plot, legend, show hold(True) #y = [9,6,1,3,8,4,2] #y = 
[9,11,13,3,-2,0,2] y = [9, 11, 2, 3, 8, 0] #y = [9,9,1,3,8,2,2] x = linspace(0, 1, len(y)) t = linspace(x[0], x[-1], 400) subplot(211) plot(t, bspline(y, t, clamp=False), '-.y', label="unclamped bspline") # bspline # bspline plot(t, bspline(y, t, clamp=True), '-y', label="clamped bspline") plot(sorted(x), y, ':oy', label="control points") legend() #left, right = _derivs(t, bspline(y, t, clamp=False)) #print(left, (y[1] - y[0]) / (x[1] - x[0])) subplot(212) xt, yt = pbs(x, y, t, clamp=False) plot(xt, yt, '-.b', label="unclamped pbs") # pbs xt, yt = pbs(x, y, t, clamp=True) plot(xt, yt, '-b', label="clamped pbs") # pbs #xt,yt = pbs(x,y,t,clamp=True, parametric=True) # plot(xt,yt,'-g') # pbs plot(sorted(x), y, ':ob', label="control points") legend() show() def demo_interp(): # B-Spline control point inverse function is not yet implemented from pylab import hold, linspace, plot, show hold(True) x = linspace(0, 1, 7) y = [9, 11, 2, 3, 8, 0, 2] t = linspace(0, 1, 400) yc = bspline_control(y, clamp=True) xc = linspace(x[0], x[-1], 9) plot(xc, yc, ':oy', x, y, 'xg') #knot = np.hstack((0, np.linspace(0,1,len(y)), 1)) #fy = _bspline3(knot,yc,t) fy = bspline(yc, t, clamp=True) plot(t, fy, '-.y') show() if __name__ == "__main__": # test() demo() # demo_interp() # speed_check() bumps-0.7.6/bumps/cheby.py000066400000000000000000000146631275122724100154610ustar00rootroot00000000000000r""" Freeform modeling with Chebyshev polynomials. `Chebyshev polynomials `_ $T_k$ form a basis set for functions over $[-1,1]$. The truncated interpolating polynomial $P_n$ is a weighted sum of Chebyshev polynomials up to degree $n$: .. math:: f(x) \approx P_n(x) = \sum_{k=0}^n c_i T_k(x) The interpolating polynomial exactly matches $f(x)$ at the chebyshev nodes $z_k$ and is near the optimal polynomial approximation to $f$ of degree $n$ under the maximum norm. For well behaved functions, the coefficients $c_k$ decrease rapidly, and furthermore are independent of the degree $n$ of the polynomial. 
The models can either be defined directly in terms of the Chebyshev coefficients $c_k$ with *method* = 'direct', or in terms of control points $(z_k, f(z_k))$ at the Chebyshev nodes :func:`cheby_points` with *method* = 'interp'. Bounds on the parameters are easier to control using 'interp', but the function may oscillate wildly outside the bounds. Bounds on the oscillation are easier to control using 'direct', but the shape of the profile is difficult to control. """ # TODO: clipping volume fraction to [0,1] distorts parameter space # Option 0: clip to [0,1] # - Bayesian analysis: parameter values outside the domain will be equally # probable out to infinity # - Newton methods: the fit space is flat outside the domain, which leads # to a degenerate hessian. # - Direct methods: won't fail, but will be subject to random walk # performance outside the domain. # - trivial to implement! # Option 1: compress (-inf,0.001] and [0.999,inf) into (0,0.001], [0.999,1) # - won't address any of the problems of clipping # Option 2: have chisq return inf for points outside the domain # - Bayesian analysis: correctly assigns probability zero # - Newton methods: degenerate Hessian outside domain # - Direct methods: random walk outside domain # - easy to implement # Option 3: clip outside domain but add penalty based on amount of clipping # A profile based on clipping may have lower chisq than any profile that # can be described by a valid model (e.g., by having a sharper transition # than would be allowed by the model), leading to a minimum outside D. # Adding a penalty constant outside D would help, but there is no constant # that works everywhere. We could use a constant greater than the worst # chisq seen so far in D, which can guarantee an arbitrarily low P(x) and # a global minimum within D, but for Newton methods, the boundary may still # have spurious local minima and objective value now depends on history. 
# Linear compression of profile to fit within the domain would avoid # unreachable profile shapes (this is just a linear transform on chebyshev # coefficients), and the addition of the penalty value would reduce # parameter correlations that result from having transformed parameters # resulting in identical profiles. Returning T = ||A(x)|| from render, # with A being a transform that brings the profile within [0,1], the # objective function can return P'(x) = P(x)/(10*(1+sum(T_i)^4) for all # slabs i, or P(x) if no slabs return a penalty value. So long as T is # monotonic with increasing badness, with value of 0 within D, and so long # as no values of x outside D can generate models that cannot be # expressed for any x within D, then any optimizer should return a valid # result at the global minimum. There may still be local minima outside # the boundary, so information that the the value is outside the domain # still needs to pass through a local optimizer to the fitting program. # This approach could be used to transform a box constrained # problem to an unconstrained problem using clipping+penalty on the # parameter values and removing the need for constrained Newton optimizers. # - Bayesian analysis: parameters outside D have incorrect probability, but # with a sufficiently large penalty, P(x) ~ 0; if the penalty value is # too low, details of the correlations outside D may leak into D. # - Newton methods: Hessian should point back to domain # - Direct methods: random walk should be biased toward the domain # - moderately complicated __all__ = ["profile", "cheby_approx", "cheby_val", "cheby_points", "cheby_coeff"] import numpy as np from numpy import real, exp, pi, cos, arange, asarray from numpy.fft import fft def profile(c, t, method): r""" Evaluate the chebyshev approximation c at points x. If method is 'direct' then $c_i$ are the coefficients for the chebyshev polynomials $T_i$ yielding $P = \sum_i{c_i T_i(x)}$. 
If method is 'interp' then $c_i$ are the values of the interpolated function $f$ evaluated at the chebyshev points returned by :func:`cheby_points`. """ if method == 'interp': c = cheby_coeff(c) return cheby_val(c, t) def cheby_approx(n, f, range=(0, 1)): """ Return the coefficients for the order n chebyshev approximation to function f evaluated over the range [low,high]. """ fx = f(cheby_points(n, range=range)) return cheby_coeff(fx) def cheby_val(c, x): r""" Evaluate the chebyshev approximation c at points x. The values $c_i$ are the coefficients for the chebyshev polynomials $T_i$ yielding $p(x) = \sum_i{c_i T_i(x)}$. """ c = np.asarray(c) if len(c) == 0: return 0 * x # Crenshaw recursion from numerical recipes sec. 5.8 y = 4 * x - 2 d = dd = 0 for c_j in c[:0:-1]: d, dd = y * d + (c_j - dd), d return y * (0.5 * d) + (0.5 * c[0] - dd) def cheby_points(n, range=(0, 1)): r""" Return the points in at which a function must be evaluated to generate the order $n$ Chebyshev approximation function. Over the range [-1,1], the points are $p_k = \cos(\pi(2 k + 1)/(2n))$. Adjusting the range to $[x_L,x_R]$, the points become $x_k = \frac{1}{2} (p_k - x_L + 1)/(x_R-x_L)$. """ return 0.5 * (cos(pi * (arange(n) + 0.5) / n) - range[0] + 1) / (range[1] - range[0]) def cheby_coeff(fx): """ Compute chebyshev coefficients for a polynomial of order n given the function evaluated at the chebyshev points for order n. This can be used as the basis of a direct interpolation method where the n control points are positioned at cheby_points(n). """ fx = asarray(fx) n = len(fx) w = exp((-0.5j * pi / n) * arange(n)) y = np.hstack((fx[0::2], fx[1::2][::-1])) c = (2. / n) * real(fft(y) * w) return c bumps-0.7.6/bumps/cli.py000066400000000000000000000446051275122724100151350ustar00rootroot00000000000000""" Bumps command line interface. The functions in this module are used by the bumps command to implement the command line interface. 
Bumps plugin models can use them to create stand alone applications with a similar interface. For example, the Refl1D application uses the following:: from . import fitplugin import bumps.cli bumps.cli.set_mplconfig(appdatadir='Refl1D') bumps.cli.install_plugin(fitplugin) bumps.cli.main() After completing a set of fits on related systems, a post-analysis script can use :func:`load_model` to load the problem definition and :func:`load_best` to load the best value found in the fit. This can be used for example in experiment design, where you look at the expected parameter uncertainty when fitting simulated data from a range of experimental systems. """ from __future__ import with_statement, print_function __all__ = ["main", "install_plugin", "set_mplconfig", "config_matplotlib", "load_model", "preview", "load_best", "save_best", "resynth"] import sys import os import re import warnings import traceback import shutil try: import dill as pickle except ImportError: import pickle import numpy as np # np.seterr(all="raise") from . import fitters from .fitters import FitDriver, StepMonitor, ConsoleMonitor, nllf_scale from .mapper import MPMapper, AMQPMapper, MPIMapper, SerialMapper from .formatnum import format_uncertainty from . import util from . import initpop from . import __version__ from . import plugin from . import options from .util import pushdir def install_plugin(p): """ Replace symbols in :mod:`bumps.plugin` with application specific methods. """ for symbol in plugin.__all__: if hasattr(p, symbol): setattr(plugin, symbol, getattr(p, symbol)) def load_model(path, model_options=None): """ Load a model file. *path* contains the path to the model file. *model_options* are any additional arguments to the model. The sys.argv variable will be set such that *sys.argv[1:] == model_options*. """ from .fitproblem import load_problem # Change to the target path before loading model so that data files # can be given as relative paths in the model file. 
This should also # allow imports as expected from the model file. directory, filename = os.path.split(path) with pushdir(directory): # Try a specialized model loader problem = plugin.load_model(filename) if problem is None: # print "loading",filename,"from",directory if filename.endswith('pickle'): # First see if it is a pickle problem = pickle.load(open(filename, 'rb')) else: # Then see if it is a python model script problem = load_problem(filename, options=model_options) # Guard against the user changing parameters after defining the problem. problem.model_reset() problem.path = os.path.abspath(path) if not hasattr(problem, 'title'): problem.title = filename problem.name, _ = os.path.splitext(filename) problem.options = model_options return problem def preview(problem, view=None): """ Show the problem plots and parameters. """ import pylab problem.show() problem.plot(view=view) pylab.show() def save_best(fitdriver, problem, best, view=None): """ Save the fit data, including parameter values, uncertainties and plots. *fitdriver* is the fitter that was used to drive the fit. *problem* is a FitProblem instance. *best* is the parameter set to save. """ # Make sure the problem contains the best value # TODO: avoid recalculating if problem is already at best. problem.setp(best) # print "remembering best" pardata = "".join("%s %.15g\n" % (name, value) for name, value in zip(problem.labels(), problem.getp())) open(problem.output_path + ".par", 'wt').write(pardata) fitdriver.save(problem.output_path) with util.redirect_console(problem.output_path + ".err"): fitdriver.show() fitdriver.plot(output_path=problem.output_path, view=view) fitdriver.show() # print "plotting" PARS_PATTERN = re.compile(r"^(?P