pax_global_header 0000666 0000000 0000000 00000000064 11757757505 0014535 g ustar 00root root 0000000 0000000 52 comment=8f6e10595ffe24e02428880adafdc99ac43d006f
openopt-0.38+svn1589/ 0000775 0000000 0000000 00000000000 11757757505 0014304 5 ustar 00root root 0000000 0000000 openopt-0.38+svn1589/COPYING.txt 0000664 0000000 0000000 00000001773 11757757505 0016165 0 ustar 00root root 0000000 0000000 Copyright (c) 2007-2009, Dmitrey Kroshko, www.icyb.kiev.ua optimization department
OpenOpt Kernel license is New BSD license, allows using from both open- and closed-code software
All connected solvers have their own licenses.
-----------------------------------
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
openopt-0.38+svn1589/DOCUMENTATION.html 0000664 0000000 0000000 00000000110 11757757505 0017173 0 ustar 00root root 0000000 0000000
openopt-0.38+svn1589/INSTALL.txt 0000664 0000000 0000000 00000001676 11757757505 0016165 0 ustar 00root root 0000000 0000000 Do you have any problems with installation for Linux, MS Windows, MacOS?
Check webpage
http://openopt.org/Install
Installation (very briefly; the link above leads to more precise instructions):
1. Ensure you have Python (v >= 2.5 is recommended), numpy (v >= 1.1.0 is recommended) and setuptools installed.
You can use Linux software channels, like this for Debian/*UBUNTU:
[sudo] aptitude install python-numpy
[sudo] aptitude install python-setuptools
(Linux OSes already have Python installed)
# optional, for graphics output:
[sudo] aptitude install python-matplotlib
# optional packages that may be present in software channels:
# python-scipy, python-cvxopt, some others
All optional packages and/or solvers could be installed after OO installation, when they will be required for the tasks involved.
2.
[sudo] python setup.py install
or
[sudo] python setup.py develop
3. To check installation you could try running "python nlp_1.py" from /examples directory
openopt-0.38+svn1589/README.txt 0000664 0000000 0000000 00000000721 11757757505 0016002 0 ustar 00root root 0000000 0000000 openopt license: BSD
openopt usage:
from openopt import *
#and then see
help(NLP), help(LP), help(QP) # ... etc
See also directory "examples", using on-line documentation is highly recommended:
http://openopt.org/Doc
See also:
OpenOpt homepage:
http://openopt.org
New! Numerical optimization forum:
http://forum.openopt.org/
Found OpenOpt useful?
Please mention it in our guestbook
http://forum.openopt.org/viewforum.php?id=11
Regards,
OpenOpt developers
openopt-0.38+svn1589/openopt/ 0000775 0000000 0000000 00000000000 11757757505 0015770 5 ustar 00root root 0000000 0000000 openopt-0.38+svn1589/openopt/__init__.py 0000664 0000000 0000000 00000002137 11757757505 0020104 0 ustar 00root root 0000000 0000000 #! /usr/bin/env python
#from .ooVersionNumber import __version__

# Legacy Python 2 package init (OpenOpt 0.38): the bare module imports below
# rely on implicit relative imports plus the sys.path tweak that follows.
import os, sys

# directory containing this __init__.py, with a trailing os.sep
curr_dir = ''.join([elem + os.sep for elem in __file__.split(os.sep)[:-1]])
# make both the package dir and its 'kernel' subdir importable as top-level
sys.path += [curr_dir, curr_dir + 'kernel']
from ooVersionNumber import __version__
from oo import *
#from kernel.GUI import manage
#from kernel.oologfcn import OpenOptException
#from kernel.nonOptMisc import oosolver
from GUI import manage
from oologfcn import OpenOptException
from nonOptMisc import oosolver
from mfa import MFA

# Heuristic detection of the commercial Enthought Python Distribution:
# 'enthought' or 'envisage'+'mayavi' importable => assume EPD;
# 'xy' importable => presumably PythonXY (which also bundles mayavi),
# so the flag is reset again — NOTE(review): detection is name-based only
isE = False
try:
    import enthought
    isE = True
except ImportError:
    pass
try:
    import envisage
    import mayavi
    isE = True
except ImportError:
    pass
try:
    import xy
    isE = False
except ImportError:
    pass
if isE:
    s = """
    Seems like you are using OpenOpt from
    commercial Enthought Python Distribution;
    consider using free GPL-licensed alternatives
    PythonXY (http://www.pythonxy.com) or
    Sage (http://sagemath.org) instead.
    """
    print(s)

#__all__ = filter(lambda s:not s.startswith('_'),dir())
#from numpy.testing import NumpyTest
#test = NumpyTest().test
openopt-0.38+svn1589/openopt/doc/ 0000775 0000000 0000000 00000000000 11757757505 0016535 5 ustar 00root root 0000000 0000000 openopt-0.38+svn1589/openopt/doc/DOCUMENTATION.html 0000664 0000000 0000000 00000000110 11757757505 0021424 0 ustar 00root root 0000000 0000000
openopt-0.38+svn1589/openopt/doc/badlyScaled.py 0000664 0000000 0000000 00000004516 11757757505 0021324 0 ustar 00root root 0000000 0000000 from numpy import *
from openopt import *

# coeff makes x[1] enter the objective on a vastly different scale than x[0],
# i.e. the problem is badly scaled
coeff = 1e-7
f = lambda x: (x[0]-20)**2+(coeff * x[1] - 80)**2 # objFun
c = lambda x: (x[0]-14)**2-1 # non-lin ineq constraint(s) c(x) <= 0
# for the problem involved: f_opt =25, x_opt = [15.0, 8.0e9]
x0 = [-4,4]
# even modification of stop criteria can't help to achieve the desired solution:
someModifiedStopCriteria = {'gtol': 1e-15, 'ftol': 1e-13, 'xtol': 1e-13, 'maxIter': 1e3}
# using default diffInt = 1e-7 is inappropriate:
p = NLP(f, x0, c=c, iprint = 100, **someModifiedStopCriteria)
r = p.solve('ralg')
print r.ff, r.xf # will print something like "6424.9999886000014 [ 15.0000005 4. ]"
"""
for to improve the solution we will use
changing either p.diffInt from default 1e-7 to [1e-7, 1]
or p.scale from default None to [1, 1e-7]
latter (using p.scale) is more recommended
because it affects xtol for those solvers
who use OO stop criteria
(ralg, lincher, nsmm, nssolve and mb some others)
xtol will be compared to scaled x shift:
is || (x[k] - x[k-1]) * scale || < xtol
You can define scale and diffInt as
numpy arrays, matrices, Python lists, tuples
"""
# second attempt: same problem, but with scale compensating the bad scaling
p = NLP(f, x0, c=c, scale = [1, coeff], iprint = 100, **someModifiedStopCriteria)
r = p.solve('ralg')
print r.ff, r.xf # "24.999996490694787 [ 1.50000004e+01 8.00004473e+09]" - much better
"""
Full Output:
-----------------------------------------------------
solver: ralg problem: unnamed goal: minimum
iter objFunVal log10(maxResidual)
0 6.976e+03 2.51
51 6.425e+03 -6.10
istop: 4 (|| F[k] - F[k-1] || < ftol)
Solver: Time Elapsed = 0.16 CPU Time Elapsed = 0.16
objFunValue: 6424.9999 (feasible, max constraint = 8e-07)
6424.999932 [ 15.0000004 4. ]
-----------------------------------------------------
solver: ralg problem: unnamed goal: minimum
iter objFunVal log10(maxResidual)
0 6.976e+03 2.51
100 4.419e+01 -5.99
200 2.504e+01 -6.10
300 2.503e+01 -6.10
400 2.503e+01 -6.10
500 2.503e+01 -6.10
506 2.500e+01 -6.91
istop: 3 (|| X[k] - X[k-1] || < xtol)
Solver: Time Elapsed = 1.59 CPU Time Elapsed = 1.59
objFunValue: 25.000189 (feasible, max constraint = 1.23911e-07)
25.0001894297 [ 1.50000001e+01 8.00137858e+08]
"""
openopt-0.38+svn1589/openopt/doc/checkDerivatives.py 0000664 0000000 0000000 00000005207 11757757505 0022376 0 ustar 00root root 0000000 0000000 from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros

N = 30  # problem dimension
M = 5
ff = lambda x: ((x-M)**2).sum()  # objective: sum of squares
p = NLP(ff, cos(arange(N)))

def df(x):
    # user-supplied objective gradient with deliberately wrong entries,
    # so that p.checkdf() below has something to report
    r = 2*(x-M)
    r[0] += 15 #incorrect derivative
    r[8] += 80 #incorrect derivative
    return r
p.df = df

p.c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]  # c(x) <= 0 constraints
def dc(x):
    # Jacobian of c, shape (2, p.n); one entry deliberately wrong
    r = zeros((2, p.n))
    r[0,0] = 2 * 4 * x[0]**3
    r[1,1] = 2 * x[1]
    r[1,2] = 2 * x[2] + 15 #incorrect derivative
    return r
p.dc = dc

p.h = lambda x: (1e1*(x[-1]-1)**4, (x[-2]-1.5)**4)  # h(x) = 0 constraints
def dh(x):
    # Jacobian of h, shape (2, p.n); one entry deliberately wrong
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3 + 15 #incorrect derivative
    return r
p.dh = dh

# compare the user-supplied derivatives against finite-difference estimates
p.checkdf()
p.checkdc()
p.checkdh()
"""
you can use p.checkdF(x) for other point than x0 (F is f, c or h)
p.checkdc(myX)
or
p.checkdc(x=myX)
values with difference greater than
maxViolation (default 1e-5)
will be shown
p.checkdh(maxViolation=1e-4)
p.checkdh(myX, maxViolation=1e-4)
p.checkdh(x=myX, maxViolation=1e-4)
#################################################################################
Typical output (unfortunately, in terminal or other IDEs the blank space used in strings separation can have other lengths):
Note that RD (relative difference) is defined as
int(ceil(log10(abs(Diff) / maxViolation + 1e-150)))
where
Diff = 1 - (info_user+1e-8)/(info_numerical + 1e-8)
OpenOpt checks user-supplied gradient df (shape: (30,) )
according to:
prob.diffInt = [ 1.00000000e-07]
|1 - info_user/info_numerical| <= prob.maxViolation = 0.01
df num user-supplied numerical RD
0 +7.000e+00 -8.000e+00 3
8 -2.291e+00 -1.029e+01 2
max(abs(df_user - df_numerical)) = 14.9999995251
(is registered in df number 0)
========================
OpenOpt checks user-supplied gradient dc (shape: (2, 30) )
according to:
prob.diffInt = [ 1.00000000e-07]
|1 - info_user/info_numerical| <= prob.maxViolation = 0.01
dc num i,j:dc[i]/dx[j] user-supplied numerical RD
32 1 / 2 +1.417e+01 -8.323e-01 4
max(abs(dc_user - dc_numerical)) = 14.9999999032
(is registered in dc number 32)
========================
OpenOpt checks user-supplied gradient dh (shape: (2, 30) )
according to:
prob.diffInt = [ 1.00000000e-07]
|1 - info_user/info_numerical| <= prob.maxViolation = 0.01
dh num i,j:dh[i]/dx[j] user-supplied numerical RD
58 1 / 28 -4.474e+01 -5.974e+01 2
max(abs(dh_user - dh_numerical)) = 14.9999962441
(is registered in dh number 58)
========================
"""
openopt-0.38+svn1589/openopt/doc/lpToMPS.py 0000664 0000000 0000000 00000002157 11757757505 0020412 0 ustar 00root root 0000000 0000000 # Example of export OpenOpt LP to MPS file
# you should have lpsolve and its Python binding properly installed
# (you may take a look at the instructions from openopt.org/LP)
# You can solve problems defined in MPS files
# with a variety of solvers at NEOS server for free
# http://neos.mcs.anl.gov/
# BTW they have Python API along with web API and other
from numpy import *
from openopt import LP

# LP: minimize f'x s.t. Ax <= b, Aeq x = beq, lb <= x <= ub
f = array([15,8,80])
A = mat('1 2 3; 8 15 80; 8 80 15; -100 -10 -1') # numpy.ndarray is also allowed
b = [15, 80, 150, -800] # numpy.ndarray, matrix etc are also allowed
Aeq = mat('80 8 15; 1 10 100') # numpy.ndarray is also allowed
beq = (750, 80)
lb = [4, -80, -inf]
ub = [inf, -8, inf]
p = LP(f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub, name = 'lp_1')
# or p = LP(f=f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)
# if the file name does not end with '.MPS' or '.mps'
# then '.mps' will be appended
success = p.exportToMPS('asdf')
# success is False if an error occurred (read-only file system, no write access, etc)
# otherwise success is True
# objFunValue should be 204.48841578
# x_opt should be [ 9.89355041 -8. 1.5010645 ]
openopt-0.38+svn1589/openopt/doc/milpToMPS.py 0000664 0000000 0000000 00000002355 11757757505 0020740 0 ustar 00root root 0000000 0000000 # Example of export OpenOpt MILP to MPS file
# you should have lpsolve and its Python binding properly installed
# (you may take a look at the instructions from openopt.org/LP)
# You can solve problems defined in MPS files
# with a variety of solvers at NEOS server for free
# http://neos.mcs.anl.gov/
# BTW they have Python API along with web API and other
from numpy import *
from openopt import MILP

f = [1, 2, 3, 4, 5, 4, 2, 1]
# indexing starts from ZERO!
# while in native lpsolve-python wrapper from 1
# so if you used [5,8] for native lp_solve python binding
# you should use [4,7] instead
intVars = [4, 7]
lb = -1.5 * ones(8)
ub = 15 * ones(8)
A = zeros((5, 8))
b = zeros(5)
# fill the inequality constraints Ax <= b with deterministic test data
for i in xrange(5):
    for j in xrange(8):
        A[i,j] = -8+sin(8*i) + cos(15*j)
    b[i] = -150 + 80*sin(80*i)
p = MILP(f=f, lb=lb, ub=ub, A=A, b=b, intVars=intVars)
# if the file name does not end with '.MPS' or '.mps'
# then '.mps' will be appended
success = p.exportToMPS('/home/dmitrey/PyTest/milp_1')
# or write into current dir:
# success = p.exportToMPS('milp')
# success is False if an error occurred (read-only file system, no write access, etc)
# otherwise success is True
# f_opt is 25.801450769161505
# x_opt is [ 15. 10.15072538 -1.5 -1.5 -1. -1.5 -1.5 15.]
openopt-0.38+svn1589/openopt/doc/oosolver.py 0000664 0000000 0000000 00000001407 11757757505 0020761 0 ustar 00root root 0000000 0000000 """
The example illustrates oosolver usage
You should pay special attention for "isInstalled" field
oosolver work is untested for converters
"""
from openopt import oosolver, NLP

ipopt = oosolver('ipopt', color='r') # oosolver can handle prob parameters
ralg = oosolver('ralg', color='k', alp = 4.0) # as well as solver parameters
asdf = oosolver('asdf')  # intentionally non-existing solver name, for the demo
solvers = [ralg, asdf, ipopt]
# or just
# solvers = [oosolver('ipopt', color='r'), oosolver('asdf'), oosolver('ralg', color='k', alp = 4.0)]
for solver in solvers:
    # skip solvers that are not installed (note the "isInstalled" field)
    if not solver.isInstalled:
        print 'solver ' + solver.__name__ + ' is not installed'
        continue
    p = NLP(x0 = 15, f = lambda x: x**4, df = lambda x: 4 * x**3, iprint = 0)
    # show the plot window only after the last solver has finished
    r = p.solve(solver, plot=1, show = solver == solvers[-1])
openopt-0.38+svn1589/openopt/doc/probAssign.py 0000664 0000000 0000000 00000003011 11757757505 0021211 0 ustar 00root root 0000000 0000000 # Problem assignment in OpenOpt is performed in the following way:
from openopt import NLP
# or other constructor names: LP, MILP, QP etc,
# for full list see http://openopt.org/Problems
# p = NLP(*args, **kwargs)
"""
you should read help(NLP) for more details,
also reading /examples/nlp_1.py and other files from the directory is highly recommended
Each class has some expected arguments
e.g. for NLP it's f and x0 - objective function and start point
thus using NLP(myFunc, myStartPoint) will assign myFunc to f and myStartPoint to x0 prob fields
alternatively, you could use it as kwargs, possibly along with some other kwargs:
"""
p = NLP(x0=15, f = lambda x: x**2-0.4, df = lambda x: 2*x, iprint = 0, plot = 1)
# after the problem is assigned, you could tune the parameters,
# along with some other that have been set as defaults:
p.x0 = 0.15
p.plot = 0
def f(x):
    # piecewise objective: identity for positive x, parabola otherwise
    return x if x>0 else x**2
p.f = f
# At last, you can modify any prob parameters in minimize/maximize/solve/manage functions:
r = p.minimize('ralg', x0 = -1.5, iprint = -1, plot = 1, color = 'r')
# or
#r = p.manage('ralg', start = False, iprint = 0, x0 = -1.5)
"""
Note that *any* kwarg passed to constructor will be assigned
e.g.
p = NLP(f, x0, myName='JohnSmith')
is equivalent to
p.myName='JohnSmith'
It can be very convenient for user-supplied callback functions
(see /examples/userCallback.py)
(instead of using "global" as you have to do in MATLAB)
See also http://openopt.org/OOFrameworkDoc#Result_structure for result structure (r) fields
"""
openopt-0.38+svn1589/openopt/doc/restricted_dom.py 0000664 0000000 0000000 00000002100 11757757505 0022107 0 ustar 00root root 0000000 0000000
"""
Some non-linear functions have much more restricted dom than R^nVars.
For example F(x) = log(x); dom F = R+ = {x: x>0}
For optimization solvers it is wont to expect user-povided F(x) = nan if x is out of dom.
I can't inform how successfully OO-connected solvers
will handle a prob instance with restricted dom
because it seems to be too prob-specific
Still I can inform that ralg handles the problems rather well
provided in every point x from R^nVars at least one ineq constraint is active
(i.e. value constr[i](x) belongs to R+)
Note also that some solvers require x0 inside dom objFunc.
For ralg it doesn't matter.
"""
from numpy import *
from openopt import NLP
n = 100
an = arange(n) # array [0, 1, 2, ..., n-1]
x0 = n+15*(1+cos(an))
f = lambda x: (x**2).sum() + sqrt(x**3).sum()
df = lambda x: 2*x + 1.5*x**0.5
lb = zeros(n)
solvers = ['ralg']
#solvers = ['ipopt']
for solver in solvers:
p = NLP(f, x0, df=df, lb=lb, xtol = 1e-6, iprint = 50, maxIter = 10000, maxFunEvals = 1e8)
#p.checkdf()
r = p.solve(solver)
# expected r.xf = small values near zero
openopt-0.38+svn1589/openopt/doc/solverParams.py 0000664 0000000 0000000 00000001637 11757757505 0021574 0 ustar 00root root 0000000 0000000 """
Modifying of some solver default parameters is performed via
either kwargs for p.solve() (they can be solver or prob attributes)
or using oosolver (see examples/oosolver.py for more details).
"""
from numpy import *
from openopt import *

# objective with several local extrema (for global/nonsmooth solver demos)
f = lambda x: (x[0]-1.5)**2 + sin(0.8 * x[1] ** 2 + 15)**4 + cos(0.8 * x[2] ** 2 + 15)**4 + (x[3]-7.5)**4
lb, ub = -ones(4), ones(4)
# example 1: pass solver parameters directly as solve() kwargs
p = GLP(f, lb=lb, ub=ub, maxIter = 1e3, maxCPUTime = 3, maxFunEvals=1e5, fEnough = 80)
# solve() kwargs can include some prob settings (like maxTime) as well
r = p.solve('galileo', crossoverRate = 0.80, maxTime = 3, population = 15, mutationRate = 0.15)
# example 2, via oosolver
solvers = [oosolver('ralg', h0 = 0.80, alp = 2.15, show = False), oosolver('ralg', h0 = 0.15, alp = 2.80, color = 'k')]
for i, solver in enumerate(solvers):
    p = NSP(f, [0]*4, lb=lb, ub=ub, legend='ralg'+str(i+1))
    r = p.solve(solver, plot=True)
openopt-0.38+svn1589/openopt/doc/textOutput.py 0000664 0000000 0000000 00000002031 11757757505 0021310 0 ustar 00root root 0000000 0000000 """
You can manage text output in OpenOpt via the following prob parameters:
Prob field name default value
iprint 10
iterObjFunTextFormat '%0.3e'
finalObjFunTextFormat '%0.8g'
iprint: do text output each iprint-th iteration
You can use iprint = 0 for final output only or iprint < 0 to omit whole output
In future warnings are intended to be shown if iprint >= -1.
However, some solvers like ALGENCAN have their own text output system, that's hard to suppress, it requires using different approach like, for example, http://permalink.gmane.org/gmane.comp.python.scientific.user/15465
iterObjFunTextFormat: how iter output objFun values are represented
for example, '%0.3e' yields lines like
iter objFunVal
0 1.947e+03
10 1.320e+03
...
finalObjFunTextFormat: how final output objFun value is represented
for example finalObjFunTextFormat='%0.1f' yields
...
objFunValue: 7.9
See Python language documentation for text format specification.
"""
openopt-0.38+svn1589/openopt/doc/userArgs.py 0000664 0000000 0000000 00000002465 11757757505 0020711 0 ustar 00root root 0000000 0000000 """
Example of using additional parameters for user f, c, h functions
Note! For oofun handling user parameters is performed
in the same way:
my_oofun.args = (...)
they will be passed to derivative function as well (if you have supplied it)
"""
from openopt import NLP
from numpy import asfarray

# objective f(x, a): 'a' is a user-supplied extra argument (see p.args.f below)
f = lambda x, a: (x**2).sum() + a * x[0]**4
x0 = [8, 15, 80]
p = NLP(f, x0)
#using c(x)<=0 constraints
p.c = lambda x, b, c: (x[0]-4)**2 - 1 + b*x[1]**4 + c*x[2]**4
#using h(x)=0 constraints
p.h = lambda x, d: (x[2]-4)**2 + d*x[2]**4 - 15
p.args.f = 4 # i.e. here we use a=4
# so it's the same to "a = 4; p.args.f = a" or just "p.args.f = a = 4"
p.args.c = (1,2)
p.args.h = 15
# Note 1: using tuple p.args.h = (15,) is valid as well
# Note 2: if all your funcs use same args, you can just use
# p.args = (your args)
# Note 3: you could use f = lambda x, a: (...); c = lambda x, a, b: (...); h = lambda x, a: (...)
# Note 4: if you use df or d2f, they should handle same additional arguments;
# same to c - dc - d2c, h - dh - d2h
# Note 5: instead of myfun = lambda x, a, b: ...
# you can use def myfun(x, a, b): ...
r = p.solve('ralg')
"""
If you will encounter any problems with additional args implementation,
you can use the simple python trick
p.f = lambda x: other_f(x, )
same to c, h, df, etc
"""
openopt-0.38+svn1589/openopt/doc/userCallback.py 0000664 0000000 0000000 00000010645 11757757505 0021510 0 ustar 00root root 0000000 0000000 """
usage:
p = someOOclass(..., callback=MyIterFcn, ...)
or
p = ...
p.callback = MyIterFcn
or p.callback = (MyIterFcn1, MyIterFcn2, MyIterFcn3, ..., MyIterFcnN)
or p.callback = [MyIterFcn1, MyIterFcn2, MyIterFcn3, ..., MyIterFcnN]
each user-defined function MyIterFunc should return one of the following:
1. a flag value - 0, 1, True, False
flag = True or 1 means user want to stop calculations
(r.istop=80, r.msg = 'user-defined' )
2. someRealValue like 15 or 80.15 or 1.5e4 (r.istop=someRealValue, r.msg = 'user-defined')
3. Python list (or tuple) - [istop, msg] (r.istop=istop, r.msg=msg)
works for ralg and lincher, but may doesn't work for some other solvers
(like scipy_cobyla, that has neither native callback nor call gradient)
"""
def MyIterFcn(p):
    # Example user callback; p is the running prob instance.
    # Per the module docstring: return False/0 to continue, True/1 to stop
    # (istop=80), a number to stop with that istop, or a (istop, msg) pair.
    # observing non-feasible ralg iter points
    if p.rk > p.contol: # p.rk is current iter max residual
        print '--= non-feasible ralg iter =--'
        print 'itn:', p.iter
        #however, I intend to change p.iter to p.iter in OpenOpt code soon
        print 'curr f:', p.fk
        # print 'curr x[:8]:', p.xk[:8]
        print 'max constraint value', p.rk
    """
    BTW you can store data in any unique field of p
    for example
    if some_cond: p.JohnSmith = 15
    else: p.JohnSmith = 0
    However, special field "user" is intended for the purpose:
    p.user.mydata1 = (something)
    # or, for another example:
    if p.iter == 0: p.user.mylist = []
    p.user.mylist.append(something)
    """
    if p.fk < 1.5 and p.rk < p.contol:
        #NB! you could use p.fEnough = 15, p.contol=1e-5 in prob assignment instead
        return (15, 'value obtained is enough' )
        # or
        # return 15 (hence r.istop=15, r.msg='user-defined')
        # or return True (hence r.istop=80, r.msg='user-defined')
        # or return 1 (hence r.istop = 80, r.msg='user-defined')
    else:
        return False
        # or
        # return 0
from openopt import NSP
from numpy import cos, asfarray, arange, sign

N = 75
# nonsmooth objective: geometrically weighted sum of |x_i|
f = lambda x: sum(1.2 ** arange(len(x)) * abs(x))
df = lambda x: 1.2 ** arange(len(x)) * sign(x)
x0 = cos(1+asfarray(range(N)))
#non-linear constraint c(x) <= 0:
c = lambda x: abs(x[4]-0.8) + abs(x[5]-1.5) - 0.015
# the callback defined above is attached here via the callback kwarg
p = NSP(f, x0, df=df, c=c, callback=MyIterFcn, contol = 1e-5, maxIter = 1e4, iprint = 100, xtol = 1e-8, ftol = 1e-8)
#optional:
#p.plot = 1
r = p.solve('ralg')
print r.xf[:8]
"""
-----------------------------------------------------
solver: ralg problem: unnamed goal: minimum
iter objFunVal log10(maxResidual)
0 2.825e+06 0.02
--= non-feasible ralg iter =--
itn: 0
curr f: [ 2824966.83813157]
max constraint value 1.04116752789
--= non-feasible ralg iter =--
itn: 1
curr f: [ 2824973.2896607]
max constraint value 1.75725959686
--= non-feasible ralg iter =--
itn: 2
curr f: [ 2824966.83813157]
max constraint value 1.04116752789
--= non-feasible ralg iter =--
itn: 3
curr f: [ 2824970.22518437]
max constraint value 0.413756712605
--= non-feasible ralg iter =--
itn: 4
curr f: [ 2824969.02632034]
max constraint value 0.0818395397163
--= non-feasible ralg iter =--
itn: 5
curr f: [ 2824969.37414607]
max constraint value 0.0406513995891
--= non-feasible ralg iter =--
itn: 6
curr f: [ 2824969.20023321]
max constraint value 0.00849187556755
--= non-feasible ralg iter =--
itn: 7
curr f: [ 2824969.20119103]
max constraint value 0.00560799704173
--= non-feasible ralg iter =--
itn: 8
curr f: [ 2824969.2065267]
max constraint value 0.00416641026253
--= non-feasible ralg iter =--
itn: 9
curr f: [ 2824969.22185181]
max constraint value 0.0421905566026
--= non-feasible ralg iter =--
itn: 10
curr f: [ 2824969.2065267]
max constraint value 0.00416641026253
--= non-feasible ralg iter =--
itn: 11
curr f: [ 2824969.20952515]
max constraint value 0.00327175155207
100 2.665e+04 -100.00
200 4.845e+03 -100.00
300 1.947e+02 -100.00
400 9.298e+01 -100.00
500 5.160e+01 -100.00
600 2.600e+01 -100.00
700 1.070e+01 -100.00
800 6.994e+00 -100.00
900 5.375e+00 -100.00
1000 5.375e+00 -100.00
1094 5.375e+00 -100.00
istop: 4 (|| F[k] - F[k-1] || < ftol)
Solver: Time Elapsed = 4.62 CPU Time Elapsed = 4.48
objFunValue: 5.3748608 (feasible, max constraint = 0)
[ -1.06086135e-07 5.65437885e-08 -1.29682567e-07 6.12571176e-09
7.95256506e-01 1.49731951e+00 -1.42518171e-09 4.15961658e-08]
"""
openopt-0.38+svn1589/openopt/examples/ 0000775 0000000 0000000 00000000000 11757757505 0017606 5 ustar 00root root 0000000 0000000 openopt-0.38+svn1589/openopt/examples/GUI_1.py 0000664 0000000 0000000 00000002043 11757757505 0021023 0 ustar 00root root 0000000 0000000 """
OpenOpt GUI:
function manage() usage example
"""
from openopt import NLP, manage
from numpy import cos, arange, ones, asarray, abs, zeros

N = 50  # problem dimension
M = 5
p = NLP(lambda x: ((x-M)**2).sum(), cos(arange(N)))
p.lb, p.ub = -6*ones(N), 6*ones(N)
# tighten bounds on two individual coordinates
p.lb[3] = 5.5
p.ub[4] = 4.5
p.c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]
p.h = (lambda x: 1e1*(x[-1]-1)**4, lambda x: (x[-2]-1.5)**4)
"""
minTime is used here
for to provide enough time for user
to play with GUI
"""
minTime = 1.5 # sec
p.name = 'GUI_example'
p.minTime = minTime
"""
hence maxIter, maxFunEvals etc
will not trigger till minTime
only same iter point x_k-1=x_k
or some coords = nan
can stop calculations
other antistop criteria: minFunEvals, minIter, minCPUTime
however, some solvers cannot handle them
"""
# start=True means don't wait for user to press "Run"
r = manage(p,'ralg', plot=1, start=True)
"""
or calling manage() as filed of p:
r = p.manage('algencan', plot=1)
"""
if r is not None:
    # r is None if user has pressed "Exit" button
    print 'objfunc val:', r.ff
openopt-0.38+svn1589/openopt/examples/__init__.py 0000664 0000000 0000000 00000000001 11757757505 0021706 0 ustar 00root root 0000000 0000000
openopt-0.38+svn1589/openopt/examples/dfp_1.py 0000664 0000000 0000000 00000002466 11757757505 0021161 0 ustar 00root root 0000000 0000000 """
In the DFP example we will search for z=(a, b, c, d)
that minimizes Sum_i || F(z, X_i) - Y_i ||^2
for the function
F(x0, x1) = a^3 + b * x0 + c * x1 + d * (x0 + x1)^2
Suppose we have the following measurements
X_0 = [0, 1]; Y_0 = 15
X_1 = [1, 0]; Y_1 = 8
X_2 = [1, 1]; Y_2 = 80
X_3 = [3, 4]; Y_3 = 100
X_4 = [1, 15]; Y_4 = 150
subjected to a>=4, c<=30
(we could handle other constraints as well: Ax <= b, Aeq x = beq, c(x) <= 0, h(x) = 0)
"""
from openopt import DFP
from numpy import inf

# model: F(z, X) = z0^3 + z1*X0 + z2*X1 + z3*(X0+X1)^2
f = lambda z, X: z[0]**3 + z[1]*X[0] + z[2]*X[1] + z[3]*(X[0]+X[1])**2
initEstimation = [0] * 4 # start point for solver: [0, 0, 0, 0]
X = ([0, 1], [1, 0], [1, 1], [3, 4], [1, 15]) # list, tuple, numpy array or array-like are OK as well
Y = [15, 8, 80, 100, 150]
# box bounds implementing a >= 4, c <= 30
lb = [4, -inf, -inf, -inf]
ub = [inf, inf, 30, inf]
p = DFP(f, initEstimation, X, Y, lb=lb, ub=ub)
# optional: derivative of f w.r.t. z (consistent with the model above)
p.df = lambda z, X: [3*z[0]**2, X[0], X[1], (X[0]+X[1])**2]
r = p.solve('nlp:ralg', plot=0, iprint = 10)
print('solution: '+str(r.xf)+'\n||residuals||^2 = '+str(r.ff)+'\nresiduals: '+str([f(p.xf, X[i])-Y[i] for i in xrange(len(Y))]))
#solution: [ 3.99999936 5.99708861 -12.25696614 1.04221073]
#||residuals||^2 = 5992.63887806
#residuals: [37.785213926923028, 63.039268675751572, -18.091065285780857, -15.968303801844399, 2.9485118103557397]
openopt-0.38+svn1589/openopt/examples/dfp_2.py 0000664 0000000 0000000 00000003302 11757757505 0021150 0 ustar 00root root 0000000 0000000 """
In the DFP example we will search for z=(a, b, c, d)
that minimizes Sum_i || F(z, X_i) - Y_i ||^2
for the function F: R^2 -> R^2
F(x0, x1) = [
a^3 + b * x0 + c * x1 + d * (x0 + x1)^2,
2*a + 3*b * x0 + 4*c * x1 + 5*d * (x0 + x1)^2
]
Suppose we have the following measurements
X_0 = [0, 1]; Y_0 = [15, 1]
X_1 = [1, 0]; Y_1 = [8, 16]
X_2 = [1, 1]; Y_2 = [80, 800]
X_3 = [3, 4]; Y_3 = [100, 120]
X_4 = [1, 15]; Y_4 = [150, 1500]
subjected to a>=4, c<=30
(we could handle other constraints as well: Ax <= b, Aeq x = beq, c(x) <= 0, h(x) = 0)
"""
from openopt import DFP
from numpy import *

# vector-valued model F: R^2 -> R^2 (two components per data point)
f = lambda z, X: (z[0]**3 + z[1]*X[0] + z[2]*X[1] + z[3]*(X[0]+X[1])**2, 2*z[0] + 3*z[1]*X[0] + 4*z[2]*X[1] + 5*z[3]*(X[0]+X[1])**2)
initEstimation = [0] * 4 # start point for solver: [0, 0, 0, 0]
X = ([0, 1], [1, 0], [1, 1], [3, 4], [1, 15]) # list, tuple, numpy array or array-like are OK as well
Y = [[15, 1], [8, 16], [80, 800], [100, 120], [150, 1500]]
# box bounds implementing a >= 4, c <= 30
lb = [4, -inf, -inf, -inf]
ub = [inf, inf, 30, inf]
p = DFP(f, initEstimation, X, Y, lb=lb, ub=ub)
# optional: derivatives
#p.df = lambda z, X: ([3*z[0]**2, X[0], X[1], (X[0]+X[1])**2], [2, 3*X[0], 4*X[1], 5*(X[0]+X[1])**2])
r = p.solve('nlp:ralg', plot=1, iprint = 10)
print('solution: '+str(r.xf)+'\n||residuals||^2 = '+str(r.ff)+'\nresiduals: ')
rr = [array(f(p.xf, X[i]))-array(Y[i]) for i in xrange(len(Y))]
print rr
#solution: [ 3.99999936 -1.9497013 18.25467922 0.24926213]
#||residuals||^2 = 653639.695247
#residuals:
#[array([ 67.50391074, 81.26502627]), array([ 54.29953022, -12.60279452]), array([ 1.30199584, -719.84514565]), array([ 43.38342685, 223.59677693]), array([ 249.68156224, -83.51282276])]
openopt-0.38+svn1589/openopt/examples/eig_1.py 0000664 0000000 0000000 00000002021 11757757505 0021137 0 ustar 00root root 0000000 0000000 from openopt import EIG
# create a 5 x 5 matrix
import numpy.random as nr
nr.seed(0)  # fixed seed, so the printed results below are reproducible
N = 5
A = nr.rand(N, N)
#define prob
p = EIG(A, goal = {'lm':3}) # search for 3 eigenvalues of largest magnitude
# or goal={'largest magnitude':3}, with or without space inside, case-insensitive
# for whole list of available goals see http://openopt.org/EIG
#solve
r = p.solve('arpack') # arpack is name of the involved solver
print(r.eigenvalues) # [ 0.14607289-0.19602952j -0.65372843+0.j 2.89776724+0.j ]
# for i-th eigenvalue r.eigenvectors[:,i] is corresponding vector,
# as well as it is done for numpy/scipy functions
print(r.eigenvectors)
'''
[[-0.10391145-0.56334829j 0.19592536+0.j 0.43733688+0.j ]
[-0.20999235+0.1812288j -0.03219327+0.j 0.49662623+0.j ]
[-0.21334642+0.21648181j -0.55544796+0.j 0.42977207+0.j ]
[ 0.34828527+0.36295959j 0.62338178+0.j 0.38727512+0.j ]
[ 0.04820760-0.49714496j -0.51327338+0.j 0.47687818+0.j ]]
'''
openopt-0.38+svn1589/openopt/examples/eig_2.py 0000664 0000000 0000000 00000002242 11757757505 0021145 0 ustar 00root root 0000000 0000000 # An example of OpenOpt EIG, see http://openopt.org/EIG for more examples and details
from openopt import EIG
# create a 5 x 5 matrix
import numpy.random as nr
nr.seed(0)  # fixed seed, so the printed results below are reproducible
N = 5
A = nr.rand(N, N)
#define prob: no goal given, so all eigenvalues are computed
p = EIG(A)
#solve
r = p.solve('numpy_eig') # solver numpy.linalg.eig will be used
print(r.eigenvalues) # [ 2.89776724+0.j -0.65372843+0.j 0.14607289+0.19602952j 0.14607289-0.19602952j -0.08530815+0.j]
# for i-th eigenvalue r.eigenvectors[:,i] is corresponding vector,
# as well as it is done for numpy/scipy functions
print(r.eigenvectors)
'''
[[ 0.43733688+0.j -0.19592536+0.j 0.57285154+0.j
0.57285154+0.j 0.63764724+0.j ]
[ 0.49662623+0.j 0.03219327+0.j -0.14013112+0.23938241j
-0.14013112-0.23938241j -0.53642409+0.j ]
[ 0.42977207+0.j 0.55544796+0.j -0.17419089+0.24907549j
-0.17419089-0.24907549j 0.29171743+0.j ]
[ 0.38727512+0.j -0.62338178+0.j -0.42011495-0.27666898j
-0.42011495+0.27666898j -0.45403266+0.j ]
[ 0.47687818+0.j 0.51327338+0.j 0.48015310-0.13758665j
0.48015310+0.13758665j 0.12004364+0.j ]]
'''
openopt-0.38+svn1589/openopt/examples/glp_1.py 0000664 0000000 0000000 00000000621 11757757505 0021161 0 ustar 00root root 0000000 0000000 from openopt import GLP
from numpy import *

# Multiextremal test objective of 4 variables:
# (x0-1.5)^2 + sin(0.8*x1^2+15)^4 + cos(0.8*x2^2+15)^4 + (x3-7.5)^4
def objective(x):
    return (x[0] - 1.5) ** 2 + sin(0.8 * x[1] ** 2 + 15) ** 4 \
         + cos(0.8 * x[2] ** 2 + 15) ** 4 + (x[3] - 7.5) ** 4

problem = GLP(objective, lb=-ones(4), ub=ones(4),
              maxIter=1e3, maxFunEvals=1e5, maxTime=3, maxCPUTime=3)
#optional: graphic output
#problem.plot = 1 or problem.solve(..., plot=1) or problem = GLP(..., plot=1)
result = problem.solve('de', plot=1)
x_opt, f_opt = result.xf, result.ff
openopt-0.38+svn1589/openopt/examples/glp_2.py 0000664 0000000 0000000 00000000614 11757757505 0021164 0 ustar 00root root 0000000 0000000 from openopt import GLP
from numpy import *

# same multiextremal objective as in glp_1.py
f = lambda x: (x[0]-1.5)**2 + sin(0.8 * x[1] ** 2 + 15)**4 + cos(0.8 * x[2] ** 2 + 15)**4 + (x[3]-7.5)**4
p = GLP(f, lb = -ones(4), ub = ones(4), maxIter = 1e3, maxFunEvals = 1e5, maxTime = 3, maxCPUTime = 3)
#optional: graphic output
#p.plot = 1
# pswarm accepts an optional start point x0, unlike some other GLP solvers
r = p.solve('pswarm', x0=[0, 0, 0, 0], plot=0, debug=1, maxIter=200)
x_opt, f_opt = r.xf, r.ff
openopt-0.38+svn1589/openopt/examples/glp_3.py 0000664 0000000 0000000 00000000505 11757757505 0021164 0 ustar 00root root 0000000 0000000 from openopt import GLP
from numpy import *

N = 100
target = arange(N)  # the vector [0, 1, ..., N-1]

# objective: squared Euclidean distance to 'target'
objective = lambda x: ((x - target) ** 2).sum()

problem = GLP(objective, lb=-ones(N), ub=N * ones(N),
              maxIter=1e3, maxFunEvals=1e5, maxTime=10, maxCPUTime=300)
#optional: graphic output
#problem.plot = 1
result = problem.solve('de', plot=1, debug=1, iprint=0)
x_opt, f_opt = result.xf, result.ff
openopt-0.38+svn1589/openopt/examples/glp_Ab_c.py 0000664 0000000 0000000 00000001571 11757757505 0021652 0 ustar 00root root 0000000 0000000 from openopt import GLP
from numpy import *
# objective function
# (x0 - 1.5)^2 + sin(0.8 * x1^2 + 15)^4 + cos(0.8 * x2^2 + 15)^4 + (x3 - 7.5)^4 -> min
f = lambda x: (x[0]-1.5)**2 + sin(0.8 * x[1] ** 2 + 15)**4 + cos(0.8 * x[2] ** 2 + 15)**4 + (x[3]-7.5)**4
# box-bound constraints lb <= x <= ub
lb, ub = -ones(4), ones(4)
# linear inequality constraints
# x0 + x3 <= 0.15
# x1 + x3 <= 1.5
# as Ax <= b
A = mat('1 0 0 1; 0 1 0 1') # tuple, list, numpy array etc are OK as well
b = [0.15, 1.5] # tuple, list, numpy array etc are OK as well
# non-linear constraints
# x0^2 + x2^2 <= 0.15
# 1.5 * x0^2 + x1^2 <= 1.5
c = lambda x: (x[0] ** 2 + x[2] ** 2 - 0.15, 1.5 * x[0] ** 2 + x[1] ** 2 - 1.5)
p = GLP(f, lb=lb, ub=ub, A=A, b=b, c=c, maxIter = 250, maxFunEvals = 1e5, maxTime = 30, maxCPUTime = 30)
r = p.solve('de', mutationRate = 0.15, plot=1)
x_opt, f_opt = r.xf, r.ff
openopt-0.38+svn1589/openopt/examples/lcp_1.py 0000664 0000000 0000000 00000003050 11757757505 0021154 0 ustar 00root root 0000000 0000000 """
Example:
Consider the LCP problem w = Mz + q
M = array([
[0.42956806, -0.40076658, -0.02880148, -0.42956806, 0.40076658, 0.02880148],
[-0.40076658, 0.47288367, -0.07211709, 0.40076658, -0.47288367, 0.07211709],
[-0.02880148, -0.07211709, 0.10091857, 0.02880148, 0.07211709, -0.10091857],
[-0.42956806, 0.40076658, 0.02880148, 0.42956806, -0.40076658, -0.02880148],
[ 0.40076658, -0.47288367, 0.07211709, -0.40076658, 0.47288367, -0.07211709],
[ 0.02880148, 0.07211709, -0.10091857, -0.02880148, -0.07211709, 0.10091857]])
q = array([1.09389333, -0.53851907, -0.05537426, -0.79389333, 0.83851907, 0.35537426])
"""
from numpy import *
from openopt import LCP
M = array([
[0.42956806, -0.40076658, -0.02880148, -0.42956806, 0.40076658, 0.02880148],
[-0.40076658, 0.47288367, -0.07211709, 0.40076658, -0.47288367, 0.07211709],
[-0.02880148, -0.07211709, 0.10091857, 0.02880148, 0.07211709, -0.10091857],
[-0.42956806, 0.40076658, 0.02880148, 0.42956806, -0.40076658, -0.02880148],
[ 0.40076658, -0.47288367, 0.07211709, -0.40076658, 0.47288367, -0.07211709],
[ 0.02880148, 0.07211709, -0.10091857, -0.02880148, -0.07211709, 0.10091857]])
q = array([1.09389333, -0.53851907, -0.05537426, -0.79389333, 0.83851907, 0.35537426])
p = LCP(M, q)
r = p.solve('lcpsolve')
f_opt, x_opt = r.ff, r.xf
w, z = x_opt[x_opt.size/2:], x_opt[:x_opt.size/2]
print('w: %s z: %s' % (w, z))
# w: [ 0. 0. 0.02167615 1.84666668 0. 0. ] z: [ 0.3 0.2 0. 0. 0.1 0.3]
openopt-0.38+svn1589/openopt/examples/llavp_1.py 0000664 0000000 0000000 00000001015 11757757505 0021513 0 ustar 00root root 0000000 0000000 from numpy import empty, sin, cos, arange, ones
from openopt import LLAVP
M, N = 150, 15
C = empty((M,N))
d = empty(M)
for j in range(M):
d[j] = 1.5*N+80*sin(j)
C[j] = 8*sin(4.0+arange(N)) + 15*cos(j)
lb = sin(arange(N))
ub = lb + 1
p = LLAVP(C, d, lb=lb, ub=ub, dump = 10, X = ones(N), maxIter = 1e4, maxFunEvals = 1e100)
#optional: plot
p.plot=1
r = p.solve('nsp:ralg', iprint = 100, maxIter = 1000)
#r = p.solve('nsp:ipopt', iprint = 100, maxIter = 1000)
print('f_opt: %f' % r.ff)
#print 'x_opt:', r.xf
openopt-0.38+svn1589/openopt/examples/llsp_1.py 0000664 0000000 0000000 00000001255 11757757505 0021355 0 ustar 00root root 0000000 0000000 __docformat__ = "restructuredtext en"
from numpy import empty, sin, cos, arange
from openopt import LLSP
M, N = 1500, 1000
C = empty((M,N))
d = empty(M)
for j in range(M):
d[j] = 1.5*N+80*sin(j)
C[j] = 8*sin(4.0+arange(N)) + 15*cos(j)
""" alternatively, try the sparse problem - lsqr solver can take benefits of it.
Also, if your C is too large for your RAM
you can pass C of any scipy.sparse matrix format
for j in xrange(M):
d[j] = 1.5*N+80*sin(j)
C[j, j%N] = 15*cos(j) #+ 8*sin(4.0+arange(N))
C[j, (1 + j)%N] = 15*cos(j) #+ 8*sin(4.0+arange(N))
"""
p = LLSP(C, d)
r = p.solve('lsqr')
print('f_opt: %f' % r.ff) # 2398301.68347
#print 'x_opt:', r.xf
openopt-0.38+svn1589/openopt/examples/llsp_2.py 0000664 0000000 0000000 00000001564 11757757505 0021361 0 ustar 00root root 0000000 0000000 __docformat__ = "restructuredtext en"
from numpy import diag, ones, sin, cos, arange, sqrt, vstack, zeros, dot
from openopt import LLSP, NLP
N = 150
C1 = diag(sqrt(arange(N)))
C2 = (1.5+arange(N)).reshape(1, -1) * (0.8+arange(N)).reshape(-1, 1)
C = vstack((C1, C2))
d = arange(2*N)
lb = -2.0+sin(arange(N))
ub = 5+cos(arange(N))
############################LLSP################################
LLSPsolver = 'bvls'
p = LLSP(C, d, lb=lb, ub=ub)
r = p.solve(LLSPsolver)
#############################NLP################################
NLPsolver = 'scipy_lbfgsb'# you could try scipy_tnc or ralg as well
#NLPsolver = 'scipy_tnc'
p2 = LLSP(C, d, lb=lb, ub=ub)
r2=p2.solve('nlp:'+NLPsolver)
##################################################################
print '###########Results:###########'
print 'LLSP solver '+ LLSPsolver + ':', r.ff
print 'NLP solver '+ NLPsolver + ':', r2.ff
openopt-0.38+svn1589/openopt/examples/lp_1.py 0000664 0000000 0000000 00000002352 11757757505 0021015 0 ustar 00root root 0000000 0000000 """
Example:
Let's consider the problem
15x1 + 8x2 + 80x3 -> min (1)
subjected to
x1 + 2x2 + 3x3 <= 15 (2)
8x1 + 15x2 + 80x3 <= 80 (3)
8x1 + 80x2 + 15x3 <=150 (4)
100x1 + 10x2 + x3 >= 800 (5)
80x1 + 8x2 + 15x3 = 750 (6)
x1 + 10x2 + 100x3 = 80 (7)
x1 >= 4 (8)
-8 >= x2 >= -80 (9)
"""
from numpy import *
from openopt import LP
f = array([15,8,80])
A = mat('1 2 3; 8 15 80; 8 80 15; -100 -10 -1') # numpy.ndarray is also allowed
b = [15, 80, 150, -800] # numpy.ndarray, matrix etc are also allowed
Aeq = mat('80 8 15; 1 10 100') # numpy.ndarray is also allowed
beq = (750, 80)
lb = [4, -80, -inf]
ub = [inf, -8, inf]
p = LP(f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)
#or p = LP(f=f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)
#r = p.minimize('glpk') # CVXOPT must be installed
#r = p.minimize('lpSolve') # lpsolve must be installed
r = p.minimize('pclp')
#search for max: r = p.maximize('glpk') # CVXOPT & glpk must be installed
#r = p.minimize('nlp:ralg', ftol=1e-7, xtol=1e-7, goal='min', plot=1)
print('objFunValue: %f' % r.ff) # should print 204.48841578
print('x_opt: %s' % r.xf) # should print [ 9.89355041 -8. 1.5010645 ]
openopt-0.38+svn1589/openopt/examples/lunp_1.py 0000664 0000000 0000000 00000000561 11757757505 0021360 0 ustar 00root root 0000000 0000000 __docformat__ = "restructuredtext en"
from numpy import *
from openopt import LUNP
M, N = 1500, 150
C = empty((M,N))
d = empty(M)
for j in xrange(M):
d[j] = 1.5*N+80*sin(j)
C[j] = 8*sin(4.0+arange(N)) + 15*cos(j)
lb = sin(arange(N))
ub = lb + 1
p = LUNP(C, d, lb=lb, ub=ub)
r = p.solve('lp:glpk', iprint = -1)
print 'f_opt:', r.ff
#print 'x_opt:', r.xf
openopt-0.38+svn1589/openopt/examples/milp_1.py 0000664 0000000 0000000 00000001362 11757757505 0021343 0 ustar 00root root 0000000 0000000 __docformat__ = "restructuredtext en"
from numpy import *
from openopt import MILP
f = [1, 2, 3, 4, 5, 4, 2, 1]
# indexing starts from ZERO!
# while in native lpsolve-python wrapper from 1
# so if you used [5,8] for native lp_solve python binding
# you should use [4,7] instead
intVars = [4, 7]
lb = -1.5 * ones(8)
ub = 15 * ones(8)
A = zeros((5, 8))
b = zeros(5)
for i in xrange(5):
for j in xrange(8):
A[i,j] = -8+sin(8*i) + cos(15*j)
b[i] = -150 + 80*sin(80*i)
p = MILP(f=f, lb=lb, ub=ub, A=A, b=b, intVars=intVars, goal='min')
r = p.solve('lpSolve')
#r = p.solve('glpk', iprint =-1)
#r = p.solve('cplex')
print('f_opt: %f' % r.ff) # 25.801450769161505
print('x_opt: %s' % r.xf) # [ 15. 10.15072538 -1.5 -1.5 -1. -1.5 -1.5 15.]
openopt-0.38+svn1589/openopt/examples/minlp_1.py 0000664 0000000 0000000 00000002640 11757757505 0021521 0 ustar 00root root 0000000 0000000 """
Example of MINLP
It is recommended to read help(NLP) before
and /examples/nlp_1.py
"""
from openopt import MINLP
from numpy import *
N = 150
K = 50
#objective function:
f = lambda x: ((x-5.45)**2).sum()
#optional: 1st derivatives
df = lambda x: 2*(x-5.45)
# start point
x0 = 8*cos(arange(N))
# assign prob:
# 1st arg - objective function
# 2nd arg - start point
# for more details see
# http://openopt.org/Assignment
p = MINLP(f, x0, df=df, maxIter = 1e3)
# optional: set some box constraints lb <= x <= ub
p.lb = [-6.5]*N
p.ub = [6.5]*N
# see help(NLP) for handling of other constraints:
# Ax<=b, Aeq x = beq, c(x) <= 0, h(x) = 0
# see also /examples/nlp_1.py
# required tolerance for smooth constraints, default 1e-6
p.contol = 1.1e-6
p.name = 'minlp_1'
# required field: nlpSolver - should be capable of handling box-bounds at least
#nlpSolver = 'ralg'
nlpSolver = 'ipopt'
# coords of discrete variables and sets of allowed values
p.discreteVars = {7:range(3, 10), 8:range(3, 10), 9:[2, 3.1, 9]}
# required tolerance for discrete variables, default 10^-5
p.discrtol = 1.1e-5
#optional: check derivatives, you could use p.checkdc(), p.checkdh() for constraints
#p.checkdf()
# optional: maxTime, maxCPUTime
# p.maxTime = 15
# p.maxCPUTime = 15
r = p.solve('branb', nlpSolver=nlpSolver, plot = False)
# optim point and value are r.xf and r.ff,
# see http://openopt.org/OOFrameworkDoc#Result_structure for more details
openopt-0.38+svn1589/openopt/examples/miqcqp_1.py 0000664 0000000 0000000 00000002051 11757757505 0021670 0 ustar 00root root 0000000 0000000 """
Consider the MIQCQP problem
0.5 * (x1^2 + 2x2^2 + 3x3^2) + 15x1 + 8x2 + 80x3 -> min (1)
subjected to
x1 + 2x2 + 3x3 <= 150 (2)
8x1 + 15x2 + 80x3 <= 800 (3)
x2 - x3 = 25.5 (4)
x1 <= 15 (5)
x1^2 + 2.5 x2^2 + 3 x3^2 + 0.1 x1 + 0.2 x2 + 0.3 x3 - 1000 <= 0 (6)
2 x1^2 + x2^2 + 3 x3^2 + 0.1 x1 + 0.5 x2 + 0.3 x3 <= 1000 (7)
x1, x3 are integers
"""
from numpy import diag, matrix, inf
from openopt import QP
H = diag([1.0, 2.0,3.0])
f = [15,8,80]
A = matrix('1 2 3; 8 15 80')
b = [150, 800]
# Qc should be list or tuple of triples (P, q, s): 0.5 x^T P x + q x + s <= 0
QC = ((diag([1.0, 2.5, 3.0]), [0.1, 0.2, 0.3], -1000), (diag([2.0, 1.0, 3.0]), [0.1, 0.5, 0.3], -1000))
p = QP(H, f, A = A, b = b, Aeq = [0, 1, -1], beq = 25.5, ub = [15,inf,inf], QC = QC, intVars = [0, 2])
# or p = QP(H=diag([1,2,3]), f=[15,8,80], ...)
r = p.solve('cplex', iprint = 0, plot=1)
f_opt, x_opt = r.ff, r.xf
# x_opt = array([ -2.99999999, 9.5 , -16. ])
# f_opt = -770.24999989134858
openopt-0.38+svn1589/openopt/examples/miqp_1.py 0000664 0000000 0000000 00000001245 11757757505 0021350 0 ustar 00root root 0000000 0000000 """
Example:
Consider the problem
0.5 * (x1^2 + 2x2^2 + 3x3^2) + 15x1 + 8x2 + 80x3 -> min (1)
subjected to
x1 + 2x2 + 3x3 <= 150 (2)
8x1 + 15x2 + 80x3 <= 800 (3)
x2 - x3 = 25.5 (4)
x1 <= 15 (5)
x1, x3 are integers
"""
from numpy import diag, matrix, inf
from openopt import QP
p = QP(diag([1, 2, 3]), [15, 8, 80], A = matrix('1 2 3; 8 15 80'), b = [150, 800], Aeq = [0, 1, -1], beq = 25.5, ub = [15,inf,inf])
# or p = QP(H=diag([1,2,3]), f=[15,8,80], A= ...)
r = p.solve('cplex', intVars= [0, 2], iprint = 0)
f_opt, x_opt = r.ff, r.xf
# x_opt = array([-15. , -2.5, -28. ])
# f_opt = -1190.25
openopt-0.38+svn1589/openopt/examples/mmp_1.py 0000664 0000000 0000000 00000005502 11757757505 0021173 0 ustar 00root root 0000000 0000000 """
Example of solving Mini-Max Problem
via converter to NLP
latter works via solving NLP
t -> min
subjected to
t >= f0(x)
t >= f1(x)
...
t >= fk(x)
Splitting f into separate funcs could benefit some solvers
(ralg, algencan; see NLP docpage for more details)
but is not implemented yet
"""
from numpy import *
from openopt import *
n = 15
f1 = lambda x: (x[0]-15)**2 + (x[1]-80)**2 + (x[2:]**2).sum()
f2 = lambda x: (x[1]-15)**2 + (x[2]-8)**2 + (abs(x[3:]-100)**1.5).sum()
f3 = lambda x: (x[2]-8)**2 + (x[0]-80)**2 + (abs(x[4:]+150)**1.2).sum()
f = [f1, f2, f3]
# you can define matrices as numpy array, matrix, Python lists or tuples
#box-bound constraints lb <= x <= ub
lb = [0]*n
ub = [15, inf, 80] + (n-3) * [inf]
# linear ineq constraints A*x <= b
A = array([[4, 5, 6] + [0]*(n-3), [80, 8, 15] + [0]*(n-3)])
b = [100, 350]
# non-linear eq constraints Aeq*x = beq
Aeq = [15, 8, 80] + [0]*(n-3)
beq = 90
# non-lin ineq constraints c(x) <= 0
c1 = lambda x: x[0] + (x[1]/8) ** 2 - 15
c2 = lambda x: x[0] + (x[2]/80) ** 2 - 15
c = [c1, c2]
#or: c = lambda x: (x[0] + (x[1]/8) ** 2 - 15, x[0] + (x[2]/80) ** 2 - 15)
# non-lin eq constraints h(x) = 0
h = lambda x: x[0]+x[2]**2 - x[1]
x0 = [0, 1, 2] + [1.5]*(n-3)
p = MMP(f, x0, lb = lb, ub = ub, A=A, b=b, Aeq = Aeq, beq = beq, c=c, h=h, xtol = 1e-6, ftol=1e-6)
# optional, matplotlib is required:
p.plot=1
r = p.solve('nlp:ipopt', iprint=50, maxIter=1e3)
print 'MMP result:', r.ff
### let's check result via comparison with NSP solution
F= lambda x: max([f1(x), f2(x), f3(x)])
p = NSP(F, x0, iprint=50, lb = lb, ub = ub, c=c, h=h, A=A, b=b, Aeq = Aeq, beq = beq, xtol = 1e-6, ftol=1e-6)
r_nsp = p.solve('ipopt', maxIter = 1e3)
print 'NSP result:', r_nsp.ff, 'difference:', r_nsp.ff - r.ff
"""
-----------------------------------------------------
solver: ipopt problem: unnamed goal: minimax
iter objFunVal log10(maxResidual)
0 1.196e+04 1.89
50 1.054e+04 -8.00
100 1.054e+04 -8.00
150 1.054e+04 -8.00
161 1.054e+04 -6.10
istop: 1000
Solver: Time Elapsed = 0.93 CPU Time Elapsed = 0.88
objFunValue: 10536.481 (feasible, max constraint = 7.99998e-07)
MMP result: 10536.4808622
-----------------------------------------------------
solver: ipopt problem: unnamed goal: minimum
iter objFunVal log10(maxResidual)
0 1.196e+04 1.89
50 1.054e+04 -4.82
100 1.054e+04 -10.25
150 1.054e+04 -15.35
StdOut: Problem solved
[PyIPOPT] Ipopt will use Hessian approximation.
[PyIPOPT] nele_hess is 0
192 1.054e+04 -13.85
istop: 1000
Solver: Time Elapsed = 2.42 CPU Time Elapsed = 2.42
objFunValue: 10536.666 (feasible, max constraint = 1.42109e-14)
NSP result: 10536.6656339 difference: 0.184771728482
"""
openopt-0.38+svn1589/openopt/examples/mmp_2.py 0000664 0000000 0000000 00000006436 11757757505 0021203 0 ustar 00root root 0000000 0000000 """
Example of solving Mini-Max Problem
max { (x0-15)^2+(x1-80)^2, (x1-15)^2 + (x2-8)^2, (x2-8)^2 + (x0-80)^2 } -> min
Currently nsmm is single OO solver available for MMP
It defines function F(x) = max_i {f[i](x)}
and solves NSP F(x) -> min using solver ralg.
It's very far from specialized solvers (like MATLAB fminimax),
but it's better than having nothing at all,
and allows using of nonsmooth and noisy funcs.
This solver is intended to be enhanced in future.
"""
from numpy import *
from openopt import *
f1 = lambda x: (x[0]-15)**2 + (x[1]-80)**2
f2 = lambda x: (x[1]-15)**2 + (x[2]-8)**2
f3 = lambda x: (x[2]-8)**2 + (x[0]-80)**2
f = [f1, f2, f3]
# you can define matrices as numpy array, matrix, Python lists or tuples
#box-bound constraints lb <= x <= ub
lb = [0]*3# i.e. [0,0,0]
ub = [15, inf, 80]
# linear ineq constraints A*x <= b
A = mat('4 5 6; 80 8 15')
b = [100, 350]
# non-linear eq constraints Aeq*x = beq
Aeq = mat('15 8 80')
beq = 90
# non-lin ineq constraints c(x) <= 0
c1 = lambda x: x[0] + (x[1]/8) ** 2 - 15
c2 = lambda x: x[0] + (x[2]/80) ** 2 - 15
c = [c1, c2]
#or: c = lambda x: (x[0] + (x[1]/8) ** 2 - 15, x[0] + (x[2]/80) ** 2 - 15)
# non-lin eq constraints h(x) = 0
h = lambda x: x[0]+x[2]**2 - x[1]
x0 = [0, 1, 2]
p = MMP(f, x0, lb = lb, ub = ub, A=A, b=b, Aeq = Aeq, beq = beq, c=c, h=h, xtol = 1e-6, ftol=1e-6)
#p = MMP(f, x0, ftol=1e-8)
# optional, matplotlib is required:
#p.plot=1
r = p.solve('nsmm', iprint=1, NLPsolver = 'ralg', maxIter=1e3, minIter=1e2)
print 'MMP result:', r.ff
#
### let's check result via comparison with NSP solution
F= lambda x: max([f1(x), f2(x), f3(x)])
p = NSP(F, x0, lb = lb, ub = ub, c=c, h=h, A=A, b=b, Aeq = Aeq, beq = beq, xtol = 1e-6, ftol=1e-6)
#p = NSP(F, x0)
r_nsp = p.solve('ralg')
#print 'NSP result:', r_nsp.ff, 'difference:', r_nsp.ff - r.ff
#"""
#starting solver nsmm (license: BSD) with problem unnamed
# iter ObjFun log10(maxResidual)
# 0 6.4660e+03 +1.89
# 10 6.4860e+03 -0.68
# 20 6.4158e+03 -1.23
# 30 6.4119e+03 -3.08
# 40 6.3783e+03 -2.95
# 50 6.3950e+03 -4.05
# 60 6.3951e+03 -6.02
# 70 6.3938e+03 -6.02
# 78 6.3936e+03 -6.00
#nsmm has finished solving the problem unnamed
#istop: 3 (|| X[k] - X[k-1] || < xtol)
#Solver: Time Elapsed = 0.41 CPU Time Elapsed = 0.38
#objFunValue: 6393.6196095379446 (feasible, max constraint = 9.95421e-07)
#MMP result: 6393.6196095379446
#starting solver ralg (license: BSD) with problem unnamed
#itn 0 : Fk= 6466.0 MaxResidual= 78.0
#itn 10 Fk: 6485.9728487666425 MaxResidual: 2.07e-01 ls: 2
#itn 20 Fk: 6415.8358391383163 MaxResidual: 5.92e-02 ls: 1
#itn 30 Fk: 6411.9310394431113 MaxResidual: 8.22e-04 ls: 3
#itn 40 Fk: 6378.3471060481961 MaxResidual: 1.12e-03 ls: 2
#itn 50 Fk: 6394.9848936519056 MaxResidual: 8.94e-05 ls: 0
#itn 60 Fk: 6395.054402295913 MaxResidual: 9.57e-07 ls: 1
#itn 70 Fk: 6393.8314202292149 MaxResidual: 9.63e-07 ls: 1
#itn 78 Fk: 6393.6196095379446 MaxResidual: 9.95e-07 ls: 1
#ralg has finished solving the problem unnamed
#istop: 3 (|| X[k] - X[k-1] || < xtol)
#Solver: Time Elapsed = 0.44 CPU Time Elapsed = 0.32
#objFunValue: 6393.6196095379446 (feasible, max constraint = 9.95421e-07)
#NSP result: 6393.6196095379446 difference: 0.0
#"""
openopt-0.38+svn1589/openopt/examples/nllsp_1.py 0000664 0000000 0000000 00000003227 11757757505 0021534 0 ustar 00root root 0000000 0000000 """
Let us solve the overdetermined nonlinear equations:
a^2 + b^2 = 15
a^4 + b^4 = 100
a = 3.5
Let us consider the problem as
x[0]**2 + x[1]**2 - 15 = 0
x[0]**4 + x[1]**4 - 100 = 0
x[0] - 3.5 = 0
Now we will solve the one using solver scipy_leastsq
"""
from openopt import NLLSP
from numpy import *
f = lambda x: ((x**2).sum() - 15, (x**4).sum() - 100, x[0]-3.5)
# other possible f assignments:
# f = lambda x: [(x**2).sum() - 15, (x**4).sum() - 100, x[0]-3.5]
#f = [lambda x: (x**2).sum() - 15, lambda x: (x**4).sum() - 100, lambda x: x[0]-3.5]
# f = (lambda x: (x**2).sum() - 15, lambda x: (x**4).sum() - 100, lambda x: x[0]-3.5)
# f = lambda x: asfarray(((x**2).sum() - 15, (x**4).sum() - 100, x[0]-3.5))
#optional: gradient
def df(x):
    """Analytic Jacobian of the three residuals with respect to x.

    Rows correspond to the residuals
        x0**2 + x1**2 - 15,
        x0**4 + x1**4 - 100,
        x0 - 3.5,
    columns to (x0, x1).  Returns a (3, 2) float array.
    """
    a, b = x[0], x[1]
    return array([
        [2.0 * a,      2.0 * b],
        [4.0 * a ** 3, 4.0 * b ** 3],
        [1.0,          0.0],
    ])
# initial estimate of the solution - sometimes a rather precise one is very important
x0 = [1.5, 8]
#p = NLLSP(f, x0, diffInt = 1.5e-8, xtol = 1.5e-8, ftol = 1.5e-8)
# or
# p = NLLSP(f, x0)
# or
p = NLLSP(f, x0, df = df, xtol = 1.5e-8, ftol = 1.5e-8)
#optional: user-supplied gradient check:
p.checkdf()
#r = p.solve('scipy_leastsq', plot=1, iprint = -1)
#or using converter lsp2nlp:
r = p.solve('nlp:ralg', iprint = 1, plot=1)
#r = p.solve('nlp:ipopt',plot=1), r = p.solve('nlp:algencan'), r = p.solve('nlp:ralg'), etc
#(some NLP solvers require additional installation)
print 'x_opt:', r.xf # 2.74930862, +/-2.5597651
print 'funcs Values:', p.f(r.xf) # [-0.888904734668, 0.0678251418575, -0.750691380965]
print 'f_opt:', r.ff, '; sum of squares (should be same value):', (p.f(r.xf) ** 2).sum() # 1.35828942657
openopt-0.38+svn1589/openopt/examples/nlp_1.py 0000664 0000000 0000000 00000005035 11757757505 0021174 0 ustar 00root root 0000000 0000000 """
Example:
(x0-5)^2 + (x2-5)^2 + ... +(x149-5)^2 -> min
subjected to
# lb<= x <= ub:
x4 <= 4
8 <= x5 <= 15
# Ax <= b
x0+...+x149 >= 825
x9 + x19 <= 3
x10+x11 <= 9
# Aeq x = beq
x100+x101 = 11
# c(x) <= 0
2*x0^4-32 <= 0
x1^2+x2^2-8 <= 0
# h(x) = 0
(x[149]-1)**6 = 0
(x[148]-1.5)**6 = 0
"""
from openopt import NLP
from numpy import cos, arange, ones, asarray, zeros, mat, array
N = 150
# objective function:
f = lambda x: ((x-5)**2).sum()
# objective function gradient (optional):
df = lambda x: 2*(x-5)
# start point (initial estimation)
x0 = 8*cos(arange(N))
# c(x) <= 0 constraints
c = [lambda x: 2* x[0] **4-32, lambda x: x[1]**2+x[2]**2 - 8]
# dc(x)/dx: non-lin ineq constraints gradients (optional):
dc0 = lambda x: [8 * x[0]**3] + [0]*(N-1)
dc1 = lambda x: [0, 2 * x[1], 2 * x[2]] + [0]*(N-3)
dc = [dc0, dc1]
# h(x) = 0 constraints
def h(x):
    # Nonlinear equality constraints h(x) = 0 (see the module docstring):
    # the 6th-power terms force the last two coordinates toward 1 and 1.5.
    # N is the module-level problem dimension (150 in this example).
    return (x[N-1]-1)**6, (x[N-2]-1.5)**6
# other possible return types: numpy array, matrix, Python list, tuple
# or just h = lambda x: [(x[149]-1)**6, (x[148]-1.5)**6]
# dh(x)/dx: non-lin eq constraints gradients (optional):
def dh(x):
    # Jacobian of h: a (2, N) array that is zero everywhere except the
    # columns of the two trailing variables (chain rule of the 6th powers).
    # N is the module-level problem dimension.
    r = zeros((2, N))
    r[0, -1] = 6*(x[N-1]-1)**5
    r[1, -2] = 6*(x[N-2]-1.5)**5
    return r
# lower and upper bounds on variables
lb = -6*ones(N)
ub = 6*ones(N)
ub[4] = 4
lb[5], ub[5] = 8, 15
# general linear inequality constraints
A = zeros((3, N))
A[0, 9] = 1
A[0, 19] = 1
A[1, 10:12] = 1
A[2] = -ones(N)
b = [7, 9, -825]
# general linear equality constraints
Aeq = zeros(N)
Aeq[100:102] = 1
beq = 11
# required constraints tolerance, default for NLP is 1e-6
contol = 1e-7
# If you use solver algencan, NB! - it ignores xtol and ftol; using maxTime, maxCPUTime, maxIter, maxFunEvals, fEnough is recommended.
# Note that in algencan gtol means norm of projected gradient of the Augmented Lagrangian
# so it should be something like 1e-3...1e-5
gtol = 1e-7 # (default gtol = 1e-6)
# Assign problem:
# 1st arg - objective function
# 2nd arg - start point
p = NLP(f, x0, df=df, c=c, dc=dc, h=h, dh=dh, A=A, b=b, Aeq=Aeq, beq=beq,
lb=lb, ub=ub, gtol=gtol, contol=contol, iprint = 50, maxIter = 10000, maxFunEvals = 1e7, name = 'NLP_1')
#optional: graphic output, requires pylab (matplotlib)
p.plot = True
#optional: user-supplied 1st derivatives check
p.checkdf()
p.checkdc()
p.checkdh()
solver = 'ralg'
#solver = 'algencan'
#solver = 'ipopt'
#solver = 'scipy_slsqp'
# solve the problem
r = p.solve(solver, plot=0) # string argument is solver name
# r.xf and r.ff are optim point and optim objFun value
# r.ff should be something like 132.05
openopt-0.38+svn1589/openopt/examples/nlp_11.py 0000664 0000000 0000000 00000006461 11757757505 0021261 0 ustar 00root root 0000000 0000000 """
Example:
(x0-5)^2 + (x2-5)^2 + ... +(x149-5)^2 -> min
subjected to
# lb<= x <= ub:
x4 <= 4
8 <= x5 <= 15
# Ax <= b
x0+...+x149 >= 825
x9 + x19 <= 3
x10+x11 <= 9
# Aeq x = beq
x100+x101 = 11
# c(x) <= 0
2*x0^4-32 <= 0
x1^2+x2^2-8 <= 0
# h(x) = 0
(x[149]-1)**6 = 0
(x[148]-1.5)**6 = 0
"""
from openopt import NLP
from numpy import cos, arange, ones, asarray, zeros, mat, array
N = 150
# objective function:
f = lambda x: ((x-5)**2).sum()
# objective function gradient (optional):
df = lambda x: 2*(x-5)
# start point (initial estimation)
x0 = 8*cos(arange(N))
# c(x) <= 0 constraints
c = [lambda x: 2* x[0] **4-32, lambda x: x[1]**2+x[2]**2 - 8]
# dc(x)/dx: non-lin ineq constraints gradients (optional):
dc0 = lambda x: [8 * x[0]**3] + [0]*(N-1)
dc1 = lambda x: [0, 2 * x[1], 2 * x[2]] + [0]*(N-3)
dc = [dc0, dc1]
# h(x) = 0 constraints
def h(x):
    """Nonlinear equality constraints h(x) = 0 on the last two variables.

    Returns the pair ((x[-1]-1)**6, (x[-2]-1.5)**6); an earlier
    single-constraint variant ((x[-1]-1)**2) is kept commented out.
    """
    # return (x[-1]-1)**2
    last, second_last = x[-1], x[-2]
    return (last - 1) ** 6, (second_last - 1.5) ** 6
# other possible return types: numpy array, matrix, Python list, tuple
# dh(x)/dx: non-lin eq constraints gradients (optional):
def dh(x):
    # Jacobian of h: (2, N) zero matrix except the entries for the last
    # two variables; N is the module-level problem dimension (150 here).
    r = zeros((2, N))
    r[0, -1] = 6*(x[-1]-1)**5
    r[1, -2] = 6*(x[-2]-1.5)**5
    return r
#def dh(x):
# r = zeros((1, N))
# r[0, -1] = 2*(x[-1]-1)**1
# #r[1, -2] = 6*(x[148]-1.5)**5
# return r
# lower and upper bounds on variables
lb = -6*ones(N)
ub = 6*ones(N)
ub[4] = 4
lb[5], ub[5] = 8, 15
#lb[110:120]=5.67
#ub[110:120]=5.67
# general linear inequality constraints
A = zeros((3, N))
A[0, 9] = 1
A[0, 19] = 1
A[1, 10:12] = 1
A[2] = -ones(N)
b = [7, 9, -825]
# general linear equality constraints
Aeq = zeros((1, N))
Aeq[0, 100:102] = 1
beq = 11
#Aeq[1, 105] = 1
#Aeq[1, 106] = 1.00001
#beq = [11, 11.00005]
# required constraints tolerance, default for NLP is 1e-6
contol = 1e-6
# If you use solver algencan, NB! - it ignores xtol and ftol; using maxTime, maxCPUTime, maxIter, maxFunEvals, fEnough is recommended.
# Note that in algencan gtol means norm of projected gradient of the Augmented Lagrangian
# so it should be something like 1e-3...1e-5
gtol = 1e-7 # (default gtol = 1e-6)
# Assign problem:
# 1st arg - objective function
# 2nd arg - start point
p = NLP(f, x0, df=df, c=c, dc=dc, h=h, dh=dh, A=A, b=b, Aeq=Aeq, beq=beq,
lb=lb, ub=ub, gtol=gtol, contol=contol, iprint = 50, maxIter = 10000, maxFunEvals = 1e7, name = 'NLP_1')
#p = NLP(f, x0, df=df, Aeq=Aeq, beq=beq,
# gtol=gtol, contol=contol, iprint = 50, maxIter = 10000, maxFunEvals = 1e7, name = 'NLP_1')
#p = NLP(f, x0, df=df, lb=lb, ub=ub, gtol=gtol, contol=contol, iprint = 50, maxIter = 10000, maxFunEvals = 1e7, name = 'NLP_1')
#optional: graphic output, requires pylab (matplotlib)
p.plot = 1
#optional: user-supplied 1st derivatives check
p.checkdf()
p.checkdc()
p.checkdh()
def MyIterFcn(p):
    """Per-iteration callback handed to the solver via ``callback=``.

    Always returns 0 (presumably "no user-requested stop" — confirm
    against the OpenOpt callback convention).  The commented lines show
    how solver state could be inspected or recorded here.
    """
    # if p.iter == 50:
    #     p.user.mylist.append(p.xk.copy())
    return 0
p.user.mylist = []
# solve the problem
#p.debug=1
solver = 'algencan'
solver = 'ralg'
#solver = 'scipy_cobyla'
#solver = 'ipopt'
#solver = 'scipy_slsqp'
p.debug=1
r = p.solve(solver, showRej=1, iprint=1, maxTime = 15000, newLinEq=1, callback = MyIterFcn) # string argument is solver name
#r = p.solve('r2', iprint = 1, plot=0, showLS=1, maxIter=480)
# r.xf and r.ff are optim point and optim objFun value
# r.ff should be something like 132.0522
openopt-0.38+svn1589/openopt/examples/nlp_2.py 0000664 0000000 0000000 00000001615 11757757505 0021175 0 ustar 00root root 0000000 0000000 from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros
N = 30
M = 5
ff = lambda x: ((x-M)**2).sum()
p = NLP(ff, cos(arange(N)))
p.df = lambda x: 2*(x-M)
p.c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]
def dc(x):
    # Gradients of the two nonlinear inequality constraints p.c:
    # rows are (d/dx)(2*x0**4 - 32) and (d/dx)(x1**2 + x2**2 - 8).
    # p is the module-level NLP instance, so the result is (2, p.n).
    r = zeros((2, p.n))
    r[0,0] = 2 * 4 * x[0]**3
    r[1,1] = 2 * x[1]
    r[1,2] = 2 * x[2]
    return r
p.dc = dc
h1 = lambda x: 1e1*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
p.h = lambda x: (h1(x), h2(x))
def dh(x):
    # Gradients of the two equality constraints h1, h2 defined above:
    # only the columns of the last two variables are nonzero.
    # p is the module-level NLP instance, so the result is (2, p.n).
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3
    return r
p.dh = dh
p.lb = -6*ones(N)
p.ub = 6*ones(N)
p.lb[3] = 5.5
p.ub[4] = 4.5
#r = p.solve('ipopt', showLS=0, xtol=1e-7, maxIter = 1504)
#solver = 'ipopt'
solver = 'ralg'
#solver = 'scipy_slsqp'
#solver = 'algencan'
r = p.solve(solver, maxIter = 1504, plot=1)
#!! fmin_cobyla can't use user-supplied gradient
#r = p.solve('scipy_cobyla')
openopt-0.38+svn1589/openopt/examples/nlp_3.py 0000664 0000000 0000000 00000005163 11757757505 0021200 0 ustar 00root root 0000000 0000000 from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros, sqrt, asscalar
from pylab import legend, show, plot, subplot, xlabel, subplots_adjust
from string import rjust, ljust, expandtabs
N = 15
M = 5
f = lambda x: -(abs(x-M) ** 1.5).sum()
x0 = cos(arange(N))
#c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]
global cc1, cc2, cc3
def c1(x):
    """Nonlinear inequality constraint 2*x0**4 - 32 <= 0.

    Increments the module-level counter ``cc1`` on every call so the
    script can report how many evaluations each solver performed.
    """
    global cc1
    cc1 += 1
    return 2 * x[0] ** 4 - 32
def c2(x):
    """Nonlinear inequality constraint x1**2 + x2**2 - 8 <= 0.

    Increments the module-level counter ``cc2`` on every call so the
    script can report how many evaluations each solver performed.
    """
    global cc2
    cc2 += 1
    return x[1] ** 2 + x[2] ** 2 - 8
def c3(x):
    """Nonlinear inequality constraint x1**2 + x2**2 + x3**2 - 35 <= 0.

    Increments the module-level counter ``cc3`` on every call so the
    script can report how many evaluations each solver performed.
    """
    global cc3
    cc3 += 1
    return x[1] ** 2 + x[2] ** 2 + x[3] ** 2 - 35
c = [c1, c2, c3]
h1 = lambda x: 1e1*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
h = (h1, h2)
lb = -6*ones(N)
ub = 6*ones(N)
lb[3] = 5.5
ub[4] = 4.5
colors = ['b', 'k', 'y', 'r', 'g']
#############
#solvers = ['ralg','scipy_cobyla', 'lincher']
solvers = ['ralg', 'scipy_cobyla', 'lincher','ipopt','algencan' ]
solvers = ['ralg', 'ralg3', 'ralg5']
solvers = ['ralg', 'scipy_cobyla']
#solvers = ['ipopt']
solvers = ['ralg', 'ipopt']
solvers = ['ralg']
solvers = ['scipy_slsqp']
solvers = ['ralg']
#############
colors = colors[:len(solvers)]
lines, results = [], {}
for j in range(len(solvers)):
cc1, cc2, cc3 = 0, 0, 0
solver = solvers[j]
color = colors[j]
p = NLP(f, x0, c=c, h=h, lb = lb, ub = ub, ftol = 1e-6, maxFunEvals = 1e7, maxIter = 1220, plot = 1, color = color, iprint = 0, legend = [solvers[j]], show= False, xlabel='time', goal='maximum', name='nlp3')
if solver == 'algencan':
p.gtol = 1e-1
elif solver == 'ralg':
p.debug = 1
r = p.solve(solver, debug=1)
print 'c1 evals:', cc1, 'c2 evals:', cc2, 'c3 evals:', cc3
results[solver] = (r.ff, p.getMaxResidual(r.xf), r.elapsed['solver_time'], r.elapsed['solver_cputime'], r.evals['f'], r.evals['c'], r.evals['h'])
subplot(2,1,1)
F0 = asscalar(p.f(p.x0))
lines.append(plot([0, 1e-15], [F0, F0], color= colors[j]))
for i in range(2):
subplot(2,1,i+1)
legend(lines, solvers)
subplots_adjust(bottom=0.2, hspace=0.3)
xl = ['Solver f_opt MaxConstr Time CPUTime fEvals cEvals hEvals']
for i in range(len(results)):
xl.append((expandtabs(ljust(solvers[i], 16)+' \t', 15)+'%0.2f'% (results[solvers[i]][0]) + ' %0.1e' % (results[solvers[i]][1]) + (' %0.2f'% (results[solvers[i]][2])) + ' %0.2f '% (results[solvers[i]][3]) + str(results[solvers[i]][4]) + ' ' + rjust(str(results[solvers[i]][5]), 5) + expandtabs('\t' +str(results[solvers[i]][6]),8)))
xl = '\n'.join(xl)
subplot(2,1,1)
xlabel('Time elapsed (without graphic output), sec')
from pylab import *
subplot(2,1,2)
xlabel(xl)
show()
openopt-0.38+svn1589/openopt/examples/nlp_4.py 0000664 0000000 0000000 00000001733 11757757505 0021200 0 ustar 00root root 0000000 0000000 from openopt import NLP
from numpy import cos, arange, ones, asarray, zeros, mat, array, sin, cos, sign, abs, inf
N = 1500
K = 50
# 1st arg - objective function
# 2nd arg - x0
p = NLP(lambda x: (abs(x-5)).sum(), 8*cos(arange(N)), iprint = 50, maxIter = 1e3)
# f(x) gradient (optional):
p.df = lambda x: sign(x-5)
p.lb = 5*ones(N) + sin(arange(N)) - 0.1
p.ub = 5*ones(N) + sin(arange(N)) + 0.1
p.lb[:N/4] = -inf
p.ub[3*N/4:] = inf
#p.ub[4] = 4
#p.lb[5], p.ub[5] = 8, 15
#A = zeros((K, N))
#b = zeros(K)
#for i in xrange(K):
# A[i] = 1+cos(i+arange(N))
# b[i] = sin(i)
#p.A = A
#p.b = b
#p.Aeq = zeros(p.n)
#p.Aeq[100:102] = 1
#p.beq = 11
p.contol = 1e-6
p.plot = 1
p.maxFunEvals = 1e7
p.name = 'nlp_4'
p.debug=1
solver = 'ralg'
solver = 'gsubg'
#solver = 'scipy_cobyla'
solver = 'algencan'
#solver = 'ipopt'
r = p.solve(solver, xlabel = 'time', fTol = 10, debug=0, maxIter = 5500, plot=0, maxTime=1000, ftol = 1e-8, xtol = 1e-6, iprint=1, showLS=0, showFeas=0, show_hs=0)
openopt-0.38+svn1589/openopt/examples/nlp_5.py 0000664 0000000 0000000 00000005217 11757757505 0021202 0 ustar 00root root 0000000 0000000 from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros, sqrt, asscalar, inf
from string import rjust, ljust, expandtabs, center, lower
N = 100
M = 5
Power = 1.13
ff = lambda x: (abs(x-M) ** Power).sum()
x0 = cos(arange(N))
c = [lambda x: 2* x[0] **4-32, lambda x: x[1]**2+x[2]**2 - 8]
#c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8000]
h1 = lambda x: 1e1*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
h = (h1, h2)
lb = -6*ones(N)
ub = 6*ones(N)
lb[3] = 5.5
ub[4] = 4.5
gtol=1e-6
ftol = 1e-6
diffInt = 1e-8
contol = 1e-6
maxFunEvals = 1e6
maxTime = 10
Xlabel = 'time'
PLOT = 0
colors = ['b', 'k', 'y', 'g', 'r', 'm', 'c']
###############################################################
solvers = ['ralg', 'scipy_cobyla', 'lincher', 'scipy_slsqp', 'ipopt','algencan']
#solvers = ['ralg', 'ipopt']
#solvers = ['ralg', 'scipy_cobyla', 'lincher', 'scipy_slsqp', 'ipopt']
#solvers = ['ralg', 'ralg3']
solvers = ['ralg']
###############################################################
lines, results = [], {}
for j, solver in enumerate(solvers):
p = NLP(ff, x0, xlabel = Xlabel, c=c, h=h, lb = lb, ub = ub, gtol=gtol, diffInt = diffInt, ftol = ftol, maxIter = 1390, plot = PLOT, color = colors[j], iprint = 10, df_iter = 4, legend = solver, show=False, contol = contol, maxTime = maxTime, maxFunEvals = maxFunEvals, name='NLP_5')
if solver =='algencan':
p.gtol = 1e-2
elif solver == 'ralg':
pass
#p.debug = 1
p.debug = 1
r = p.solve(solver)
for fn in ('h','c'):
if not r.evals.has_key(fn): r.evals[fn]=0 # if no c or h are used in problem
results[solver] = (r.ff, p.getMaxResidual(r.xf), r.elapsed['solver_time'], r.elapsed['solver_cputime'], r.evals['f'], r.evals['c'], r.evals['h'])
if PLOT:
subplot(2,1,1)
F0 = asscalar(p.f(p.x0))
lines.append(plot([0, 1e-15], [F0, F0], color= colors[j]))
if PLOT:
for i in range(2):
subplot(2,1,i+1)
legend(lines, solvers)
subplots_adjust(bottom=0.2, hspace=0.3)
xl = ['Solver f_opt MaxConstr Time CPUTime fEvals cEvals hEvals']
for i in range(len(results)):
s=(ljust(lower(solvers[i]), 40-len(solvers[i]))+'%0.3f'% (results[solvers[i]][0]) + ' %0.1e' % (results[solvers[i]][1]) + (' %0.2f'% (results[solvers[i]][2])) + ' %0.2f '% (results[solvers[i]][3]) + str(results[solvers[i]][4]) + ' ' + rjust(str(results[solvers[i]][5]), 5) + ' '*8 +str(results[solvers[i]][6]))
xl.append(s)
xl = '\n'.join(xl)
subplot(2,1,1)
xlabel(Xlabel)
from pylab import *
subplot(2,1,2)
xlabel(xl)
show()
openopt-0.38+svn1589/openopt/examples/nlp_ALGENCAN.py 0000664 0000000 0000000 00000007752 11757757505 0022214 0 ustar 00root root 0000000 0000000 from openopt import NLP
from numpy import cos, arange, ones, asarray, zeros, mat, array
N = 50
# objfunc:
# (x0-1)^4 + (x2-1)^4 + ... +(x49-1)^4 -> min (N=nVars=50)
f = lambda x : ((x-1)**4).sum()
x0 = cos(arange(N))
p = NLP(f, x0, maxIter = 1e3, maxFunEvals = 1e5)
# f(x) gradient (optional):
p.df = lambda x: 4*(x-1)**3
# lb<= x <= ub:
# x4 <= -2.5
# 3.5 <= x5 <= 4.5
# all other: lb = -5, ub = +15
p.lb = -5*ones(N)
p.ub = 15*ones(N)
p.ub[4] = -2.5
p.lb[5], p.ub[5] = 3.5, 4.5
# Ax <= b
# x0+...+xN>= 1.1*N
# x9 + x19 <= 1.5
# x10+x11 >= 1.6
p.A = zeros((3, N))
p.A[0, 9] = 1
p.A[0, 19] = 1
p.A[1, 10:12] = -1
p.A[2] = -ones(N)
p.b = [1.5, -1.6, -1.1*N]
# you can use any types of A, Aeq, b, beq:
# Python list, numpy.array, numpy.matrix, even Python touple
# so p.b = array([1.5, -1.6, -825]) or p.b = (1.5, -1.6, -825) are valid as well
# Aeq x = beq
# x20+x21 = 2.5
p.Aeq = zeros(N)
p.Aeq[20:22] = 1
p.beq = 2.5
# non-linear inequality constraints c(x) <= 0
# 2*x0^4 <= 1/32
# x1^2+x2^2 <= 1/8
# x25^2 +x25*x35 + x35^2<= 2.5
p.c = lambda x: [2* x[0] **4-1./32, x[1]**2+x[2]**2 - 1./8, x[25]**2 + x[35]**2 + x[25]*x[35] -2.5]
# other valid c:
# p.c = [lambda x: c1(x), lambda x : c2(x), lambda x : c3(x)]
# p.c = (lambda x: c1(x), lambda x : c2(x), lambda x : c3(x))
# p.c = lambda x: numpy.array(c1(x), c2(x), c3(x))
# def c(x):
# return c1(x), c2(x), c3(x)
# p.c = c
# dc(x)/dx: non-lin ineq constraints gradients (optional):
def DC(x):
    """Jacobian of the three nonlinear inequality constraints c(x); shape (3, N)."""
    jac = zeros((3, N))
    # d/dx0 of 2*x0^4 - 1/32
    jac[0, 0] = 8 * x[0] ** 3
    # gradient of x1^2 + x2^2 - 1/8
    jac[1, 1] = 2 * x[1]
    jac[1, 2] = 2 * x[2]
    # gradient of x25^2 + x25*x35 + x35^2 - 2.5
    jac[2, 25] = 2 * x[25] + x[35]
    jac[2, 35] = 2 * x[35] + x[25]
    return jac
p.dc = DC
# non-linear equality constraints h(x) = 0
# 1e6*(x[last]-1)**4 = 0
# (x[last-1]-1.5)**4 = 0
p.h = lambda x: (1e4*(x[-1]-1)**4, (x[-2]-1.5)**4)
# dh(x)/dx: non-lin eq constraints gradients (optional):
def DH(x):
    """Jacobian of the two nonlinear equality constraints h(x); shape (2, p.n)."""
    jac = zeros((2, p.n))
    jac[0, -1] = 4e4 * (x[-1] - 1) ** 3      # d/dx of 1e4*(x-1)^4
    jac[1, -2] = 4 * (x[-2] - 1.5) ** 3      # d/dx of (x-1.5)^4
    return jac
p.dh = DH
p.contol = 1e-3 # required constraints tolerance, default for NLP is 1e-6
# for ALGENCAN solver gtol is the only one stop criterium connected to openopt
# (except maxfun, maxiter)
# Note that in ALGENCAN gtol means norm of projected gradient of the Augmented Lagrangian
# so it should be something like 1e-3...1e-5
p.gtol = 1e-5 # gradient stop criterium (default for NLP is 1e-6)
# see also: help(NLP) -> maxTime, maxCPUTime, ftol and xtol
# that are connected to / used in lincher and some other solvers
# optional: check of user-supplied derivatives
p.checkdf()
p.checkdc()
p.checkdh()
# last but not least:
# please don't forget,
# Python indexing starts from ZERO!!
p.plot = 0
p.iprint = 0
p.df_iter = 4
p.maxTime = 4000
p.debug=1
#r = p.solve('algencan')
r = p.solve('ralg')
#r = p.solve('lincher')
"""
typical output:
OpenOpt checks user-supplied gradient df (size: (50,))
according to:
prob.diffInt = 1e-07
prob.check.maxViolation = 1e-05
max(abs(df_user - df_numerical)) = 2.50111104094e-06
(is registered in df number 41)
sum(abs(df_user - df_numerical)) = 4.45203815948e-05
========================
OpenOpt checks user-supplied gradient dc (size: (50, 3))
according to:
prob.diffInt = 1e-07
prob.check.maxViolation = 1e-05
max(abs(dc_user - dc_numerical)) = 1.20371180401e-06
(is registered in dc number 0)
sum(abs(dc_user - dc_numerical)) = 1.60141862837e-06
========================
OpenOpt checks user-supplied gradient dh (size: (50, 2))
according to:
prob.diffInt = 1e-07
prob.check.maxViolation = 1e-05
dh num i,j:dh[i]/dx[j] user-supplied numerical difference
98 49 / 0 -1.369e+04 -1.369e+04 -2.941e-03
max(abs(dh_user - dh_numerical)) = 0.00294061290697
(is registered in dh number 98)
sum(abs(dh_user - dh_numerical)) = 0.00294343472179
========================
starting solver ALGENCAN (GPL license) with problem unnamed
solver ALGENCAN has finished solving the problem unnamed
istop: 1000
Solver: Time elapsed = 0.34 CPU Time Elapsed = 0.34
objFunValue: 190.041570332 (feasible, max constraint = 0.000677961)
"""
openopt-0.38+svn1589/openopt/examples/nlp_bench_1.py 0000664 0000000 0000000 00000006475 11757757505 0022344 0 ustar 00root root 0000000 0000000 from openopt import *
from numpy import cos, arange, ones, asarray, abs, zeros, sqrt, asscalar, inf
from pylab import legend, show, plot, subplot, xlabel, subplots_adjust
from string import rjust, ljust, expandtabs, center, lower
N = 10
M = 5
Power = 1.13
ff = lambda x: (abs(x-M) ** Power).sum()
x0 = cos(arange(N))
c = [lambda x: 2* x[0] **4-32, lambda x: x[1]**2+x[2]**2 - 8]
#c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8000]
h1 = lambda x: 1e1*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
h = (h1, h2)
lb = -6*ones(N)
ub = 6*ones(N)
lb[3] = 5.5
ub[4] = 4.5
gtol=1e-6
ftol = 1e-6
diffInt = 1e-8
contol = 1e-6
xtol = 1e-9
maxFunEvals = 1e6
maxTime = 1000
Xlabel = 'time'
PLOT = 1
colors = ['k', 'r', 'b', 'g', 'r', 'm', 'c']
###############################################################
solvers = ['ralg', 'scipy_cobyla', 'lincher', 'scipy_slsqp', 'ipopt','algencan']
#solvers = ['ralg', 'ipopt']
solvers = ['ralg', 'scipy_cobyla', 'lincher', 'scipy_slsqp', 'ipopt']
#solvers = ['ralg', 'scipy_slsqp', 'scipy_cobyla', 'algencan']
#solvers = ['ipopt','ralg', 'algencan']
#solvers = ['ralg', 'scipy_cobyla']
#solvers = ['ralg']
solvers = ['gsubg', 'ralg', 'scipy_cobyla']
solvers = ['gsubg', 'ipopt']
#solvers = ['gsubg', 'ipopt', 'scipy_cg']
#solvers = ['ipopt']*3
#solvers = ['ipopt']
solvers = [oosolver('gsubg', dilation = False)]
#lb = [-1]*N
###############################################################
lines, results = [], {}
legends = solvers
h = None
for j, solver in enumerate(solvers):
p = NLP(ff, x0, xlabel = Xlabel, c=c, h=h, lb = lb, ub = ub, gtol=gtol, xtol = xtol, diffInt = diffInt, ftol = ftol, fTol = 1e-1,
maxIter = 1390, plot = PLOT, color = colors[j], iprint = 1, df_iter = 4, legend = solver, show=False,
contol = contol, maxTime = maxTime, maxFunEvals = maxFunEvals, name='NLP_bench_1')
p.legend = legends[j]
if solver =='algencan':
p.gtol = 1e-2
elif solver == 'ralg':
pass
#p.debug = 1
r = p.solve(solver)
from numpy import *
xx = array([ 1.99999982, 2.11725165, 1.87543228, 5.00000823, 4.50000036,
5.00000278, 5.00001633, 5.00000858, 1.5299053 , 1.01681614])
for fn in ('h','c'):
if not r.evals.has_key(fn): r.evals[fn]=0 # if no c or h are used in problem
results[solver] = (r.ff, p.getMaxResidual(r.xf), r.elapsed['solver_time'], r.elapsed['solver_cputime'], r.evals['f'], r.evals['c'], r.evals['h'])
if PLOT:
subplot(2,1,1)
F0 = asscalar(p.f(p.x0))
lines.append(plot([0, 1e-15], [F0, F0], color= colors[j]))
if PLOT:
for i in range(2):
subplot(2,1,i+1)
legend(lines, legends)
subplots_adjust(bottom=0.2, hspace=0.3)
xl = ['Solver f_opt MaxConstr Time CPUTime fEvals cEvals hEvals']
for i in range(len(results)):
s=(ljust(lower(solvers[i]), 40-len(solvers[i]))+'%0.3f'% (results[solvers[i]][0]) + ' %0.1e' % (results[solvers[i]][1]) + (' %0.2f'% (results[solvers[i]][2])) + ' %0.2f '% (results[solvers[i]][3]) + str(results[solvers[i]][4]) + ' ' + rjust(str(results[solvers[i]][5]), 5) + ' '*8 +str(results[solvers[i]][6]))
xl.append(s)
xl = '\n'.join(xl)
subplot(2,1,1)
xlabel(Xlabel)
from pylab import *
subplot(2,1,2)
xlabel(xl)
show()
openopt-0.38+svn1589/openopt/examples/nlp_bench_2.py 0000664 0000000 0000000 00000006642 11757757505 0022341 0 ustar 00root root 0000000 0000000 from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros, sqrt, sign, asscalar
from pylab import legend, show, plot, subplot, xlabel, subplots_adjust
from string import rjust, ljust, expandtabs, center, lower
from scipy import rand
N = 10
M = 5
s = 1.3
f = lambda x: (abs(x-M) ** s).sum()
df = lambda x: s * sign(x-M) * abs(x-M) ** (s-1)
x0 = cos(arange(N)) #+ rand(N)
c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]
def dc(x):
    """Jacobian of the nonlinear inequality constraints c; shape (len(c(x0)), p.n)."""
    n_constraints = len(c(x0))
    jac = zeros((n_constraints, p.n))
    jac[0, 0] = 8 * x[0] ** 3      # d/dx0 of 2*x0^4 - 32
    jac[1, 1] = 2 * x[1]           # gradient of x1^2 + x2^2 - 8
    jac[1, 2] = 2 * x[2]
    return jac
K = 1e2
h1 = lambda x: K*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
h3 = lambda x: (x[-5]+x[-6]-2*M+1.5)**6
h = lambda x: (h1(x), h2(x), h3(x))
def dh(x):
    """Jacobian of the nonlinear equality constraints h = (h1, h2, h3); shape (3, N)."""
    jac = zeros((3, N))
    jac[0, -1] = 4 * K * (x[-1] - 1) ** 3    # d/dx of K*(x-1)^4
    jac[1, -2] = 4 * (x[-2] - 1.5) ** 3      # d/dx of (x-1.5)^4
    # h3 depends on x[-5] and x[-6] symmetrically: same partial for both
    common = 6 * (x[-5] + x[-6] - 2 * M + 1.5) ** 5
    jac[2, -5] = common
    jac[2, -6] = common
    return jac
lb = -6*ones(N)
ub = 6*ones(N)
lb[3] = 5.5
ub[4] = 4.5
gtol=1e-1
ftol = 1e-6
xtol = 1e-6
diffInt = 1e-8
contol = 1e-6
maxTime = 10
maxIter = 13700
colors = ['b', 'k', 'y', 'g', 'r']
########################################
solvers = ['ralg', 'scipy_cobyla', 'lincher', 'scipy_slsqp']
#solvers = ['scipy_cobyla', 'ralg']
#solvers = ['lincher', 'scipy_cobyla']
solvers = [ 'ralg','scipy_cobyla']
#solvers = [ 'algencan','algencan2']
#solvers = [ 'ralg', 'ralg3']
########################################
colors = colors[:len(solvers)]
lines, results = [], {}
# Run each solver on the benchmark problem and collect statistics.
for j in range(len(solvers)):
    solver = solvers[j]
    color = colors[j]
    p = NLP(f, x0, name = 'bench2', df = df, c=c, dc = dc, h=h, dh = dh, lb = lb, ub = ub, gtol=gtol, ftol = ftol, maxFunEvals = 1e7, maxIter = maxIter, maxTime = maxTime, plot = 1, color = color, iprint = 10, legend = [solvers[j]], show=False, contol = contol)
#    p = NLP(f, x0, name = 'bench2', df = df, c=c, dc = dc, lb = lb, ub = ub, gtol=gtol, ftol = ftol, maxFunEvals = 1e7, maxIter = 1e4, maxTime = maxTime, plot = 1, color = color, iprint = 0, legend = [solvers[j]], show=False, contol = contol)
    # FIX: was `if solver[:4] == ['ralg']:` -- a str slice compared to a list
    # is always False, so the branch could never trigger
    if solver[:4] == 'ralg':
        pass
#        p.gtol = 1e-8
#        p.ftol = 1e-7
#        p.xtol = 1e-7
    elif solver == 'lincher':
        #p.iprint = 1
        p.maxTime = 1e15
        p.maxIter = 100
##        p.check.df = 1
##        p.check.dc = 1
##        p.check.dh = 1
    r = p.solve(solver)
    # FIX: dict.has_key was removed in Python 3; `in` works in both 2 and 3
    for fn in ('h','c'):
        if fn not in r.evals: r.evals[fn] = 0 # if no c or h are used in problem
    results[solver] = (r.ff, p.getMaxResidual(r.xf), r.elapsed['solver_time'], r.elapsed['solver_cputime'], r.evals['f'], r.evals['c'], r.evals['h'])
    subplot(2,1,1)
    F0 = asscalar(p.f(p.x0))
    lines.append(plot([0, 1e-15], [F0, F0], color= colors[j]))
for i in range(2):
subplot(2,1,i+1)
legend(lines, solvers)
subplots_adjust(bottom=0.2, hspace=0.3)
xl = ['Solver f_opt MaxConstr Time CPUTime fEvals cEvals hEvals']
for i in range(len(results)):
s=(ljust(lower(solvers[i]), 40-len(solvers[i]))+'%0.3f'% (results[solvers[i]][0]) + ' %0.1e' % (results[solvers[i]][1]) + (' %0.2f'% (results[solvers[i]][2])) + ' %0.2f '% (results[solvers[i]][3]) + str(results[solvers[i]][4]) + ' ' + rjust(str(results[solvers[i]][5]), 5) + ' '*5 +str(results[solvers[i]][6]))
xl.append(s)
xl = '\n'.join(xl)
subplot(2,1,1)
xlabel('Time elapsed (without graphic output), sec')
from pylab import *
subplot(2,1,2)
xlabel(xl)
show()
openopt-0.38+svn1589/openopt/examples/nlp_d2f.py 0000664 0000000 0000000 00000001466 11757757505 0021513 0 ustar 00root root 0000000 0000000 """
this is an example of using d2f - Hesse matrix (2nd derivatives)
d2c, d2h, d2l are intended to be implemented soon
and to be connected to ALGENCAN and/or CVXOPT
and/or other NLP solvers
//Dmitrey
"""
from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros, diag
N = 300
M = 5
ff = lambda x: ((x-M)**4).sum()
p = NLP(ff, cos(arange(N)))
p.df = lambda x: 4*(x-M)**3
p.d2f = lambda x: diag(12*(x-M)**2)
# other valid assignment:
# p = NLP(lambda x: ((x-M)**4).sum(), cos(arange(N)), df = lambda x: 4*(x-M)**3, d2f = lambda x: diag(12*(x-M)**2))
# or
# p = NLP(x0 = cos(arange(N)), f = lambda x: ((x-M)**4).sum(), df = lambda x: 4*(x-M)**3, d2f = lambda x: diag(12*(x-M)**2))
r = p.solve('scipy_ncg')
print('objfunc val: %e' % r.ff) # it should be a small positive like 5.23656378549e-08
openopt-0.38+svn1589/openopt/examples/nlsp_1.py 0000664 0000000 0000000 00000002341 11757757505 0021354 0 ustar 00root root 0000000 0000000 """
Solving system of equations:
x[0]**3+x[1]**3-9 = 0
x[0]-0.5*x[1] = 0
cos(x[2])+x[0]-1.5 = 0
"""
from openopt import SNLE
from numpy import asfarray, zeros, cos, sin
#f = lambda x: (x[0]**3+x[1]**3-9, x[0]-0.5*x[1], cos(x[2])+x[0]-1.5)
# or:
f = (lambda x: x[0]**3+x[1]**3-9, lambda x: x[0]-0.5*x[1], lambda x: cos(x[2])+x[0]-1.5)
# Python list, numpy.array are allowed as well:
#f = lambda x: [x[0]**3+x[1]**3-9, x[0]-0.5*x[1], cos(x[2])+x[0]-1.5]
#or f = lambda x: asfarray((x[0]**3+x[1]**3-9, x[0]-0.5*x[1], cos(x[2])+x[0]-1.5))
# start point
x0 = [8,15, 80]
#optional: gradient
df = (lambda x: [3*x[0]**2, 3*x[1]**2, 0], lambda x: [1, -0.5, 0], lambda x:[1, 0, -sin(x[2])])
#w/o gradient:
#p = SNLE(f, x0)
p = SNLE(f, x0, df = df)
#optional: user-supplied gradient check:
#p.checkdf()
#optional: graphical output, requires matplotlib installed
p.plot = 1
#r = p.solve('scipy_fsolve')
r = p.solve('nssolve')
#or using converter to nlp, try to minimize sum(f_i(x)^2):
#r = p.solve('nlp:ralg')
print('solution: %s' % r.xf)
print('max residual: %e' % r.ff)
###############################
#should print:
#solution: [ 1. 2. 55.50147021] (3rd coord may differ due to cos is periodic)
#max residual: 2.72366951215e-09
openopt-0.38+svn1589/openopt/examples/nlsp_constrained.py 0000664 0000000 0000000 00000003673 11757757505 0023536 0 ustar 00root root 0000000 0000000 """
Solving system of equations:
x[0]**3+x[1]**3-9 = 0
x[0]-0.5*x[1] = 0
cos(x[2])+x[0]-1.5 = 0
with some constraints:
150 <= x[2] <= 158
and possible non-linear constraint:
(x[2] - 150.8)**2 <= 1.5
Note:
1. Using Ax <= b constraints is also allowed
2. You can try using equality constraints (h(x)=0, Aeq x = beq) as well.
3. Required function tolerance is p.ftol, constraints tolerance is p.contol,
and hence using h(x)=0 constraints is not 100% same
to some additional f coords
"""
from openopt import SNLE
from numpy import *
# you can define f in several ways:
f = lambda x: (x[0]**3+x[1]**3-9, x[0]-0.5*x[1], cos(x[2])+x[0]-1.5)
#f = (lambda x: x[0]**3+x[1]**3-9, lambda x: x[0]-0.5*x[1], lambda x: cos(x[2])+x[0]-1.5)
# Python list, numpy.array are allowed as well:
#f = lambda x: [x[0]**3+x[1]**3-9, x[0]-0.5*x[1], cos(x[2])+x[0]-1.5]
#or f = lambda x: asfarray((x[0]**3+x[1]**3-9, x[0]-0.5*x[1], cos(x[2])+x[0]-1.5))
#optional: gradient
def df(x):
    """Analytical 3x3 Jacobian of the equation system f at point x."""
    row0 = [3 * x[0] ** 2, 3 * x[1] ** 2, 0.0]   # d/dx of x0^3 + x1^3 - 9
    row1 = [1.0, -0.5, 0.0]                      # d/dx of x0 - 0.5*x1
    row2 = [1.0, 0.0, -sin(x[2])]                # d/dx of cos(x2) + x0 - 1.5
    return asfarray([row0, row1, row2])
x0 = [8,15, 80]
#w/o gradient:
#p = SNLE(f, x0)
p = SNLE(f, x0, df = df, maxFunEvals = 1e5, iprint = 10, plot=1, ftol = 1e-8, contol=1e-15)
#optional: user-supplied gradient check:
#p.checkdf()
#optional: graphical output, requires matplotlib installed
#p.plot = 1
#set some constraints
p.lb, p.ub = [-inf]*3, [inf]*3
p.lb[2], p.ub[2] = 145, 150
# you could try also comment/uncomment nonlinear constraints:
p.c = lambda x: (x[2] - 146)**2-1.5
# optional: gradient
p.dc = lambda x: asfarray((0, 0, 2*(x[2]-146)))
# also you could set it via p=SNLE(f, x0, ..., c = c, dc = dc)
#optional: user-supplied dc check:
#p.checkdc()
#r = p.solve('nssolve', debug=0, maxIter=1e9)
# using converter to nlp, try to minimize sum(f_i(x)^2):
r = p.solve('nlp:ralg', xlabel='iter', iprint=10, plot=1)
print('solution: %s' % r.xf)
print('max residual: %e' % r.ff)
openopt-0.38+svn1589/openopt/examples/nsp_1.py 0000664 0000000 0000000 00000001625 11757757505 0021204 0 ustar 00root root 0000000 0000000 """
Example:
Solving nonsmooth problem
|x1| + 1.2|x2| + 1.44|x3| + ... + 1.2^N |xN| -> min
N=75
x0 = [cos(1), cos(2), ..., cos(N)]
x_opt = all-zeros
f_opt = 0
"""
from numpy import *
from openopt import NSP
N = 75
objFun = lambda x: sum(1.2 ** arange(len(x)) * abs(x))
x0 = cos(1+asfarray(range(N)))
p = NSP(objFun, x0, maxFunEvals = 1e7, xtol = 1e-8)
#These assignments are also correct:
#p = NLP(objFun, x0=x0)
#p = NLP(f=objFun, x0=x0)
#p = NLP(ftol = 1e-5, contol = 1e-5, f=objFun, x0=x0)
p.maxIter = 5000
#optional (requires matplotlib installed)
#p.plot = 1
#p.graphics.xlabel = 'cputime'#default: time, case-unsensetive; also here maybe 'cputime', 'niter'
#OPTIONAL: user-supplied gradient/subgradient
p.df = lambda x: 1.2 ** arange(len(x)) * sign(x)
r = p.solve('ralg') # ralg is name of a solver
print('x_opt: %s' % r.xf)
print('f_opt: %f' % r.ff) # should print small positive number like 0.00056
openopt-0.38+svn1589/openopt/examples/nssolveVSfsolve_1.py 0000664 0000000 0000000 00000011322 11757757505 0023560 0 ustar 00root root 0000000 0000000 """
Solving system of equations:
x[0]**3 + x[1]**3 - 9 = 0
x[0] - 0.5*x[1] - 0.15*x[2]= 0
sinh(x[2]) + x[0] - 15 = 0
!! with numerical noise 1e-8 !!
Note that both fsolve and nssolve
get same gradient -
if no user-supplied one is available,
then same OO finite-difference one
is used (according to p.diffInt value)
If you have matplotlib installed,
you'll get a figure.
Typical fsolve fails number
(for scipy 0.6.0)
is ~ 10-15%
This test runs ~ a minute on my AMD 3800+
"""
noise = 1e-8
from openopt import SNLE
from numpy import asfarray, zeros, cos, sin, arange, cosh, sinh, log10, ceil, floor, arange, inf, nanmin, nanmax
from time import time
from scipy import rand
x0 = [8, 15, 0.80]
global count1, count2, count3
count1 = count2 = count3 = 0
def Count1():
    """Bump global call counter #1; returns 0 so it can be added into equation 1."""
    global count1
    count1 += 1
    return 0

def Count2():
    """Bump global call counter #2; returns 0 so it can be added into equation 2."""
    global count2
    count2 += 1
    return 0

def Count3():
    """Bump global call counter #3; returns 0 so it can be added into equation 3."""
    global count3
    count3 += 1
    return 0
# ATTENTION!
# when no user-supplied gradient is available
# nssolve can take benefites from splitting funcs
# so using f=[fun1, fun2, fun3, ...](or f=(fun1, fun2, fun3, ...))
# (where each fun is separate equation)
# is more appriciated than
# f = lambda x: (...)
# or def f(x): (...)
f_without_noise = \
[lambda x: x[0]**2+x[1]**2-9, lambda x: x[0]-0.5*x[1] - 0.15*x[2], lambda x: sinh(x[2])+x[0]-15]
def fvn(x):
    """Maximum absolute residual of the noise-free equations at x."""
    worst = -inf
    for eq in f_without_noise:
        residual = abs(eq(x))
        if residual > worst:
            worst = residual
    return worst
f = [lambda x: x[0]**2+x[1]**2-9 + noise*rand(1)+Count1(), lambda x: x[0]-0.5*x[1] - 0.15*x[2] + noise*rand(1)+Count2(), \
lambda x: sinh(x[2])+x[0]-15 + noise*rand(1)+Count3()]# + (2007 * x[3:]**2).tolist()
#optional: gradient
##def DF(x):
## df = zeros((3,3))
## df[0,0] = 3*x[0]**2
## df[0,1] = 3*x[1]**2
## df[1,0] = 1
## df[1,1] = -0.5
## df[1,2] = -0.15
## df[2,0] = 1
## df[2,2] = cosh(x[2])
## return df
N = 100
desired_ftol = 1e-6
assert desired_ftol - noise*len(x0) > 1e-7
#w/o gradient:
scipy_fsolve_failed, fs = 0, []
print '----------------------------------'
print 'desired ftol:', desired_ftol, 'objFunc noise:', noise
############################################################################
print '---------- fsolve fails ----------'
t = time()
print 'N log10(MaxResidual) MaxResidual'
for i in xrange(N):
p = SNLE(f, x0, ftol = desired_ftol - noise*len(x0), iprint = -1, maxFunEvals = int(1e7))
r = p.solve('scipy_fsolve')
v = fvn(r.xf)
fs.append(log10(v))
if v > desired_ftol:
scipy_fsolve_failed += 1
print i+1, ' %0.2f ' % log10(v), v
else:
print i+1, 'OK'
print 'fsolve time elapsed', time()-t
#print 'fsolve_failed number:', scipy_fsolve_failed , '(from', N, '),', 100.0*scipy_fsolve_failed / N, '%'
print 'counters:', count1, count2, count3
############################################################################
count1 = count2 = count3 = 0
t = time()
print '---------- nssolve fails ---------'
nssolve_failed, ns = 0, []
print 'N log10(MaxResidual) MaxResidual'
for i in xrange(N):
p = SNLE(f, x0, ftol = desired_ftol - noise*len(x0), iprint = -1, maxFunEvals = int(1e7))
r = p.solve('nssolve')
#r = p.solve('nlp:amsg2p')
#r = p.solve('nlp:ralg')
v = fvn(r.xf)
ns.append(log10(v))
if v > desired_ftol:
nssolve_failed += 1
print i+1, ' %0.2f ' % log10(v), v
else:
print i+1, 'OK'
print 'nssolve time elapsed', time()-t
print 'nssolve_failed number:', nssolve_failed , '(from', N, '),', 100.0 * nssolve_failed / N, '%'
print 'counters:', count1, count2, count3
############################################################################
print '------------ SUMMARY -------------'
print 'fsolve_failed number:', scipy_fsolve_failed , '(from', N, '),', 100.0*scipy_fsolve_failed / N, '%'
print 'nssolve_failed number:', nssolve_failed , '(from', N, '),', 100.0 * nssolve_failed / N, '%'
#try:
from pylab import *
subplot(2,1,1)
grid(1)
title('scipy.optimize fsolve fails to achive desired ftol: %0.1f%%' %(100.0*scipy_fsolve_failed / N))
xmin1, xmax1 = floor(nanmin(fs)), ceil(nanmax(fs))+1
hist(fs, arange(xmin1, xmax1))
#xlabel('log10(maxResidual)')
axvline(log10(desired_ftol), color='green', linewidth=3, ls='--')
[ymin1, ymax1] = ylim()
################
subplot(2,1,2)
grid(1)
title('openopt nssolve fails to achive desired ftol: %0.1f%%' % (100.0*nssolve_failed / N))
xmin2, xmax2 = floor(nanmin(ns)), ceil(nanmax(ns))+1
#hist(ns, 5)
hist(ns, arange(xmin2, xmax2))
xlabel('log10(maxResidual)')
axvline(log10(desired_ftol), color='green', linewidth=3, ls='--')
[ymin2, ymax2] = ylim()
################
xmin, xmax = min(xmin1, xmin2) - 0.1, max(xmax1, xmax2) + 0.1
ymin, ymax = 0, max(ymax1, ymax2) * 1.05
subplot(2,1,1)
xlim(xmin, xmax)
ylim(0, ymax)
subplot(2,1,2)
xlim(xmin, xmax)
show()
#except:
# pass
openopt-0.38+svn1589/openopt/examples/qcqp_1.py 0000664 0000000 0000000 00000002066 11757757505 0021350 0 ustar 00root root 0000000 0000000 """
Concider the MIQCQP problem
0.5 * (x1^2 + 2x2^2 + 3x3^2) + 15x1 + 8x2 + 80x3 -> min (1)
subjected to
x1 + 2x2 + 3x3 <= 150 (2)
8x1 + 15x2 + 80x3 <= 800 (3)
x2 - x3 = 25.5 (4)
x1 <= 15 (5)
x1^2 + 2.5 x2^2 + 3 x3^2 + 0.1 x1 + 0.2 x2 + 0.3 x3 - 1000 <= 0 (6)
2 x1^2 + x2^2 + 3 x3^2 + 0.1 x1 + 0.5 x2 + 0.3 x3 <= 1000 (7)
x1, x3 are integers
"""
from numpy import diag, matrix, inf
from openopt import QP
H = diag([1.0, 2.0,3.0])
f = [15,8,80]
A = matrix('1 2 3; 8 15 80')
b = [150, 800]
# QC should be list or tuple of triples (P, q, s): 0.5 x^T P x + q x + s <= 0
QC = ((diag([1.0, 2.5, 3.0]), [0.1, 0.2, 0.3], -1000), (diag([2.0, 1.0, 3.0]), [0.1, 0.5, 0.3], -1000))
p = QP(H, f, A = A, b = b, Aeq = [0, 1, -1], beq = 25.5, ub = [15,inf,inf], QC = QC, name='OpenOpt QCQP example 1')
# or p = QP(H=diag([1,2,3]), f=[15,8,80], ...)
r = p.solve('cplex', iprint = 0, plot=1)
f_opt, x_opt = r.ff, r.xf
# x_opt = array([ -2.99999999, 9.5 , -16. ])
# f_opt = -770.24999989134858
openopt-0.38+svn1589/openopt/examples/qp_1.py 0000664 0000000 0000000 00000001204 11757757505 0021015 0 ustar 00root root 0000000 0000000 """
Example:
Concider the problem
0.5 * (x1^2 + 2x2^2 + 3x3^2) + 15x1 + 8x2 + 80x3 -> min (1)
subjected to
x1 + 2x2 + 3x3 <= 150 (2)
8x1 + 15x2 + 80x3 <= 800 (3)
x2 - x3 = 25.5 (4)
x1 <= 15 (5)
"""
from numpy import diag, matrix, inf
from openopt import QP
p = QP(diag([1, 2, 3]), [15, 8, 80], A = matrix('1 2 3; 8 15 80'), b = [150, 800], Aeq = [0, 1, -1], beq = 25.5, ub = [15,inf,inf])
# or p = QP(H=diag([1,2,3]), f=[15,8,80], A= ...)
r = p._solve('cvxopt_qp', iprint = 0)
f_opt, x_opt = r.ff, r.xf
# x_opt = array([-15. , -2.5, -28. ])
# f_opt = -1190.25
openopt-0.38+svn1589/openopt/examples/sdp_1.py 0000664 0000000 0000000 00000001643 11757757505 0021172 0 ustar 00root root 0000000 0000000 """
This is OpenOpt SDP example,
for the problem
http://openopt.org/images/1/12/SDP.png
"""
from numpy import mat
from openopt import SDP
S, d = {}, {}
S[0, 0] = mat('-7 -11; -11 3') # numpy array, array-like, CVXOPT matrix are allowed as well
S[0, 1] = mat('7 -18; -18 8')
S[0, 2] = mat('-2 -8; -8 1')
d[0] = mat('33, -9; -9, 26')
S[1, 0] = mat('-21 -11 0; -11 10 8; 0 8 5')
S[1, 1] = mat('0 10 16; 10 -10 -10; 16 -10 3')
S[1, 2] = mat('-5 2 -17; 2 -6 8; -17 -7 6')
d[1] = mat('14, 9, 40; 9, 91, 10; 40, 10, 15')
p = SDP([1, -1, 1], S = S, d = d)
# Also you can use A, b, Aeq, beq for linear matrix (in)equality constraints
# and lb, ub for box-bound constraints lb <= x <= ub
# see /examples/lp_1.py
#r = p.solve('cvxopt_sdp', iprint = 0)
r = p.solve('dsdp', iprint = -1)
f_opt, x_opt = r.ff, r.xf
print('x_opt: %s' % x_opt)
print('f_opt: %s' % f_opt)
#x_opt: [-0.36766609 1.89832827 -0.88755043]
#f_opt: -3.15354478797
openopt-0.38+svn1589/openopt/examples/sle_1.py 0000664 0000000 0000000 00000000532 11757757505 0021163 0 ustar 00root root 0000000 0000000 __docformat__ = "restructuredtext en"
from numpy import *
from openopt import SLE
N = 1000
C = empty((N,N))
d = 1.5+80*sin(arange(N))
for j in xrange(N):
C[j] = 8*sin(4.0+arange(j, N+j)**2) + 15*cos(j)
p = SLE(C, d)
#r = p.solve('defaultSLEsolver'), or just
r = p.solve()
print('max residual: %e' % r.ff)
#print('solution: %s' % r.xf)
openopt-0.38+svn1589/openopt/examples/snle_1.py 0000775 0000000 0000000 00000002416 11757757505 0021347 0 ustar 00root root 0000000 0000000 """
Solving system of equations:
x[0]**3+x[1]**3-9 = 0
x[0]-0.5*x[1] = 0
cos(x[2])+x[0]-1.5 = 0
"""
from openopt import SNLE
from numpy import asfarray, zeros, cos, sin
#f = lambda x: (x[0]**3+x[1]**3-9, x[0]-0.5*x[1], cos(x[2])+x[0]-1.5)
# or:
f = (lambda x: x[0]**3+x[1]**3-9, lambda x: x[0]-0.5*x[1], lambda x: cos(x[2])+x[0]-1.5)
# Python list, numpy.array are allowed as well:
#f = lambda x: [x[0]**3+x[1]**3-9, x[0]-0.5*x[1], cos(x[2])+x[0]-1.5]
#or f = lambda x: asfarray((x[0]**3+x[1]**3-9, x[0]-0.5*x[1], cos(x[2])+x[0]-1.5))
# start point
x0 = [8,15, 80]
#optional: gradient
df = (lambda x: [3*x[0]**2, 3*x[1]**2, 0], lambda x: [1, -0.5, 0], lambda x:[1, 0, -sin(x[2])])
#w/o gradient:
#p = NLSP(f, x0)
p = SNLE(f, x0, df = df)
#optional: user-supplied gradient check:
#p.checkdf()
#optional: graphical output, requires matplotlib installed
p.plot = 1
p.maxFunEvals = 1e5
p.iprint = 10
#r = p.solve('scipy_fsolve')
#r = p.solve('nssolve')
#or using converter nlsp2nlp, try to minimize sum(f_i(x)^2):
r = p.solve('nlp:ralg', plot=1)
print('solution: %s' % r.xf)
print('max residual: %e' % r.ff)
###############################
#should print:
#solution: [ 1. 2. 55.50147021] (3rd coord may differ due to cos is periodic)
#max residual: 2.72366951215e-09
openopt-0.38+svn1589/openopt/examples/socp_1.py 0000664 0000000 0000000 00000001257 11757757505 0021351 0 ustar 00root root 0000000 0000000 """
OpenOpt SOCP example
for the problem http://openopt.org/images/2/28/SOCP.png
"""
from numpy import *
from openopt import SOCP
f = array([-2, 1, 5])
C0 = mat('-13 3 5; -12 12 -6')
d0 = [-3, -2]
q0 = array([-12, -6, 5])
s0 = -12
C1 = mat('-3 6 2; 1 9 2; -1 -19 3')
d1 = [0, 3, -42]
q1 = array([-3, 6, -10])
s1 = 27
p = SOCP(f, C=[C0, C1], d=[d0, d1], q=[q0, q1], s=[s0, s1])
# you could add lb <= x <= ub, Ax <= b, Aeq x = beq constraints
# via p = SOCP(f, ..., A=A, b=b, Aeq=Aeq, beq=beq,lb=lb, ub=ub)
r = p.solve('cvxopt_socp')
x_opt, f_opt = r.xf, r.ff
print(' f_opt: %f x_opt: %s' % (f_opt, x_opt))
# f_opt: -38.346368 x_opt: [-5.01428121 -5.76680444 -8.52162517]
openopt-0.38+svn1589/openopt/kernel/ 0000775 0000000 0000000 00000000000 11757757505 0017250 5 ustar 00root root 0000000 0000000 openopt-0.38+svn1589/openopt/kernel/DFP.py 0000664 0000000 0000000 00000006007 11757757505 0020236 0 ustar 00root root 0000000 0000000 from baseProblem import NonLinProblem
from ooMisc import assignScript
from numpy import sum, dot, asfarray, atleast_2d, array, zeros
import NLP
class DFP(NonLinProblem):
    """
    Data Fit Problem: given a model f(coeffs, X_i) and observations Y_i,
    find coeffs minimizing sum_i (f(coeffs, X_i) - Y_i)^2.

    Each squared residual is kept as a separate objective component
    (isObjFunValueASingleNumber = False); objFuncMultiple2Single sums
    them into the scalar objective value.
    """
    _optionalData = ['lb', 'ub', 'A', 'Aeq', 'b', 'beq', 'c', 'h']
    probType = 'DFP'
    expectedArgs = ['f', 'x0', 'X', 'Y']
    allowedGoals = ['minimum', 'min']
    goal = 'minimum'
    showGoal = False
    isObjFunValueASingleNumber = False

    def _Prepare(self):
        """Validate/orient the data arrays and wrap the user gradient into a sum-of-squares gradient."""
        self.X = atleast_2d(self.X)
        self.Y = array(self.Y, float)
        # rows of X must correspond to observations in Y; transpose if X
        # was supplied the other way round, otherwise report a shape error
        if self.X.shape[0] != self.Y.shape[0]:
            if self.X.shape[1] != self.Y.shape[0]:
                self.err('incorrect shape of input data')
            else:
                self.X = self.X.T
        NonLinProblem._Prepare(self)
        #if self.Y.ndim
        if self.userProvided.df:
            assert len(self.user.df) == 1
            self.dfff = self.user.df[0]  # keep raw user gradient; restored in __finalize__
            def dff(x):
                # gradient of sum_i (f(x, X_i) - Y_i)^2 via the chain rule
                r = zeros(self.n)
                for i in range(self.Y.shape[0]):
                    #print asfarray(self.fff(x, self.X[i])-self.Y[i]), asfarray(self.dfff(x, self.X[i]))
                    r += dot(2.0 * asfarray(self.fff(x, self.X[i])-self.Y[i]), asfarray(self.dfff(x, self.X[i])))
                return r
            self.df = self.user.df = dff

    def __finalize__(self):
        """Restore the user-supplied f and df that were wrapped in __init__/_Prepare."""
        NonLinProblem.__finalize__(self)
        if self.userProvided.df:
            self.df = self.dfff
        self.f = self.fff

    def __init__(self, *args, **kwargs):
        NonLinProblem.__init__(self, *args, **kwargs)
        assignScript(self, kwargs)
        self.fff = self.f  # keep the raw model; self.f is replaced by per-point squared residuals
        def ff(x):
            # list of squared residuals, one entry per observation
            r = []
            for i in range(self.Y.shape[0]):
                r.append(asfarray(self.fff(x, self.X[i])-self.Y[i])**2)
            return r
        self.f = ff

    def objFuncMultiple2Single(self, fv):
        """Collapse the vector of squared residuals into the scalar objective (their sum)."""
        assert all(fv.flatten()>=0)
        return fv.sum() # they are squared in other place

    def dfp2nlp(self, solver, **solver_params):
        """Convert this DFP to an NLP (minimize the residual sum) and solve it with the given solver."""
        ff = lambda x: (asfarray(self.f(x))).sum()
        if self.userProvided.df:
            #dff = lambda x: dot(2*asfarray(ff(x)), asfarray(self.df(x)))
            p = NLP.NLP(ff, self.x0, df=self.df)
        else:
            p = NLP.NLP(ff, self.x0)
        #p = NLP.NLP(lambda x: self.f(x), self.x0)
        #if self.userProvided.df: p.df = dff
        self.inspire(p, sameConstraints=True)
        p.f = ff # to prevent overwriting
        def dfp_iterfcn(*args, **kwargs):
            # relay iteration callbacks and stop conditions between the DFP and the NLP
            self.iterfcn(*args, **kwargs)
            if self.istop != 0: p.istop, p.msg = self.istop, self.msg
            tmp_iterfcn(*args, **kwargs)
            if p.istop != 0: self.istop, self.msg = p.istop, p.msg
        p.iterfcn, tmp_iterfcn = dfp_iterfcn, p.iterfcn
        #p.iprint = -1
        self.iprint = -1
        if self.plot:
            # graphics are delegated to the inner NLP
            self.plot, p.plot = 0, 1
            p.show = self.show
        #p.show = False
        p.checkdf()
        r = p.solve(solver, **solver_params)
        #r.ff = ff(r.xf)
        return r
openopt-0.38+svn1589/openopt/kernel/EIG.py 0000664 0000000 0000000 00000007174 11757757505 0020237 0 ustar 00root root 0000000 0000000 from baseProblem import MatrixProblem
#from numpy.linalg import norm
from numpy import vstack, isscalar
class EIG(MatrixProblem):
    """
    Eigenvalue problem for a square matrix C (optionally generalized with
    a matrix M).

    goal is either 'all', or a single-entry dict/tuple/list
    {goal_name: number_of_required_eigenvalues}, where goal_name selects
    which part of the spectrum is wanted ('lm'/'largest magnitude',
    'sr'/'smallest real part', etc).

    C may also be a tuple/list of FuncDesigner point-derivative dicts;
    solve() assembles these into one dense square array before dispatch.
    """
    probType = 'EIG'
    goal = 'all'
    allowedGoals = None
    showGoal = True
    expectedArgs = ['C']
    M = None            # optional second matrix for generalized eigenproblems
    _optionalData = ['M']
    xtol = 0.0
    FuncDesignerSign = 'C'
    N = 0               # number of requested eigenpairs (0 until known)
    #ftol = None

    def __init__(self, *args, **kwargs):
        MatrixProblem.__init__(self, *args, **kwargs)
        if self.goal == 'all':
            Name, name = 'all eigenvectors and eigenvalues', 'all'
            # for a plain array C the count is just its order; for
            # FuncDesigner dicts the size is determined later in solve()
            if not isinstance(self.C[0], dict):
                self.N = self.C.shape[0]
        else:
            assert type(self.goal) in (dict, tuple, list) and len(self.goal) == 1, \
                'EIG goal argument should be "all" or Python dict {goal_name: number_of_required_eigenvalues}'
            if type(self.goal) == dict:
                goal_name, N = list(self.goal.items())[0]
            else:
                goal_name, N = self.goal
            self.N = N
            # normalize the goal name: lower-case with whitespace removed
            name = ''.join(goal_name.lower().split())
            if name in ('lm', 'largestmagnitude'):
                # NOTE(review): short code 'le' looks like a typo for 'lm' --
                # confirm against the solvers that consume self._goal
                Name, name = 'largest magnitude', 'le'
            elif name in ('sm', 'smallestmagnitude'):
                Name, name = 'smallest magnitude', 'sm'
            elif name in ('lr', 'largestrealpart'):
                Name, name = 'largest real part', 'lr'
            elif name in ('sr', 'smallestrealpart'):
                Name, name = 'smallest real part', 'sr'
            elif name in ('li', 'largestimaginarypart'):
                Name, name = 'largest imaginary part', 'li'
            elif name in ('si', 'smallestimaginarypart'):
                Name, name = 'smallest imaginary part', 'si'
            elif name in ('la', 'largestamplitude'):
                Name, name = 'largestamplitude', 'la'
            elif name in ('sa', 'smallestamplitude'):
                Name, name = 'smallest amplitude', 'sa'
            elif name in ('be', 'bothendsofthespectrum'):
                Name, name = 'both ends of the spectrum', 'be'
        self.goal = Name      # human-readable goal description
        self._goal = name     # short code consumed by solvers
        #if not isinstance(self.C[0], dict):

    def solve(self, *args, **kw):
        """Solve the eigenproblem; if C is FuncDesigner data, first assemble it into a dense square array."""
        C = self.C
        if type(C) in (tuple, list) and isinstance(C[0], dict):
            from FuncDesigner import ootranslator
            K = set()
            N = 0
            varSizes = {}   # oovar -> number of columns it contributes
            for d in C:
                K.update(d.keys())
                for key in d.keys():
                    if key in varSizes:
                        # same variable must have a consistent column count in every dict
                        if varSizes[key] != d[key].shape[1]:
                            s = 'incorrect shape 2nd coordinate %d for variable %s, defined in other array as %d' %(d[key].shape[1], key.name, varSizes[key])
                            self.err(s)
                    else:
                        varSizes[key] = d[key].shape[1] if not isscalar(d[key]) else 1
                tmp = list(d.values())
                N += tmp[0].shape[0] if not isscalar(tmp[0]) else 1
            P = dict([(key, [0]*val) for key, val in varSizes.items()])
            T = ootranslator(P)
            # stack the per-dict derivative rows into one dense matrix
            C2 = vstack([T.pointDerivative2array(d) for d in C])
            self.C = C2
            if C2.shape != (N, N):
                self.err('square matrix of shape (%d,%d) expected, shape %s obtained instead' % (N, N, C2.shape))
        r = MatrixProblem.solve(self, *args, **kw)
        if type(C) in (tuple, list) and isinstance(C[0], dict):
            # map eigenvector coordinates back onto the original oovars
            r.eigenvectors = [T.vector2point(v) for v in self.eigenvectors.T]
        return r

    def objFunc(self, x):
        # EIG has no scalar objective; the kernel API still requires the method
        return 0
        #raise 'unimplemented yet'
openopt-0.38+svn1589/openopt/kernel/GLP.py 0000664 0000000 0000000 00000003677 11757757505 0020261 0 ustar 00root root 0000000 0000000 from ooMisc import assignScript
from baseProblem import NonLinProblem
from numpy import asarray, ones, inf, array
from setDefaultIterFuncs import MAX_NON_SUCCESS
class GLP(NonLinProblem):
    """Global (nonlinear) optimization problem: minimize/maximize f(x),
    optionally subject to box bounds lb <= x <= ub, nonlinear c(x) <= 0
    and linear A x <= b constraints.

    Registers a kernel iteration callback that stops the solver after
    maxNonSuccess consecutive iterations without improvement of the
    best-known point.
    """
    probType = 'GLP'
    _optionalData = ['lb', 'ub', 'c', 'A', 'b']
    expectedArgs = ['f', 'x0']
    allowedGoals = ['minimum', 'min', 'maximum', 'max']
    goal = 'minimum'
    showGoal = False
    isObjFunValueASingleNumber = True
    plotOnlyCurrentMinimum = True
    _currentBestPoint = None
    _nonSuccessCounter = 0
    maxNonSuccess = 15   # class-level default threshold; the callback below reads it via self

    def __init__(self, *args, **kwargs):
        NonLinProblem.__init__(self, *args, **kwargs)

        def maxNonSuccess(p):
            # Iteration callback: track the best point seen so far and stop
            # once maxNonSuccess consecutive iterations bring no improvement.
            newPoint = p.point(p.xk)
            if self._currentBestPoint is None:
                self._currentBestPoint = newPoint
                return False
            elif newPoint.betterThan(self._currentBestPoint):
                self._currentBestPoint = newPoint
                self._nonSuccessCounter = 0
                return False
            self._nonSuccessCounter += 1
            if self._nonSuccessCounter > self.maxNonSuccess:
                return (True, 'Non-Success Number > maxNonSuccess = ' + str(self.maxNonSuccess))
            else:
                return False

        self.kernelIterFuncs[MAX_NON_SUCCESS] = maxNonSuccess
        # Infer problem dimension n from whichever vector datum is available.
        if 'lb' in kwargs.keys():
            self.n = len(kwargs['lb'])
        elif 'ub' in kwargs.keys():
            self.n = len(kwargs['ub'])
        elif 'b' in kwargs.keys():
            # BUG FIX: original read bare name `b` (NameError); the intended
            # value is the keyword argument.
            self.n = asarray(kwargs['b']).size
        if hasattr(self, 'n'):
            # Fill in missing bounds with +/- infinity of the right length.
            if not hasattr(self, 'lb'):
                self.lb = -inf * ones(self.n)
            if not hasattr(self, 'ub'):
                self.ub = inf * ones(self.n)
        # Default start point: midpoint of the box (inf-valued if a bound is missing).
        if 'x0' not in kwargs.keys(): self.x0 = (asarray(self.lb) + asarray(self.ub)) / 2.0
openopt-0.38+svn1589/openopt/kernel/GUI.py 0000664 0000000 0000000 00000012501 11757757505 0020245 0 ustar 00root root 0000000 0000000 # sometimes Tkinter is not installed
TkinterIsInstalled = True
import platform
if platform.python_version()[0] == '2':
# Python2
try:
from Tkinter import Tk, Toplevel, Button, Entry, Menubutton, Label, Frame, StringVar, DISABLED, ACTIVE
except:
TkinterIsInstalled = False
else:
# Python3
try:
from tkinter import Tk, Toplevel, Button, Entry, Menubutton, Label, Frame, StringVar, DISABLED, ACTIVE
except:
TkinterIsInstalled = False
from threading import Thread
from openopt import __version__ as ooversion
from setDefaultIterFuncs import BUTTON_ENOUGH_HAS_BEEN_PRESSED, USER_DEMAND_EXIT
from ooMisc import killThread
from runProbSolver import finalShow
def manage(p, *args, **kwargs):
    # Run problem p under a small Tkinter control panel with Run/Pause,
    # Enough and Exit buttons; the actual solve happens on a worker thread
    # (see invokeRunPause/doCalculations). Returns the solver result, or
    # None if the user pressed Exit before a result was produced.
    p.isManagerUsed = True
    if not TkinterIsInstalled: p.err('Tkinter is not installed. If you have Linux you could try using "apt-get install python-tk"')
    # expected args are (solver, start) or (start, solver) or one of them
    p._args = args
    p._kwargs = kwargs
    for arg in args:
        if type(arg) == str or hasattr(arg, '__name__'): p.solver = arg
        elif arg in (0, 1, True, False): start = arg
        else: p.err('Incorrect argument for manage()')
    # NOTE(review): this unconditionally overwrites any positional `start`
    # captured by the loop above (the kwargs default True always wins) --
    # looks unintended; confirm before relying on a positional start flag.
    start = kwargs.pop('start', True)
    if 'solver' in kwargs.keys(): p.solver = kwargs['solver']
    # root
    root = Tk()
    p.GUI_root = root
    # Title
    #root.wm_title('OpenOpt ' + ooversion)
    p.GUI_buttons = {}
    """ Buttons """
    # OpenOpt label
    Frame(root).pack(ipady=4)
    Label(root, text=' OpenOpt ' + ooversion + ' ').pack()
    Label(root, text=' Solver: ' + (p.solver if isinstance(p.solver, str) else p.solver.__name__) + ' ').pack()
    Label(root, text=' Problem: ' + p.name + ' ').pack()
    #TODO: use Menubutton
    #Statistics
    # stat = StringVar()
    # stat.set('')
    # Statistics = Button(root, textvariable = stat, command = lambda: invokeStatistics(p))
    # cw = Entry(root)
    #
    #
    # b = Button(root, text = 'Evaluate!', command = lambda: invokeCommand(cw))
    # cw.pack(fill='x', side='left')
    # b.pack(side='right')
    # Run
    t = StringVar()
    t.set("      Run      ")
    RunPause = Button(root, textvariable = t, command = lambda: invokeRunPause(p))
    Frame(root).pack(ipady=8)
    RunPause.pack(ipady=15)
    p.GUI_buttons['RunPause'] = RunPause
    p.statusTextVariable = t   # shared with invokeRunPause, which flips the label text
    # Enough
    def invokeEnough():
        # Ask the solver to finish gracefully with the best point found so far.
        p.userStop = True
        p.istop = BUTTON_ENOUGH_HAS_BEEN_PRESSED
        if hasattr(p, 'stopdict'): p.stopdict[BUTTON_ENOUGH_HAS_BEEN_PRESSED] = True
        p.msg = 'button Enough has been pressed'
        if p.state == 'paused':
            # resume so the solver can observe the stop flag and terminate
            invokeRunPause(p, isEnough=True)
        else:
            RunPause.config(state=DISABLED)
        Enough.config(state=DISABLED)
    Frame(root).pack(ipady=8)
    Enough = Button(root, text = ' Enough! ', command = invokeEnough)
    Enough.config(state=DISABLED)   # enabled once the solver is actually running
    Enough.pack()
    p.GUI_buttons['Enough'] = Enough
    # Exit
    def invokeExit():
        # Hard exit: tear down the GUI; the solver thread sees USER_DEMAND_EXIT.
        p.userStop = True
        p.istop = USER_DEMAND_EXIT
        if hasattr(p, 'stopdict'): p.stopdict[USER_DEMAND_EXIT] = True
        # however, the message is currently unused
        # since openopt return r = None
        p.msg = 'user pressed Exit button'
        root.destroy()
    Frame(root).pack(ipady=8)
    Button(root, text="      Exit      ", command = invokeExit).pack(ipady=15)
    """ Start main loop """
    # NOTE(review): this local `state` is never read -- invokeRunPause drives
    # the machine through p.state instead; presumably dead code, confirm.
    state = 'paused'
    if start:
        Thread(target=invokeRunPause, args=(p, )).start()
    root.mainloop()
    #finalShow(p)
    """ Handle result """
    # doCalculations stashes the solver result on p; pick it up if present.
    if hasattr(p, 'tmp_result'):
        r = p.tmp_result
        delattr(p, 'tmp_result')
    else:
        r = None
    """ Return """
    return r
def invokeRunPause(p, isEnough=False):
    # Run/Pause button handler: drives the p.state machine
    # ('init' -> 'running' <-> 'paused') and keeps the button label in sync.
    # With isEnough=True (called from the Enough button while paused) the
    # Run/Pause button is disabled so the solver can only finish.
    try:
        import pylab
    except:
        if p.plot:
            p.warn('to use graphics you should have matplotlib installed')
            p.plot = False
    if isEnough:
        p.GUI_buttons['RunPause'].config(state=DISABLED)
    if p.state == 'init':
        # first press: start the solver on a worker thread
        p.probThread = Thread(target=doCalculations, args=(p, ))
        p.state = 'running'
        p.statusTextVariable.set('    Pause    ')
        p.GUI_buttons['Enough'].config(state=ACTIVE)
        p.GUI_root.update_idletasks()
        p.probThread.start()   # start last, after all widget state is set
    elif p.state == 'running':
        p.state = 'paused'
        # suspend interactive plotting while paused
        if p.plot: pylab.ioff()
        p.statusTextVariable.set('      Run      ')
        p.GUI_root.update_idletasks()
    elif p.state == 'paused':
        p.state = 'running'
        if p.plot:
            pylab.ion()
        p.statusTextVariable.set('    Pause    ')
        p.GUI_root.update_idletasks()
def doCalculations(p):
    """Worker-thread entry point: run the solver and stash its result.

    The result is stored on p.tmp_result, from where manage() picks it up
    after the Tk main loop exits. A killThread exception (raised by the
    kernel when the user aborts) is absorbed; if plotting was active, the
    figures are closed.
    """
    try:
        p.tmp_result = p.solve(*p._args, **p._kwargs)
    except killThread:
        if p.plot:
            if hasattr(p, 'figure'):
                # BUG FIX: pylab was never imported in this scope (only inside
                # invokeRunPause's own try block), so this cleanup path raised
                # NameError instead of closing the figures.
                import pylab
                #p.figure.canvas.draw_drawable = lambda: None
                pylab.ioff()
                pylab.close('all')
#def invokeStatistics(p):
def invokeCommand(cw):
    """Execute the Python snippet currently typed into entry widget cw.

    WARNING: exec of arbitrary user-typed code -- acceptable only because
    this is a local, user-driven GUI console, never for untrusted input.
    """
    command_text = cw.get()
    exec(command_text)
openopt-0.38+svn1589/openopt/kernel/IP.py 0000664 0000000 0000000 00000001532 11757757505 0020133 0 ustar 00root root 0000000 0000000 from baseProblem import NonLinProblem
#from numpy.linalg import norm
from numpy import inf
class IP(NonLinProblem):
probType = 'IP'
goal = 'solution'
allowedGoals = ['solution']
showGoal = False
_optionalData = []
expectedArgs = ['f', 'domain']
ftol = None
def __init__(self, *args, **kwargs):
NonLinProblem.__init__(self, *args, **kwargs)
domain = args[1]
self.x0 = dict([(v, 0.5*(val[0]+val[1])) for v, val in domain.items()])
self.constraints = [v>bounds[0] for v, bounds in domain.items()] + [v