pyentropy-0.4.1/0000775000175000017510000000000011671654457014674 5ustar robincerobince00000000000000pyentropy-0.4.1/README0000664000175000017510000000034611671637701015550 0ustar robincerobince00000000000000 This is pyEntropy, a Python library for calculation of bias corrected entropy and mutual information values. For more information see the project home page: http://code.google.com/p/pyentropy or contact: pyentropy@robince.net pyentropy-0.4.1/setup.py0000664000175000017510000000054711671654356016412 0ustar robincerobince00000000000000from distutils.core import setup import pyentropy setup(name='pyentropy', version=pyentropy.__version__, description='Entropy and Information Theoretic Estimates', author=pyentropy.__author__, author_email='pyentropy@robince.net', url='http://code.google.com/p/pyentropy', packages=['pyentropy','pyentropy.tests'] ) pyentropy-0.4.1/COPYING0000664000175000017510000004310411671637701015722 0ustar robincerobince00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. 
b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. 
pyentropy-0.4.1/PKG-INFO0000664000175000017510000000040711671654457015772 0ustar robincerobince00000000000000Metadata-Version: 1.0 Name: pyentropy Version: 0.4.1 Summary: Entropy and Information Theoretic Estimates Home-page: http://code.google.com/p/pyentropy Author: Robin Ince Author-email: pyentropy@robince.net License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN pyentropy-0.4.1/pyentropy/0000775000175000017510000000000011671654457016745 5ustar robincerobince00000000000000pyentropy-0.4.1/pyentropy/tests/0000775000175000017510000000000011671654457020107 5ustar robincerobince00000000000000pyentropy-0.4.1/pyentropy/tests/test_utils.py0000664000175000017510000000525511671637701022660 0ustar robincerobince00000000000000# This file is part of pyEntropy # # pyEntropy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # pyEntropy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pyEntropy. If not, see . 
# # Copyright 2009, 2010 Robin Ince import numpy as np from nose.tools import assert_raises from numpy.testing import * from pyentropy.utils import * def setup(): global x, x2, y, a1, b1, a2, b2 x = np.arange(3**3) x2 = np.atleast_2d(x).T y = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 2], [0, 0, 1, 0], [0, 0, 1, 1], [0, 0, 1, 2], [0, 0, 2, 0], [0, 0, 2, 1], [0, 0, 2, 2], [0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 0, 2], [0, 1, 1, 0], [0, 1, 1, 1], [0, 1, 1, 2], [0, 1, 2, 0], [0, 1, 2, 1], [0, 1, 2, 2], [0, 2, 0, 0], [0, 2, 0, 1], [0, 2, 0, 2], [0, 2, 1, 0], [0, 2, 1, 1], [0, 2, 1, 2], [0, 2, 2, 0], [0, 2, 2, 1], [0, 2, 2, 2]]) a1 = np.array([8, 9, 7, 9, 3, 3, 9, 7, 9, 2]) b1 = np.array([0, 0, 1, 2, 0, 0, 0, 2, 1, 4]) / 10.0 a2 = np.array([0, 1, 7, 1, 3, 3, 1, 7, 1, 2]) b2 = np.array([1, 4, 1, 2, 0, 0, 0, 2, 0, 0]) / 10.0 def teardown(): global x, x2, y, a1, b1, a2, b2 del x, x2, y, a1, b1, a2, b2 def test_dec2base_1d(): assert_equal(dec2base(x,3,4),y) def test_dec2base_2d(): assert_equal(dec2base(x2,3,4),y) def test_dec2base_noncol(): assert_raises(ValueError, dec2base, x2.T, 3, 4) def test_base2dec(): assert_equal(base2dec(y,3),x) def test_decimalise(): assert_equal(decimalise(y.T,4,3),x) def test_decimalise_error(): assert_raises(ValueError, decimalise, y, 3, 4) def test_prob_naive(): assert_equal(prob(a1,10), b1) def test_prob_naive_missed_responses(): assert_equal(prob(a2,10), b2) def test_pt_bayescount(): # values match original bayescount.m file for n,r in [(100000, 5.0), (50, 5.0), (30, 6.0), (12, 7.0), (10, 8.0), (8, 9.0), (7, 10.0)]: yield check_pt_bayes, n, r def check_pt_bayes(n, r): assert_equal(pt_bayescount(b1,n),r) if __name__ == '__main__': run_module_suite() pyentropy-0.4.1/pyentropy/tests/test_maxent.py0000664000175000017510000000365111671637701023012 0ustar robincerobince00000000000000# This file is part of pyEntropy # # pyEntropy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as 
published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # pyEntropy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pyEntropy. If not, see . # # Copyright 2009, 2010 Robin Ince import numpy as np from numpy.testing import * from pyentropy.maxent import * def setup(): global a, a_loaded, p # remove cached file try: os.remove(os.path.join(get_data_dir(),'a_n%im%i.mat'%(3,4))) except OSError: pass # create from scratch # need to check both created from scratch and loaded # to catch any problems with savemat/loadmat round trip a = AmariSolve(3,4,confirm=False) # load a_loaded = AmariSolve(3,4) # a random distribution p = np.random.rand(64) p /= p.sum() def teardown(): global a, a_loaded, p del a, a_loaded, p def test_theta_roundtrip(): assert_array_almost_equal(p, a.p_from_theta(a.theta_from_p(p))) def test_theta_roundtrip_loaded(): assert_array_almost_equal(p, a_loaded.p_from_theta(a_loaded.theta_from_p(p))) # check first order marginals analytic # this shows numerical solution is accurate def test_first_order_solve(): p1a = a.solve(p, 1) p1d = order1direct(p, a) assert_array_almost_equal(p1a,p1d) def test_first_order_solve_loaded(): p1a = a.solve(p, 1) p1d = order1direct(p, a_loaded) assert_array_almost_equal(p1a,p1d) if __name__ == '__main__': run_module_suite() pyentropy-0.4.1/pyentropy/tests/__init__.py0000664000175000017510000000000011671637701022177 0ustar robincerobince00000000000000pyentropy-0.4.1/pyentropy/tests/test_systems.py0000664000175000017510000001215311671654356023227 0ustar robincerobince00000000000000# This file is part of pyEntropy # # pyEntropy is free software: you can redistribute it and/or modify # it under the terms of the 
GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # pyEntropy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pyEntropy. If not, see . # # Copyright 2009, 2010 Robin Ince import numpy as np from numpy.testing import * from nose.tools import with_setup from pyentropy import DiscreteSystem, SortedDiscreteSystem # TODO: test ChiXY1 HXY1 for binary data (Adelman Ispike) # TODO: test running more than once on an instance (to catch eg shuffling bug) # TODO: explicitly test shuffling? # TODO: test performance of bias corrections? # TODO: test decomposition helpers def setup(): global allcalc # all entropy values allcalc = ['HX','HY','HXY','SiHXi','HiX','HshX','HiXY', 'HshXY','ChiX'] # # test DiscreteSystem with simple 1D input, output # def setup_1d(): global x, y, HX, HXY, alltrue # simple channel which corrupts 50% randomly x = np.random.random_integers(0,9,100000) y = x.copy() indx = np.random.permutation(len(x))[:len(x)/2] y[indx] = np.random.random_integers(0,9,len(x)/2) # analytic results HX = np.log2(10) HXY = -9*0.05*np.log2(0.05) - 0.55*np.log2(0.55) # same order as allcalc alltrue = np.array([HX, HX, HXY, HX, HX, HX, HXY, HXY, HX]) def teardown_1d(): global x, y, HX, HXY, alltrue del x, y, HX, HXY, alltrue @with_setup(setup_1d, teardown_1d) def do_1d_check(method, qe_method): xc = x.copy() yc = y.copy() s = DiscreteSystem(xc,(1,10),yc,(1,10)) # calculate all entropies s.calculate_entropies(method=method, calc=allcalc, qe_method=qe_method) # check output assinged assert_(s.H == getattr(s,'H_%s'%method)) v = [] for k in allcalc: v.append(s.H[k]) assert_array_almost_equal(np.array(v), alltrue, decimal=2) # 
check didn't do something nasty to inputs assert_array_equal(x, xc) assert_array_equal(y, yc) def test_1d_plugin(): yield do_1d_check, 'plugin', None def test_1d_pt(): yield do_1d_check, 'pt', None def test_1d_qe(): yield do_1d_check, 'qe', 'plugin' def test_1d_qe_pt(): yield do_1d_check, 'qe', 'pt' # # test SortedDiscreteSystem with simple 1D input, output # def setup_1d_sorted(): global x, y, Ny setup_1d() # convert to sorted system format xs = np.zeros_like(x) Ny = np.zeros(10) start = 0 for i in range(10): oce = x[y==i] Ny[i] = len(oce) end = start + Ny[i] xs[start:end] = oce start = end x = xs def teardown_1d_sorted(): global x, y, Ny, HX, HXY, alltrue del x, y, Ny, HX, HXY, alltrue @with_setup(setup_1d_sorted, teardown_1d_sorted) def do_1d_check_sorted(method, qe_method): xc = x.copy() yc = y.copy() s = SortedDiscreteSystem(xc,(1,10),10, Ny) # calculate all entropies s.calculate_entropies(method=method, calc=allcalc, qe_method=qe_method) # check output assinged assert_(s.H == getattr(s,'H_%s'%method)) v = np.array([s.H[k] for k in allcalc]) assert_array_almost_equal(v, alltrue, decimal=2) # check didn't do something nasty to inputs assert_array_equal(x, xc) assert_array_equal(y, yc) def test_1d_plugin_sorted(): yield do_1d_check_sorted, 'plugin', None def test_1d_pt_sorted(): yield do_1d_check_sorted, 'pt', None def test_1d_qe_sorted(): yield do_1d_check_sorted, 'qe', 'plugin' def test_1d_qe_pt_sorted(): yield do_1d_check_sorted, 'qe', 'pt' # # toy system to check decomposition, PiX construction etc. 
# def setup_toy1(): global x, y, Ny, toycalc, toytrue x = np.array([[0,1,1], [1,1,2], [1,1,1], [0,1,1], [0,2,1], [0,0,0]]).T y = np.array([0,0,0,1,1,1]) Ny = np.array([3,3]) toycalc = ['HX','HXY','HiXY','ChiX', 'HiX', 'SiHXi'] # true values checked against ibtb toytrue = np.array([2.2516291673878226, 1.5849625007211561, 2.1699250014423122, 2.8365916681089796, 2.9477027792200903, 3.4215541688301352]) def teardown_toy1(): global x, y, Ny, toycalc del x, y, Ny, toycalc @with_setup(setup_toy1, teardown_toy1) def test_toy1(): s = DiscreteSystem(x,(3,3),y,(1,2)) s.calculate_entropies(method='plugin', calc=toycalc) v = np.array([s.H[t] for t in toycalc]) assert_array_almost_equal(v, toytrue) @with_setup(setup_toy1, teardown_toy1) def test_toy1_sorted(): s = SortedDiscreteSystem(x,(3,3),2,Ny) s.calculate_entropies(method='plugin', calc=toycalc) v = np.array([s.H[t] for t in toycalc]) assert_array_almost_equal(v, toytrue) if __name__ == '__main__': run_module_suite() pyentropy-0.4.1/pyentropy/maxent.py0000664000175000017510000003746411671654262020623 0ustar robincerobince00000000000000# This file is part of pyEntropy # # pyEntropy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # pyEntropy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pyEntropy. If not, see . # # Copyright 2009, 2010 Robin Ince """ Module for computing finite-alphabet maximum entropy solutions using a coordinate transform method For details of the method see: Ince, R. A. A., Petersen, R. S., Swan, D. 
C., Panzeri, S., 2009 "Python for Information Theoretic Analysis of Neural Data", Frontiers in Neuroinformatics 3:4 doi:10.3389/neuro.11.004.2009 http://www.frontiersin.org/neuroinformatics/paper/10.3389/neuro.11/004.2009/ If you use this code in a published work, please cite the above paper. The generated transformation matrices for a given set of parameters are stored to disk. The default location for the cache is a ``.pyentropy`` (``_pyentropy`` on windows) directory in the users home directory. To override this and use a custom location (for example to share the folder between users) you can put a configuration file ``.pyentropy.cfg`` (``pyentropy.cfg`` on windows) file in the home directory with the following format:: [maxent] cache_dir = /path/to/cache :func:`pyentropy.maxent.get_config_file()` will show where it is looking for the config file. The probability vectors for a finite-alphabet space of ``n`` variables with ``m`` possible values is a length ``m**n-1`` vector ordered such that the value of the index is equal to the decimal value of the input state represented, when interpreted as a base m, length n word. eg for n=3,m=3:: P[0] = P(0,0,0) P[1] = P(0,0,1) P[2] = P(0,0,2) P[3] = P(0,1,0) P[4] = P(0,1,1) etc. This allows efficient vectorised conversion between probability index and response word using base2dec, dec2base. The output is in the same format. 
""" import time import os import sys import cPickle import numpy as np import scipy as sp import scipy.io as sio import scipy.sparse as sparse import scipy.optimize as opt # umfpack disabled due to bug in scipy # http://mail.scipy.org/pipermail/scipy-user/2009-December/023625.html #try: #import scikits.umfpack as um #HAS_UMFPACK = True #except: #HAS_UMFPACK = False HAS_UMFPACK = False from scipy.sparse.linalg import spsolve, use_solver use_solver(useUmfpack=False) from utils import dec2base, base2dec import ConfigParser def get_config_file(): """Get the location and name of the config file for specifying the data cache dir. You can call this to find out where to put your config. """ if sys.platform.startswith('win'): cfname = '~/pyentropy.cfg' else: cfname = '~/.pyentropy.cfg' return os.path.expanduser(cfname) def get_data_dir(): """Get the data cache dir to use to load and save precomputed matrices""" # default values if sys.platform.startswith('win'): dirname = '~/_pyentropy' else: dirname = '~/.pyentropy' # try to load user override config = ConfigParser.RawConfigParser() cf = config.read(get_config_file()) try: data_dir = os.path.expanduser(config.get('maxent','cache_dir')) except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): data_dir = os.path.expanduser(dirname) # check directory exists if not os.path.isdir(data_dir): try: os.mkdir(data_dir) except: print "ERROR: could not create data dir. Please check your " + \ "configuration." raise return data_dir # # AmariSolve class # class AmariSolve: """A class for computing maximum-entropy solutions. When the class is initiliased the coordinate transform matrices are loaded from disk, if available, or generated. See module docstring for location of cache directory. An instance then exposes a solve method which returns the maximum entropy distribution preserving marginal constraints of the input probability vector up to a given order k. 
    This class computes the full transformation matrix and so can compute
    solutions for any order.

    """

    def __init__(self, n, m, filename='a_', local=False, confirm=True):
        """Setup transformation matrix for given parameter set.

        If existing matrix file is found, load the (sparse) transformation
        matrix A, otherwise generate it.

        :Parameters:
          n : int
            number of variables in the system
          m : int
            size of finite alphabet (number of symbols)
          filename : {str, None}, optional
            filename to load/save (designed to be used by derived classes).
          local : {False, True}, optional
            If True, then store/load arrays from 'data/' directory in
            current working directory. Otherwise use the package data dir
            (default ~/.pyentropy or ~/_pyentropy (windows))
            Can be overridden through ~/.pyentropy.cfg or ~/pyentropy.cfg
            (windows)
          confirm : {True, False}, optional
            Whether to prompt for confirmation before generating matrix

        """
        #if np.mod(m,2) != 1:
            #raise ValueError, "m must be odd"
        # derived classes may set self.k before calling this __init__;
        # otherwise constrain up to full order n
        try:
            k = self.k
        except AttributeError:
            self.k = n
        self.n = n
        self.m = m
        self.l = (m-1)/2
        # full dimension of probability space
        self.fdim = m**n
        # dimension of arrays (-1 dof)
        self.dim = self.fdim - 1
        # cache filename encodes the (n, m) parameter pair
        filename = filename + "n%im%i"%(n,m)
        if local:
            self.filename = os.path.join(os.getcwd(), 'data', filename)
        else:
            self.filename = os.path.join(get_data_dir(), filename)
        # if file exists load (matrix A)
        # must be running in correct directory
        if os.path.exists(self.filename+'.mat'):
            loaddict = sio.loadmat(self.filename+'.mat')
            self.A = loaddict['A'].tocsc()
            self.order_idx = loaddict['order_idx'].squeeze()
        elif confirm:
            inkey = raw_input("Existing .mat file not found..." +
                              "Generate matrix? (y/n)")
            if inkey == 'y':
                # else call matrix generation function (and save)
                self._generate_matrix()
            else:
                # user declined: object is left unusable by design
                print "File not found and generation aborted..."
                print "Do not use this class instance."
                return None
        else:
            # just generate it without confirmation
            self._generate_matrix()
        self.B = self.A.T
        # umfpack factorisation of matrix
        if HAS_UMFPACK:
            self._umfpack()
        return None

    def _umfpack(self):
        """Prefactorise B with UMFPACK for repeated theta_from_p solves."""
        self.umf = um.UmfpackContext()
        self.umf.numeric(self.B)

    def _calculate_orders(self):
        """Compute the row-offset (order_idx) and length of each marginal
        order block in the transformation matrix."""
        k = self.k
        n = self.n
        m = self.m
        dim = self.dim
        # Calculate the length of each order
        self.order_idx = np.zeros(n+2, dtype=int)
        self.order_length = np.zeros(n+1, dtype=int)
        self.row_counter = 0
        for ordi in xrange(n+1):
            # C(n, ord) position choices times (m-1)^ord symbol choices
            self.order_length[ordi] = (sp.misc.comb(n, ordi+1, exact=1) *
                                       ((m-1)**(ordi+1)))
            self.order_idx[ordi] = self.row_counter
            self.row_counter += self.order_length[ordi]
        self.order_idx[n+1] = dim+1
        # Calculate nnz for A
        # not needed for lil sparse format
        x = (m*np.ones(n))**np.arange(n-1,-1,-1)
        x = x[:k]
        y = self.order_length[:k]
        self.Annz = np.sum(x*y.T)

    def _generate_matrix(self):
        """Generate A matrix if required"""
        k = self.k
        n = self.n
        m = self.m
        dim = self.dim
        self._calculate_orders()
        # build in DOK format (efficient incremental writes), convert later
        self.A = sparse.dok_matrix((self.order_idx[k],dim))
        self.row_counter = 0
        for ordi in xrange(k):
            self.nterms = m**(n - (ordi+1))
            self.terms = dec2base(np.c_[0:self.nterms,], m, n-(ordi+1))
            self._recloop((ordi+1), 1, [], [], n, m)
            print "Order " + str(ordi+1) + " complete. Time: " + time.ctime()
        # save matrix to file
        self.A = self.A.tocsc()
        savedict = {'A':self.A, 'order_idx':self.order_idx}
        sio.savemat(self.filename, savedict)

    def _recloop(self, order, depth, alpha, pos, n, m, blocksize=None):
        """Recursively enumerate symbol values (alpha) and positions (pos)
        for the current marginal order, filling rows of A."""
        terms = self.terms
        A = self.A
        if not blocksize:
            blocksize = self.nterms
        # starting point for position loop
        if len(pos)==0:
            pos_start = 0
        else:
            pos_start = pos[-1] + 1
        # loop over alphabet
        for ai in xrange(1, m):
            alpha_new = list(alpha)
            alpha_new.append(ai)
            # loop over position
            for pi in xrange(pos_start, (n-(order-depth))):
                pos_new = list(pos)
                pos_new.append(pi)
                # add columns?
                if depth == order:
                    # special case for highest order
                    # (can't insert columns into empty terms array)
                    if order==n:
                        cols = base2dec(np.atleast_2d(alpha_new),m)[0]-1
                        A[self.row_counter, cols] = 1
                    else:
                        # add columns (insert and add to sparse)
                        ins = np.tile(alpha_new,(blocksize,1))
                        temp = terms
                        for coli in xrange(order):
                            # splice the fixed symbol values into the free
                            # positions of every term word
                            temp = inscol(temp, np.array(ins[:,coli],ndmin=2).T,
                                          pos_new[coli])
                        cols = (base2dec(temp,m)-1).tolist()
                        A[self.row_counter, cols] = 1;
                    self.row_counter += 1
                else:
                    # recurse to fill the remaining (order - depth) slots
                    self._recloop(order, depth+1, alpha_new, pos_new,
                                  n, m, blocksize=blocksize)

    def solve(self,Pr,k,eta_given=False,ic_offset=-0.01, **kwargs):
        """Find maxent distribution for a given order k

        :Parameters:
          Pr : (fdim,)
            probability distribution vector
          k : int
            Order of interest (marginals up to this order constrained)
          eta_given : {False, True}, optional
            Set this True if you are passing the marginals in Pr instead
            of the probabilities
          ic_offset : float, optional
            Initial condition offset for the numerical optimisation.
            If you are having trouble getting convergence, try playing
            with this.
            Usually making it smaller is effective (ie -0.00001)

        :Returns:
          Psolve : (fdim,)
            probability distribution vector of k-th order maximum entropy
            solution

        """
        if len(Pr.shape) != 1:
            raise ValueError, "Input Pr should be a 1D array"
        if not eta_given and Pr.size != self.fdim:
            raise ValueError, "Input probability vector must have length fdim (m^n)"
        if eta_given:
            # marginals vector lives in the reduced (dim) space
            if Pr.size != self.dim:
                raise ValueError, "Input eta vector must have length dim (m^n -1)"
        else:
            if Pr.size != self.fdim:
                raise ValueError, "Input probability vector must have length fdim (m^n)"
            if not np.allclose(Pr.sum(), 1.0):
                raise ValueError, "Input probability vector must sum to 1"
        # number of constrained rows: all marginals up to order k
        l = self.order_idx[k].astype(int)
        # unconstrained thetas are fixed at zero
        theta0 = np.zeros(self.order_idx[-1]-self.order_idx[k]-1)
        x0 = np.zeros(l)+ic_offset
        sf = self._solvefunc
        jacobian = kwargs.get('jacobian',True)
        # restrict transform to the constrained orders
        Asmall = self.A[:l,:]
        Bsmall = Asmall.T
        if eta_given:
            eta_sampled = Pr[:l]
        else:
            # drop p[0]: it is determined by normalisation
            eta_sampled = Asmall*Pr[1:]
        if jacobian:
            self.optout = opt.fsolve(sf, x0, (Asmall,Bsmall,eta_sampled, l),
                                     fprime=self._jacobian, col_deriv=1,
                                     full_output=1)
        else:
            self.optout = opt.fsolve(sf, x0, (Asmall,Bsmall,eta_sampled, l),
                                     full_output=1)
        #self.optout = opt.leastsq(sf, x0, (Asmall,Bsmall,eta_sampled),
        #full_output=1)
        the_k = self.optout[0]
        # report solver diagnostics
        print "order: " + str(k) + \
              " ierr: " + str(self.optout[2]) + " - " + self.optout[3]
        print "fval: " + str(np.mean(np.abs(self.optout[1]['fvec']))),
        # extra debug info for jacobian
        print "nfev: %d" % self.optout[1]['nfev'],
        try:
            print "njev: %d" % self.optout[1]['njev']
        except KeyError:
            print ""
        # reconstruct full-length p; p[0] from normalisation
        Psolve = np.zeros(self.fdim)
        Psolve[1:] = self._p_from_theta(np.r_[the_k,theta0])
        Psolve[0] = 1.0 - Psolve.sum()
        return Psolve

    def _solvefunc(self, theta_un, Asmall, Bsmall, eta_sampled, l):
        """Residual between target marginals and marginals of exp-family
        distribution parameterised by theta_un."""
        b = np.exp(Bsmall*theta_un)
        y = eta_sampled - ( (Asmall*b) / (b.sum()+1) )
        return y

    def _jacobian(self, theta, Asmall, Bsmall, eta_sampled, l):
        """Analytic Jacobian of _solvefunc (column-derivative layout)."""
        x = np.exp(Bsmall*theta)
        p = Asmall*x
        q = x.sum() + 1
        J = np.outer(p,p)
        xd = sparse.spdiags(x,0,x.size,x.size,format='csc')
        qdp = (Asmall * xd) * Bsmall
        qdp *= q
        J = J - qdp
        J /= (q*q)
        return J

    def _p_from_theta(self, theta):
        """Internal version - stays in dim space (missing p[0])"""
        pnorm = lambda p: ( p / (p.sum()+1) )
        return pnorm(np.exp(self.A.T*theta))

    def p_from_theta(self, theta):
        """Return full ``fdim`` p-vector from ``fdim-1`` length theta"""
        p = np.zeros(self.fdim)
        p[1:] = self._p_from_theta(theta)
        p[0] = 1.0 - p.sum()
        return p

    def theta_from_p(self, p):
        """Return theta vector from full probability vector"""
        b = np.log(p[1:]) - np.log(p[0])
        if HAS_UMFPACK:
            # use prefactored matrix
            theta = self.umf.solve(um.UMFPACK_A, self.B, b, autoTranspose=True)
        else:
            theta = spsolve(self.B, b)
        # add theta(0) or not?
        return theta

    def eta_from_p(self, p):
        """Return eta-vector (marginals) from full probability vector"""
        return self.A*p[1:]


def inscol(x,h,n):
    """Insert column vector h into 2D array x before column index n
    (n==0 prepends, n==x.shape[1] appends)."""
    xs = x.shape
    hs = h.shape
    if hs[0]==1:
        # row vector
        h=h.T
        hs=h.shape
    if n==0:
        y = np.hstack((h,x))
    elif n==xs[1]:
        y = np.hstack((x,h))
    else:
        y = np.hstack((x[:,:n],h,x[:,n:]))
    return y


def order1direct(p,a):
    """Compute first order solution directly for testing"""
    if p.size != a.fdim:
        raise ValueError, "Probability vector doesn't match a.fdim"
    # 1st order marginals
    marg = a.eta_from_p(p)[:a.order_idx[1]]
    # output
    p1 = np.zeros(a.fdim)
    # index of the order-1 marginal for variable x taking value v
    the1pos = lambda x,v: ((v-1)*a.n)+x
    # loop over all probabilities (not p(0))
    for i in range(1,a.fdim):
        Pword = dec2base(np.atleast_2d(i).T,a.m,a.n)
        # loop over each variable
        for j in range(a.n):
            # this value
            x = Pword[0][j]
            if x!=0:
                # this is a normal non-zero marginal
                factor = marg[the1pos(j,x)]
            else:
                # this is a zero-value marginal
                factor = 1 - marg[the1pos(j,np.r_[1:a.m])].sum()
            if p1[i]==0:
                # first entry
                p1[i] = factor
            else:
                p1[i] *= factor
    # normalise
    p1[0] = 1.0 - p1.sum()
    return p1
pyentropy-0.4.1/pyentropy/__init__.py0000664000175000017510000000232211671654356021053 0ustar robincerobince00000000000000# This file is part of pyEntropy
#
# pyEntropy is free
#
# Copyright 2009, 2010 Robin Ince

from __future__ import division
import numpy as np
from utils import (prob, _probcount, decimalise, pt_bayescount, nsb_entropy,
                   dec2base, ent, malog2)


class BaseSystem:
    """Base functionality for entropy calculations common to all systems"""

    def _calc_ents(self, method, sampling, methods):
        """Main entropy calculation function for non-QE methods"""
        self._sample(method=sampling)
        # which corrections are requested (directly or via methods list)
        pt = (method == 'pt') or ('pt' in methods)
        plugin = (method == 'plugin') or ('plugin' in methods)
        nsb = (method == 'nsb') or ('nsb' in methods)
        calc = self.calc
        if (pt or plugin):
            self._calc_pt_plugin(pt)
        if nsb:
            self._calc_nsb()
        if 'HshXY' in calc:
            #TODO: not so efficient since samples PY again
            # shuffled-conditional entropy computed on a shuffled copy system
            sh = self._sh_instance()
            sh.calculate_entropies(method=method, sampling=sampling,
                                   methods=methods, calc=['HXY'])
            if pt:
                self.H_pt['HshXY'] = sh.H_pt['HXY']
            if nsb:
                self.H_nsb['HshXY'] = sh.H_nsb['HXY']
            if plugin or pt:
                self.H_plugin['HshXY'] = sh.H_plugin['HXY']
        if 'HshX' in calc:
            # unconditionally-shuffled copy system
            sh = self._shX_instance()
            sh.calculate_entropies(method=method, sampling=sampling,
                                   methods=methods, calc=['HX'])
            if pt:
                self.H_pt['HshX'] = sh.H_pt['HX']
            if nsb:
                self.H_nsb['HshX'] = sh.H_nsb['HX']
            if plugin or pt:
                self.H_plugin['HshX'] = sh.H_plugin['HX']
        # expose the primary method's results as self.H
        if method == 'plugin':
            self.H = self.H_plugin
        elif method == 'pt':
            self.H = self.H_pt
        elif method == 'nsb':
            self.H = self.H_nsb

    def _calc_pt_plugin(self, pt):
        """Calculate direct entropies and apply PT correction if required"""
        calc = self.calc
        # Panzeri-Treves first-order bias term for a support of size R
        pt_corr = lambda R: (R-1)/(2*self.N*np.log(2))
        self.H_plugin = {}
        if pt:
            self.H_pt = {}
        # compute basic entropies
        if 'HX' in calc:
            H = ent(self.PX)
            self.H_plugin['HX'] = H
            if pt:
                self.H_pt['HX'] = H + pt_corr(pt_bayescount(self.PX, self.N))
        if 'HY' in calc:
            H = ent(self.PY)
            self.H_plugin['HY'] = H
            if pt:
                self.H_pt['HY'] = H + pt_corr(pt_bayescount(self.PY, self.N))
        if 'HXY' in calc:
            H = (self.PY * ent(self.PXY)).sum()
            self.H_plugin['HXY'] = H
            if pt:
                # correction accumulated per conditioning value y
                for y in xrange(self.Y_dim):
                    H += pt_corr(pt_bayescount(self.PXY[:,y], self.Ny[y]))
                self.H_pt['HXY'] = H
        if 'SiHXi' in calc:
            H = ent(self.PXi).sum()
            self.H_plugin['SiHXi'] = H
            if pt:
                for x in xrange(self.X_n):
                    H += pt_corr(pt_bayescount(self.PXi[:,x],self.N))
                self.H_pt['SiHXi'] = H
        if 'HiXY' in calc:
            H = (self.PY * ent(self.PXiY)).sum()
            self.H_plugin['HiXY'] = H
            if pt:
                for x in xrange(self.X_n):
                    for y in xrange(self.Y_dim):
                        H += pt_corr(pt_bayescount(self.PXiY[:,x,y],self.Ny[y]))
                self.H_pt['HiXY'] = H
        if 'HiX' in calc:
            H = ent(self.PiX)
            self.H_plugin['HiX'] = H
            if pt:
                # no PT correction for HiX
                self.H_pt['HiX'] = H
        if 'ChiX' in calc:
            # cross-entropy of PX against independent model PiX; mask
            # zero-probability entries so 0*log(0) terms drop out
            H = -(self.PX*malog2(np.ma.array(self.PiX,copy=False,
                    mask=(self.PiX<=np.finfo(np.float).eps)))).sum(axis=0)
            self.H_plugin['ChiX'] = H
            if pt:
                # no PT correction for ChiX
                self.H_pt['ChiX'] = H
        # for adelman style I(k;spike) (bits/spike)
        if 'HXY1' in calc:
            if self.Y_m != 2:
                raise ValueError, \
                "HXY1 calculation only makes sense for spike data, ie Y_m = 2"
            H = ent(self.PXY[:,1])
            self.H_plugin['HXY1'] = H
            if pt:
                self.H_pt['HXY1'] = H + pt_corr(pt_bayescount(self.PXY[:,1],self.Ny[1]))
        if 'ChiXY1' in calc:
            if self.Y_m != 2:
                raise ValueError, \
                "ChiXY1 calculation only makes sense for spike data, ie Y_m = 2"
            H = -np.ma.array(self.PXY[:,1]*np.log2(self.PX),copy=False,
                             mask=(self.PX<=np.finfo(np.float).eps)).sum()
            self.H_plugin['ChiXY1'] = H
            if pt:
                # no PT for ChiXY1
                self.H_pt['ChiXY1'] = H

    def _calc_nsb(self):
        """Calculate NSB corrected entropy"""
        calc = self.calc
        # TODO: 1 external program call if all y have same number of trials
        self.H_nsb = {}
        # nsb_entropy returns nats; divide by log(2) to convert to bits
        if 'HX' in calc:
            H = nsb_entropy(self.PX, self.N, self.X_dim)[0] / np.log(2)
            self.H_nsb['HX'] = H
        if 'HY' in calc:
            H = nsb_entropy(self.PY, self.N, self.Y_dim)[0] / np.log(2)
            self.H_nsb['HY'] = H
        if 'HXY' in calc:
            H = 0.0
            for y in xrange(self.Y_dim):
                H += self.PY[y] * nsb_entropy(self.PXY[:,y], self.Ny[y],
                                              self.X_dim)[0] / np.log(2)
            self.H_nsb['HXY'] = H
        if 'SiHXi' in calc:
            # TODO: can easily use 1 call here
            H = 0.0
            for i in xrange(self.X_n):
                H += nsb_entropy(self.PXi[:,i], self.N, self.X_m)[0] / np.log(2)
            self.H_nsb['SiHXi'] = H
        if 'HiXY' in calc:
            H = 0.0
            for i in xrange(self.X_n):
                for y in xrange(self.Y_dim):
                    H += self.PY[y] * nsb_entropy(self.PXiY[:,i,y], self.Ny[y],
                                                  self.X_m)[0] / np.log(2)
            self.H_nsb['HiXY'] = H
        if 'HiX' in calc:
            H = nsb_entropy(self.PiX, self.N, self.X_dim)[0] / np.log(2)
            self.H_nsb['HiX'] = H

    def calculate_entropies(self, method='plugin', sampling='naive',
                            calc=['HX','HXY'], **kwargs):
        """Calculate entropies of the system.

        :Parameters:
          method : {'plugin', 'pt', 'qe', 'nsb'}
            Bias correction method to use
          sampling : {'naive', 'kt', 'beta:x'}, optional
            Sampling method to use. 'naive' is the standard histogram method.
            'beta:x' is for an add-constant beta estimator, with beta value
            following the colon eg 'beta:0.01' [1]_. 'kt' is for the
            Krichevsky-Trofimov estimator [2]_, which is equivalent to
            'beta:0.5'.
          calc : list of strs
            List of entropy values to calculate from ('HX', 'HY', 'HXY',
            'SiHXi', 'HiX', 'HshX', 'HiXY', 'HshXY', 'ChiX', 'HXY1','ChiXY1')

        :Keywords:
          qe_method : {'plugin', 'pt', 'nsb'}, optional
            Method argument to be passed for QE calculation ('pt', 'nsb').
            Allows combination of QE with other corrections.
          methods : list of strs, optional
            If present, method argument will be ignored, and all corrections
            in the list will be calculated. Use to comparing results of
            different methods with one calculation pass.

        :Returns:
          self.H : dict
            Dictionary of computed values.
          self.H_method : dict
            Dictionary of computed values using 'method'.

        Notes
        -----
        * If the PT method is chosen with outputs 'HiX' or 'ChiX' no bias
          correction will be performed for these terms.

        References
        ----------
        .. [1] T. Schurmann and P. Grassberger, "Entropy estimation of symbol
           sequences," Chaos,vol. 6, no. 3, pp. 414--427, 1996.
        .. [2] R. Krichevsky and V. Trofimov, "The performance of universal
           encoding," IEEE Trans. Information Theory, vol. 27, no. 2,
           pp. 199--207, Mar. 1981.
""" self.calc = calc self.methods = kwargs.get('methods',[]) for m in (self.methods + [method]): if m not in ('plugin','pt','qe','nsb'): raise ValueError, 'Unknown correction method : '+str(m) methods = self.methods # allocate memory for requested calculations if any([c in calc for c in ['HXY','HiXY','HY']]): # need Py for any conditional entropies self.PY = np.zeros(self.Y_dim) if any([c in calc for c in ['HX','HshX','ChiX','ChiXY1']]): self.PX = np.zeros(self.X_dim) if ('HiX' in calc) or ('ChiX' in calc): self.PiX = np.zeros(self.X_dim) if any([c in calc for c in ['HXY','HXY1','ChiXY1']]): self.PXY = np.zeros((self.X_dim,self.Y_dim)) if 'SiHXi' in calc: self.PXi = np.zeros((self.X_m,self.X_n)) if ('HiXY' in calc) or ('HiX' in calc): self.PXiY = np.zeros((self.X_m,self.X_n,self.Y_dim)) if 'HshXY' in calc: self.Xsh = np.zeros(self.X.shape,dtype=np.int) if (method == 'qe') or ('qe' in methods): # default to plugin method if not specified qe_method = kwargs.get('qe_method','plugin') if qe_method == 'qe': raise ValueError, "Can't use qe for qe_method!" 
self._qe_ent(qe_method,sampling,methods) if method == 'qe': self.H = self.H_qe else: self._calc_ents(method, sampling, methods) def I(self, corr=None): """Convenience function to compute mutual information Must have already computed required entropies ['HX', 'HXY'] :Parameters: corr : str, optional If provided use the entropies from this correction rather than the default values in self.H """ try: if corr is not None: H = getattr(self,'H_%s'%corr) else: H = self.H I = H['HX'] - H['HXY'] except (KeyError, AttributeError): print "Error: must have computed HX and HXY for" + \ "mutual information" return return I def Ish(self, corr=None): """Convenience function to compute shuffled mutual information estimate Must have already computed required entropies ['HX', 'HiXY', 'HshXY', 'HXY'] :Parameters: corr : str, optional If provided use the entropies from this correction rather than the default values in self.H """ try: if corr is not None: H = getattr(self,'H_%s'%corr) else: H = self.H I = H['HX'] - H['HiXY'] + H['HshXY'] - H['HXY'] except (KeyError, AttributeError): print "Error: must have computed HX, HiXY, HshXY and HXY" + \ "for shuffled mutual information estimator" return return I def Ishush(self, corr=None): """Convenience function to compute full shuffled mutual information estimate Must have already computed required entropies ['HX', 'SiHXi', 'HshX', 'HiXY', 'HshXY', 'HXY'] :Parameters: corr : str, optional If provided use the entropies from this correction rather than the default values in self.H """ try: if corr is not None: H = getattr(self,'H_%s'%corr) else: H = self.H I = (H['HX'] - H['HshX'] + H['SiHXi'] - H['HiXY'] + H['HshXY'] - H['HXY']) except (KeyError, AttributeError): print "Error: must have computed HX, HshX, SiHXi, " + \ "HiXY, HshXY and HXY for shuffled mutual information estimator" return return I def pola_decomp(self): """Convenience function for Pola breakdown""" I = {} try: I['lin'] = self.H['SiHXi'] - self.H['HiXY'] I['sig-sim'] = 
self.H['HiX'] - self.H['SiHXi'] I['cor-ind'] = -self.H['HiX'] + self.H['ChiX'] I['cor-dep'] = self.Ish() - self.H['ChiX'] + self.H['HiXY'] except (KeyError, AttributeError): print "Error: must compute SiHXi, HiXY, HiX, ChiX and Ish for Pola breakdown" return I def Ispike(self): """Adelman (2003) style information per spike """ try: I = self.H['ChiXY1'] - self.H['HXY1'] except (KeyError, AttributeError): print "Error: must compute ChiXY1, HXY1 for Ispike" return return I def _qe_ent(self, qe_method, sampling, methods): """General Quadratic Extrapolation Function""" calc = self.calc self._qe_prep() N = self.N N2 = N/2.0 N4 = N/4.0 # full length # add on methods to do everything (other than qe) with this one call self._calc_ents(qe_method,sampling,methods) H1 = np.array([v for k,v in sorted(self.H.iteritems())]) # half length H2 = np.zeros(H1.shape) half_slices = [(2,0), (2,1)] for sl in half_slices: sys = self._subsampled_instance(sl) sys.calculate_entropies(method=qe_method, sampling=sampling, calc=calc) H2 += np.array([v for k,v in sorted(sys.H.iteritems())]) del sys H2 = H2 / 2.0 # quarter length H4 = np.zeros(H1.shape) quarter_slices = [(4,0), (4,1), (4,2), (4,3)] for sl in quarter_slices: sys = self._subsampled_instance(sl) sys.calculate_entropies(method=qe_method, sampling=sampling, calc=calc) H4 += np.array([v for k,v in sorted(sys.H.iteritems())]) del sys H4 = H4 / 4.0 # interpolation Hqe = np.zeros(H1.size) for i in xrange(H1.size): Hqe[i] = np.polyfit([N4,N2,N], [N4*N4*H4[i], N2*N2*H2[i], N*N*H1[i]], 2)[0] keys = [k for k,v in sorted(self.H_plugin.iteritems())] self.H_qe = dict(zip(keys, Hqe)) class DiscreteSystem(BaseSystem): """Class to hold probabilities and calculate entropies of a discrete stochastic system. :Attributes: PXY : (X_dim, Y_dim) Conditional probability vectors on decimalised space P(X|Y). ``PXY[:,i]`` is X probability distribution conditional on ``Y==i``. PX : (X_dim,) Unconditional decimalised X probability. 
      PY : (Y_dim,)
        Unconditional decimalised Y probability.
      PXi : (X_m, X_n)
        Unconditional probability distributions for individual X components.
        ``PXi[i,j] = P(X_i==j)``
      PXiY : (X_m, X_n, Y_dim)
        Conditional probability distributions for individual X components.
        ``PXiY[i,j,k] = P(X_i==j | Y==k)``
      PiX : (X_dim,)
        ``Pind(X) = <P(X|y)>_y``

    """

    def __init__(self, X, X_dims, Y, Y_dims, qe_shuffle=True):
        """Check and assign inputs.

        :Parameters:
          X : (X_n, t) int array
            Array of measured input values. X_n variables in X space, t trials
          X_dims : tuple (n, m)
            Dimension of X (input) space; length n, base m words
          Y : (Y_n, t) int array
            Array of corresponding measured output values. Y_n variables in
            Y space, t trials
          Y_dims : tuple (n ,m)
            Dimension of Y (output) space; length n, base m words
          qe_shuffle : {True, False}, optional
            Set to False if trials already in random order, to skip shuffling
            step in QE. Leave as True if trials have structure (ie one
            stimuli after another).

        """
        self.X_dims = X_dims
        self.Y_dims = Y_dims
        self.X_n = X_dims[0]
        self.X_m = X_dims[1]
        self.Y_n = Y_dims[0]
        self.Y_m = Y_dims[1]
        # decimalised space sizes
        self.X_dim = self.X_m ** self.X_n
        self.Y_dim = self.Y_m ** self.Y_n
        self.X = np.atleast_2d(X)
        self.Y = np.atleast_2d(Y)
        self._check_inputs(self.X, self.Y)
        self.N = self.X.shape[1]
        # per-stimulus trial counts filled during sampling
        self.Ny = np.zeros(self.Y_dim)
        self.qe_shuffle = qe_shuffle
        self.sampled = False
        self.calc = []

    def _sample(self, method='naive'):
        """Sample probabilities of system.

        Parameters
        ----------
        method : {'naive', 'beta:x', 'kt'}, optional
            Sampling method to use. 'naive' is the standard histogram method.
            'beta:x' is for an add-constant beta estimator, with beta value
            following the colon eg 'beta:0.01' [1]_. 'kt' is for the
            Krichevsky-Trofimov estimator [2]_, which is equivalent to
            'beta:0.5'.

        References
        ----------
        .. [1] T. Schurmann and P. Grassberger, "Entropy estimation of symbol
           sequences," Chaos,vol. 6, no. 3, pp. 414--427, 1996.
        .. [2] R. Krichevsky and V. Trofimov, "The performance of universal
           encoding," IEEE Trans.
           Information Theory, vol. 27, no. 2, pp. 199--207, Mar. 1981.

        """
        calc = self.calc
        # decimalise: collapse multi-variable words to single integers
        if any([c in calc for c in ['HXY','HX']]):
            if self.X_n > 1:
                d_X = decimalise(self.X, self.X_n, self.X_m)
            else:
                # make 1D
                d_X = self.X.reshape(self.X.size)
        if any([c in calc for c in ['HiX','HiXY','HXY','HY']]):
            if self.Y_n > 1:
                d_Y = decimalise(self.Y, self.Y_n, self.Y_m)
            else:
                # make 1D
                d_Y = self.Y.reshape(self.Y.size)
        # unconditional probabilities
        if ('HX' in calc) or ('ChiX' in calc):
            self.PX = prob(d_X, self.X_dim, method=method)
            """test docstring fpr PX"""
        if any([c in calc for c in ['HXY','HiX','HiXY','HY']]):
            self.PY = prob(d_Y, self.Y_dim, method=method)
        if 'SiHXi' in calc:
            for i in xrange(self.X_n):
                self.PXi[:,i] = prob(self.X[i,:], self.X_m, method=method)
        # conditional probabilities
        if any([c in calc for c in ['HiXY','HXY','HshXY']]):
            for i in xrange(self.Y_dim):
                # trials on which stimulus i occurred
                indx = np.where(d_Y==i)[0]
                self.Ny[i] = indx.size
                if 'HXY' in calc:
                    # output conditional ensemble
                    oce = d_X[indx]
                    if oce.size == 0:
                        print 'Warning: Null output conditional ensemble for ' + \
                              'output : ' + str(i)
                    else:
                        self.PXY[:,i] = prob(oce, self.X_dim, method=method)
                if any([c in calc for c in ['HiX','HiXY','HshXY']]):
                    for j in xrange(self.X_n):
                        # output conditional ensemble for a single variable
                        oce = self.X[j,indx]
                        if oce.size == 0:
                            print 'Warning: Null independent output conditional ensemble for ' + \
                                  'output : ' + str(i) + ', variable : ' + str(j)
                        else:
                            self.PXiY[:,j,i] = prob(oce, self.X_m, method=method)
                            if 'HshXY' in calc:
                                # shuffle
                                #np.random.shuffle(oce)
                                # permutation within the stimulus-conditional
                                # ensemble preserves PXiY but breaks
                                # cross-variable correlations
                                shfoce = np.random.permutation(oce)
                                self.Xsh[j,indx] = shfoce
        # Pind(X) = <P(X|y)>_y
        if ('HiX' in calc) or ('ChiX' in calc):
            # construct joint distribution
            words = dec2base(np.atleast_2d(np.r_[0:self.X_dim]).T,self.X_m,self.X_n)
            PiXY = np.zeros((self.X_dim, self.Y_dim))
            # product over variables of the single-variable conditionals
            PiXY = self.PXiY[words,np.r_[0:self.X_n]].prod(axis=1)
            # average over Y
            self.PiX = np.dot(PiXY,self.PY)
        self.sampled = True

    def _check_inputs(self, X, Y):
        """Validate dtype, value ranges and shapes of the X/Y trial arrays."""
        if (not np.issubdtype(X.dtype, np.int)) \
        or (not np.issubdtype(Y.dtype, np.int)):
            raise ValueError, "Inputs must be of integer type"
        if (X.max() >= self.X_m) or (X.min() < 0):
            raise ValueError, "X values must be in [0, X_m)"
        if (Y.max() >= self.Y_m) or (Y.min() < 0):
            raise ValueError, "Y values must be in [0, Y_m)"
        if (X.shape[0] != self.X_n):
            raise ValueError, "X.shape[0] must equal X_n"
        if (Y.shape[0] != self.Y_n):
            raise ValueError, "Y.shape[0] must equal Y_n"
        if (Y.shape[1] != X.shape[1]):
            raise ValueError, "X and Y must contain same number of trials"

    def _sh_instance(self):
        """Return shuffled instance"""
        # do it like this to allow easy inheritance
        return DiscreteSystem(self.Xsh, self.X_dims, self.Y, self.Y_dims)

    def _shX_instance(self):
        """Return shuffled instance"""
        # do it like this to allow easy inheritance
        # unconditional shuffle
        Xsh_un = np.zeros_like(self.X)
        for i in range(self.X_n):
            shindx = np.random.permutation(self.X.shape[1])
            Xsh_un[i,:] = self.X[i,shindx]
        return DiscreteSystem(Xsh_un, self.X_dims, self.Y, self.Y_dims)

    def _qe_prep(self):
        """QE Preparation"""
        if self.qe_shuffle:
            # need to shuffle to ensure even stimulus distribution for QE
            shuffle = np.random.permutation(self.N)
            # fancy indexing makes a copy
            self.X = self.X[:,shuffle]
            self.Y = self.Y[:,shuffle]
        # ensure trials is a multiple of 4 for easy QE
        rem = np.mod(self.X.shape[1],4)
        if rem != 0:
            self.X = self.X[:,:-rem]
            self.Y = self.Y[:,:-rem]
        self.N = self.X.shape[1]

    def _subsampled_instance(self, sub):
        """Return subsampled instance for QE

        sub : tuple (df, i)
          df - reduction factor (2, 4)
          i - interval
        """
        # NOTE(review): with `from __future__ import division` this is true
        # division; N is a multiple of 4 after _qe_prep so the value is
        # integral but float-typed -- confirm slice bounds are accepted by
        # the numpy version in use.
        Nred = self.N / sub[0]
        sl = slice(sub[1]*Nred,(sub[1]+1)*Nred)
        return DiscreteSystem(self.X[:,sl], self.X_dims,
                              self.Y[:,sl], self.Y_dims)


class SortedDiscreteSystem(DiscreteSystem):
    """Class to hold probabilities and calculate entropies of a discrete
    stochastic system when the inputs are available already sorted by
    stimulus.
    :Attributes:
      PXY : (X_dim, Y_dim)
        Conditional probability vectors on decimalised space P(X|Y).
        ``PXY[:,i]`` is X probability distribution conditional on ``Y==i``.
      PX : (X_dim,)
        Unconditional decimalised X probability.
      PY : (Y_dim,)
        Unconditional decimalised Y probability.
      PXi : (X_m, X_n)
        Unconditional probability distributions for individual X components.
        ``PXi[i,j] = P(X_i==j)``
      PXiY : (X_m, X_n, Y_dim)
        Conditional probability distributions for individual X components.
        ``PXiY[i,j,k] = P(X_i==j | Y==k)``
      PiX : (X_dim,)
        ``Pind(X) = <P(X|y)>_y``

    """

    def __init__(self, X, X_dims, Y_m, Ny):
        """Check and assign inputs.

        :Parameters:
          X : (X_n, t) int array
            Array of measured input values. X_n variables in X space, t trials
          X_dims : tuple (n,m)
            Dimension of X (input) space; length n, base m words
          Y_m : int
            Finite alphabet size of single variable Y
          Ny : (Y_m,) int array
            Array of number of trials available for each stimulus. This
            should be ordered the same as the order of X w.r.t. stimuli.
            Y_t.sum() = X.shape[1]

        """
        self.X_dims = X_dims
        self.X_n = X_dims[0]
        self.X_m = X_dims[1]
        self.Y_m = Y_m
        self.X_dim = self.X_m ** self.X_n
        # Y is a single variable here, so Y_dim == Y_m
        self.Y_dim = self.Y_m
        self.X = np.atleast_2d(X)
        self.Ny = Ny.astype(float)
        self.N = self.X.shape[1]
        self._check_inputs()
        self.sampled = False
        self.qe_shuffle = True
        self.calc = []

    def _check_inputs(self):
        """Validate dtype, value range and shape of X and the Ny counts."""
        if (not np.issubdtype(self.X.dtype, np.int)):
            raise ValueError, "Inputs must be of integer type"
        if (self.X.max() >= self.X_m) or (self.X.min() < 0):
            raise ValueError, "X values must be in [0, X_m)"
        if (self.X.shape[0] != self.X_n):
            raise ValueError, "X.shape[0] must equal X_n"
        if (self.Ny.size != self.Y_m):
            raise ValueError, "Ny must contain Y_m elements"
        if (self.Ny.sum() != self.N):
            raise ValueError, "Ny.sum() must equal number of X input trials"

    def _sample(self, method='naive'):
        """Sample probabilities of system.

        Parameters
        ----------
        method : {'naive', 'beta:x', 'kt'}, optional
            Sampling method to use. 'naive' is the standard histogram method.
            'beta:x' is for an add-constant beta estimator, with beta value
            following the colon eg 'beta:0.01' [1]_. 'kt' is for the
            Krichevsky-Trofimov estimator [2]_, which is equivalent to
            'beta:0.5'.

        References
        ----------
        .. [1] T. Schurmann and P. Grassberger, "Entropy estimation of symbol
           sequences," Chaos,vol. 6, no. 3, pp. 414--427, 1996.
        .. [2] R. Krichevsky and V. Trofimov, "The performance of universal
           encoding," IEEE Trans. Information Theory, vol. 27, no. 2,
           pp. 199--207, Mar. 1981.

        """
        calc = self.calc
        # decimalise: collapse multi-variable words to single integers
        if any([c in calc for c in ['HXY','HX']]):
            if self.X_n > 1:
                d_X = decimalise(self.X, self.X_n, self.X_m)
            else:
                # make 1D
                d_X = self.X.reshape(self.X.size)
        # unconditional probabilities
        if ('HX' in calc) or ('ChiX' in calc):
            self.PX = prob(d_X, self.X_dim, method=method)
        if any([c in calc for c in ['HXY','HiX','HiXY','HY']]):
            # PY comes directly from the supplied per-stimulus trial counts
            self.PY = _probcount(self.Ny,self.N,method)
        if 'SiHXi' in calc:
            for i in xrange(self.X_n):
                self.PXi[:,i] = prob(self.X[i,:], self.X_m, method=method)
        # conditional probabilities
        if any([c in calc for c in ['HiXY','HXY','HshXY']]):
            # trials are contiguous per stimulus; walk the slices
            sstart=0
            for i in xrange(self.Y_dim):
                # NOTE(review): self.Ny is float, so `send` is float-typed;
                # confirm float slice bounds are accepted by the numpy
                # version in use.
                send = sstart+self.Ny[i]
                indx = slice(sstart,send)
                sstart = send
                if 'HXY' in calc:
                    # output conditional ensemble
                    oce = d_X[indx]
                    if oce.size == 0:
                        print 'Warning: Null output conditional ensemble for ' + \
                              'output : ' + str(i)
                    else:
                        self.PXY[:,i] = prob(oce, self.X_dim, method=method)
                if any([c in calc for c in ['HiX','HiXY','HshXY']]):
                    for j in xrange(self.X_n):
                        # output conditional ensemble for a single variable
                        oce = self.X[j,indx]
                        if oce.size == 0:
                            print 'Warning: Null independent output conditional ensemble for ' + \
                                  'output : ' + str(i) + ', variable : ' + str(j)
                        else:
                            self.PXiY[:,j,i] = prob(oce, self.X_m, method=method)
                            if 'HshXY' in calc:
                                # shuffle
                                #np.random.shuffle(oce)
                                shfoce = np.random.permutation(oce)
                                self.Xsh[j,indx] = shfoce
        # Pind(X) = <P(X|y)>_y
        if ('HiX' in calc) or ('ChiX' in calc):
            # construct joint distribution
            words = dec2base(np.atleast_2d(np.r_[0:self.X_dim]).T,self.X_m,self.X_n)
            PiXY = np.zeros((self.X_dim, self.Y_dim))
            # product over variables of the single-variable conditionals
            PiXY = self.PXiY[words,np.r_[0:self.X_n]].prod(axis=1)
            # average over Y
            self.PiX = np.dot(PiXY,self.PY)
        self.sampled = True

    def _sh_instance(self):
        """Return shuffled instance"""
        return SortedDiscreteSystem(self.Xsh, self.X_dims, self.Y_m, self.Ny)

    def _shX_instance(self):
        """Return shuffled instance"""
        # unconditional shuffle
        Xsh_un = np.zeros_like(self.X)
        for i in range(self.X_n):
            shindx = np.random.permutation(self.X.shape[1])
            Xsh_un[i,:] = self.X[i,shindx]
        return SortedDiscreteSystem(Xsh_un, self.X_dims, self.Y_m, self.Ny)

    def _qe_prep(self):
        """QE Preparation"""
        if self.qe_shuffle:
            # need to shuffle to ensure even stimulus distribution for QE;
            # shuffle within each stimulus block to keep the sorted order
            sstart = 0
            oldX = self.X
            self.X = np.zeros_like(oldX)
            for i in xrange(self.Y_m):
                send = sstart + int(self.Ny[i])
                shuffle = np.random.permutation(int(self.Ny[i]))
                self.X[:,sstart:send] = oldX[:,sstart+shuffle]
                sstart = send

    def _subsampled_instance(self, sub):
        """Return subsampled instance for QE

        sub : tuple (df, i)
          df - reduction factor (2, 4)
          i - interval
        """
        # reduce each Y data set
        slices = []
        Ny_new = np.floor(self.Ny/sub[0]).astype(int)
        sstart = 0
        for i in xrange(self.Y_m):
            send = sstart + int(self.Ny[i])
            # take the sub[1]-th interval of each stimulus block
            sl = slice(sstart + (sub[1] * Ny_new[i]),
                       sstart + ((sub[1]+1) * Ny_new[i]))
            slices.append(sl)
            sstart = send
        X_new = self.X[:,np.r_[tuple(slices)]]
        return SortedDiscreteSystem(X_new, self.X_dims, self.Y_m, Ny_new)
pyentropy-0.4.1/pyentropy/utils.py0000664000175000017510000002700111671654356020455 0ustar robincerobince00000000000000# This file is part of pyEntropy
#
# pyEntropy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# # pyEntropy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pyEntropy. If not, see . # # Copyright 2009, 2010 Robin Ince """Utility functions for working with discrete probability distributions. These functions are exposed in the top-level pyentropy namespace. """ from __future__ import division import numpy as np from tempfile import NamedTemporaryFile import os import subprocess from numpy.ma.core import _MaskedUnaryOperation, _DomainGreater import numpy.core.umath as umath malog2 = _MaskedUnaryOperation(umath.log2, 1.0, _DomainGreater(0.0)) def ent(p): mp = np.ma.array(p,copy=False,mask=(p<=np.finfo(np.float).eps)) return -(mp*malog2(mp)).sum(axis=0) def prob(x, m, method='naive'): """Sample probability of integer sequence. :Parameters: x : int array integer input sequence m : int alphabet size of input sequence (max(x) m-1: raise ValueError, "Input contains values that are too large" C = np.bincount(x) if C.size < m: # resize if any responses missed C.resize((m,)) return _probcount(C, x.size, method) def _probcount(C, N, method='naive'): """Estimate probability from a vector of bin counts :Parameters: C : int array integer vector of bin counts N : int number of trials method: {'naive', 'kt', 'beta:x','shrink'} Sampling method to use. 
""" N = float(N) if method.lower() == 'naive': # normal estimate P = C/N elif method.lower() == 'kt': # KT (constant addition) estimate P = (C + 0.5) / (N + (C.size/2.0)) elif method.lower() == 'shrink': # James-Stein shrinkage # http://www.strimmerlab.org/software/entropy/index.html Pnaive = C/N target = 1./C.size lam = _get_lambda_shrink(N, Pnaive, target) P = (lam * target) + ((1 - lam) * Pnaive) elif method.split(':')[0].lower() == 'beta': beta = float(method.split(':')[1]) # general add-constant beta estimate P = (C + beta) / (N + (beta*C.size)) else: raise ValueError, 'Unknown sampling method: '+str(est) return P def _get_lambda_shrink(N, u, target): """Lambda shrinkage estimator""" # *unbiased* estimator of variance of u varu = u*(1-u)/(N-1) # misspecification msp = ((u-target)**2).sum() # estimate shrinkage intensity if msp == 0: lam = 1. else: lam = (varu/msp).sum() # truncate if lam > 1: lam = 1 elif lam < 0: lam = 0 return lam def pt_bayescount(Pr, Nt): """Compute the support for analytic bias correction using the Bayesian approach of Panzeri and Treves (1996) :Parameters: Pr : 1D aray Probability vector Nt : int Number of trials :Returns: R : int Bayesian estimate of support """ # dimension of space dim = Pr.size # non zero probs only PrNZ = Pr[Pr>np.finfo(np.float).eps] Rnaive = PrNZ.size R = Rnaive if Rnaive < dim: Rexpected = Rnaive - ((1.0-PrNZ)**Nt).sum() deltaR_prev = dim deltaR = np.abs(Rnaive - Rexpected) xtr = 0.0 while (deltaR < deltaR_prev) and ((Rnaive+xtr) b-1: raise ValueError, "Input vector x doesnt match parameters" powers = b**np.arange(n-1,-0.5,-1) d_x = np.dot(x.T,powers).astype(int) return d_x def quantise(input, m, uniform='sampling', minmax=None, centers=True): """ Quantise 1D input vector into m levels (unsigned) :Parameters: uniform : {'sampling','bins'} Determine whether quantisation is uniform for sampling (equally occupied bins) or the bins have uniform widths minmax : tuple (min,max) Specify the range for uniform='bins' 
quantisation, rather than using min/max of input centers : {True, False} Return vector of bin centers instead of bin bounds """ bin_centers = np.zeros(m) if uniform == 'sampling': #bin_numel = np.round(input.size/m) - 1 bin_numel = np.floor(input.size/m) r = input.size - (bin_numel*m) stemp = input.copy() stemp.sort(axis=0) # original method #bin_bounds = stemp[bin_numel:-bin_numel+1:bin_numel] # more uniform method idx = np.arange(bin_numel, bin_numel*m, bin_numel, dtype=np.int) idx[0:r] = idx[0:r] + np.arange(1,r+1,dtype=np.int) idx[r:] = idx[r:] + r bin_bounds = stemp[idx] if centers: # calculate center for each bin bin_centers[0] = (bin_bounds[0]+stemp[0]) / 2.0 for i in range(1,m-1): bin_centers[i] = (bin_bounds[i]+bin_bounds[i-1])/2.0 bin_centers[m-1] = (stemp[-1]+bin_bounds[-1]) / 2.0 elif uniform == 'bins': if minmax is not None: min, max = minmax else: min, max = input.min(), input.max() drange = float(max) - float(min) bin_width = drange / float(m) bin_bounds = np.arange(1,m,dtype=float) bin_bounds *= bin_width bin_bounds += min if centers: bin_centers = r_[bin_bounds - (bin_width/2.0), bin_bounds[-1]+(bin_width/2.0)] else: raise ValueError, "Unknown value of 'uniform'" q_value = np.digitize(input, bin_bounds) if centers: # bin centers return q_value, bin_bounds, bin_centers else: return q_value, bin_bounds def quantise_discrete(input, m): """Re-bin an already discretised sequence (eg of integer counts) Input should already be non-negative integers """ # astype forces a copy even if already int X = input.astype(np.int) if (X.min() < 0) or not np.all(np.asarray(X,dtype=np.int)==X): raise ValueError, "Expecting non-negative integer input" if input.max() < m: # nothing to do return input # rebinning algorithm # get bincounts now to determine smallest bins # merge smallest bin to smallest neighbouring bin # (maintain continuity) until we have the right number counts = list(np.bincount(X)) Nbins = len(counts) labels = list(np.r_[0:Nbins]) def merge_bins(a,b): 
"""Merge bin a into bin b""" counts[b] = counts[b] + counts[a] X[X==labels[a]] = labels[b] counts.pop(a) labels.pop(a) while Nbins > m: cidx = np.argsort(counts) # smallest one si = cidx[0] # if its at the edges can only merge one way if si == 0: merge_bins(si,1) elif si == len(counts)-1: merge_bins(si, si-1) else: # merge to the smallest neighbour target = [si-1, si+1][np.argmin([counts[si-1], counts[si+1]])] merge_bins(si, target) Nbins = Nbins - 1 # relabel newlabels = range(len(labels)) for i in range(len(labels)): if newlabels[i] != labels[i]: # only reassign if necessary X[X==labels[i]] = newlabels[i] return X