ase-3.22.1/.flake8

[flake8]
ignore = E129,W293,W503,W504,E741,E501,E226,E265,W291,E741,E127,E128,E126
max-line-length = 80
exclude = build, dist

ase-3.22.1/.gitignore

*.pyc
doc/build/
/dist
MANIFEST
.DS_Store
*/.DS_Store
*.traj
/build
/ase.egg-info
/ase/db/static/jsmol
doc/ase/dft/bztable.rst
doc/tutorials/deltacodesdft/fit.json
doc/ase/io/formatoptions.rst

# Editor backup files
*~

# Ignore PyCharm settings
.idea/

# Ignore VS Code settings
.vscode/

# Vim swap files:
.*.sw?

# Translation template
ase/gui/po/ag.pot

# Pytest and code coverage:
.cache
.coverage
coverage-html
.mypy_cache

ase-3.22.1/.gitlab-ci.yml

---
# ASE Gitlab CI configuration

variables:
  OMP_NUM_THREADS: "1"

stages:
  - test
  - paperwork
  - deploy

.database-configuration:
  variables:
    POSTGRES_DB: testase
    POSTGRES_USER: ase
    POSTGRES_PASSWORD: "ase"
    MYSQL_DATABASE: testase_mysql
    MYSQL_ROOT_PASSWORD: ase
  services:
    - postgres:latest
    - mysql:latest
    - mariadb:latest

# Check oldest supported Python with oldest supported libraries.
# Does not install any optional libraries except matplotlib.
#
# With older python, pytest-xdist jumbles the tests differently
# on different codes, then complains.  So we use -j 0.
oldlibs:
  image: registry.gitlab.com/ase/ase:ase-oldlibs
  extends: .database-configuration
  script:
    - pip install --no-deps .
    - >
      ase test --pytest --color=yes
      -W "ignore:can't resolve package"
      -W ignore::PendingDeprecationWarning

# For testing newest versions of libraries against standard images
# on dockerhub.
#
# psycopg2-binary is for testing the postgres backend for ase.db.
# Currently we have trouble with python3.8 where we need to compile some
# libraries that don't have binary wheels; we don't want to install gcc
# in this test job.
pipinstall:
  image: python:3.8
  extends: .database-configuration
  script:
    - python --version
    - pip install psycopg2-binary pymysql cryptography
    - pip install .[test]
    - ase test --pytest --color=yes
  when: manual

# This is the main test job using new versions of libraries.
# The intention is to enable as many features and libraries as possible.
#
# We execute it inside the project dir with --editable in order for
# the coverage script to correctly resolve the OMIT paths (a bit hacky).
# It would be better to install it for real, and for things to just work.
main:
  image: registry.gitlab.com/ase/ase:ase-main
  extends: .database-configuration
  # Inheriting variables from the database-configuration job
  # seems to be broken all of a sudden (gitlab-runner 13.0.1 and 13.3.1).
  # We need to redefine them here then, otherwise gitlab-runner will fail
  # those tests when run locally.  Meanwhile everything works on gitlab.com.
  # Strange!
  variables:
    POSTGRES_DB: testase
    POSTGRES_USER: ase
    POSTGRES_PASSWORD: "ase"
    MYSQL_DATABASE: testase_mysql
    MYSQL_ROOT_PASSWORD: ase
  services:
    - postgres:latest
    - mysql:latest
    - mariadb:latest
  script:
    - python --version
    - pip install --no-deps --editable .
    - ase info --calculators
    - cd $CI_PROJECT_DIR
    - ase test --calculators asap,eam,ff,lj,morse,tip3p,tip4p --coverage --pytest --color=yes --durations 20
    - mv ase/test/coverage-html coverage-main
    - mv ase/test/.coverage coverage-main/coverage.dat
  artifacts:
    paths:
      - coverage-main/
    expire_in: 1 week

# Calculator integration tests which always run.
# Encompasses those tests marked as @pytest.mark.calculator_lite.
# Please make sure these tests are cheap.
calculators-lite:
  image: registry.gitlab.com/ase/ase:ase-full-monty
  script:
    - pip install --no-deps --editable .
    - ase test calculator --calculators=auto --coverage --pytest -m calculator_lite --color=yes --durations=20
    - mv ase/test/coverage-html coverage-calculators-lite
    - mv ase/test/.coverage coverage-calculators-lite/coverage.dat
  artifacts:
    paths:
      - coverage-calculators-lite/
    expire_in: 1 week

# Plan: Test as many calculators as possible as well as possible.
# Obviously this is kind of expensive so the job is manually activated.
# Also, the docker serves as a knowledgebase for how to set up those
# calculators with ASE.
#
# It would be great if someone could enable more calculators with this.
calculators:
  image: registry.gitlab.com/ase/ase:ase-full-monty
  script:
    - pip install --no-deps --editable .
    - ase info --calculators
    - ase test calculator --calculators abinit,asap,cp2k,dftb,espresso,gpaw,kim,lammpslib,lammpsrun,nwchem,octopus,siesta --coverage --pytest --color=yes --durations 20
    - mv ase/test/coverage-html coverage-calculators
    - mv ase/test/.coverage coverage-calculators/coverage.dat
  when: manual
  artifacts:
    paths:
      - coverage-calculators/
    expire_in: 1 week

doc:
  image: registry.gitlab.com/ase/ase:ase-main
  script:
    - pip install --no-deps .[docs]
    - ase info
    - which sphinx-build
    - cd $CI_PROJECT_DIR/doc
    - python -m ase.utils.sphinx run  # test scripts
    - sphinx-build -W . build
  artifacts:
    paths:
      - $CI_PROJECT_DIR/doc/build/
    expire_in: 1 week

distribution-package:
  image: registry.gitlab.com/ase/ase:ase-main
  extends: .database-configuration
  script:
    - mkdir dist
    - python setup.py sdist | tee dist/setup_sdist.log
    - python setup.py bdist_wheel | tee dist/setup_bdist_wheel.log
    - pip install dist/ase-*.tar.gz
    - ase test --pytest --color=yes
    - pip uninstall --yes ase
    - pip install dist/ase-*-py3-none-any.whl
    - ase test --pytest --color=yes
  artifacts:
    paths:
      - dist
    expire_in: 1 week
  when: manual

# Publish code coverage data on web.
#  * The deploy stage is specially recognized by gitlab
#  * The jobname pages is specially recognized by gitlab
#  * The public/ directory is specially recognized by gitlab
# https://about.gitlab.com/blog/2016/11/03/publish-code-coverage-report-with-gitlab-pages/
pages:
  stage: deploy
  dependencies:
    - coverage-combine
  script:
    - mv coverage-html public
  artifacts:
    paths:
      - public
    expire_in: 2 weeks
  only:
    - master

lint:
  image: registry.gitlab.com/ase/ase:ase-paperwork
  script:
    - cd $CI_PROJECT_DIR
    - mypy --color-output -p ase
    - python -We:invalid -m compileall -f -q ase/
    - flake8 .

coverage-combine:
  image: registry.gitlab.com/ase/ase:ase-paperwork
  stage: paperwork
  dependencies:
    - main
    - calculators-lite
  script:
    - cd ase/test  # Next to .coveragerc and friends
    - coverage combine ../../coverage-main/coverage.dat ../../coverage-calculators-lite/coverage.dat
    - coverage report
    - coverage html
    - cd ../..
    - mv ase/test/coverage-html .
  coverage: '/TOTAL.+ ([0-9]+\.[0-9]+%)/'
  artifacts:
    paths:
      - coverage-html
    expire_in: 1 week
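The calculators-lite job above selects only tests marked with ``@pytest.mark.calculator_lite`` (via ``-m calculator_lite``). As a rough sketch of what such a cheap, marked test can look like — the test name, body and assertions below are invented for illustration and are not part of ASE's actual test suite — consider::

    import pytest

    from ase.build import bulk
    from ase.calculators.emt import EMT


    @pytest.mark.calculator_lite  # cheap enough to run on every pipeline
    def test_emt_bulk_copper():
        atoms = bulk('Cu', 'fcc', a=3.6)       # primitive fcc copper cell
        atoms.calc = EMT()                     # ASE's built-in EMT calculator
        energy = atoms.get_potential_energy()  # eV
        forces = atoms.get_forces()
        assert forces.shape == (len(atoms), 3)
        assert abs(energy) < 1e3               # sanity check: a finite number

ASE's real calculator tests use their own fixtures and live under ``ase/test/``; only the marker name here is taken from the configuration above.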
ase-3.22.1/.mailmap

# The .mailmap file is used to unify distinct names and mail address of
# committers as a single committer.
#
# Often the same committer has multiple aliases in commit history
# because of inconsistent configuration or changing university e-mail
# addresses; for the University giveth, and the University taketh away.
#
# If you can read this, consider updating your aliases below so you are
# listed by "git shortlog -se" only once and with your real name and
# working e-mail address.
#
# Example:
#
# Name Lastname Othername
#
# This specifies that commits under Othername should
# be listed as Name Lastname instead.

# [several hundred committer alias entries omitted]
ase-3.22.1/CHANGELOG.rst

Changelog
=========

See what's new in ASE here: https://wiki.fysik.dtu.dk/ase/releasenotes.html

ase-3.22.1/CONTRIBUTING.rst

Contributing
============

See how to contribute here: https://wiki.fysik.dtu.dk/ase/development/contribute.html

ase-3.22.1/COPYING

                    GNU GENERAL PUBLIC LICENSE
                       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.

To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. ase-3.22.1/COPYING.LESSER000066400000000000000000000636421415166253600144220ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. 
A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. 
You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. 
Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. 
However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! ase-3.22.1/LICENSE000066400000000000000000000011401415166253600133610ustar00rootroot00000000000000ASE is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 2.1 of the License, or (at your option) any later version. ASE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ASE. If not, see . ase-3.22.1/MANIFEST.in000066400000000000000000000005161415166253600141200ustar00rootroot00000000000000include MANIFEST.in include COPYING* LICENSE README.rst CONTRIBUTING.rst CHANGELOG.rst include bin/ase include ase/spacegroup/spacegroup.dat include ase/collections/*.json include ase/db/static/* include ase/db/templates/* include ase/gui/po/Makefile include ase/gui/po/??_??/LC_MESSAGES/ag.po include ase/gui/po/??/LC_MESSAGES/ag.po ase-3.22.1/README.rst000066400000000000000000000037151415166253600140550ustar00rootroot00000000000000Atomic Simulation Environment ============================= ASE is a set of tools and Python modules for setting up, manipulating, running, visualizing and analyzing atomistic simulations. Webpage: http://wiki.fysik.dtu.dk/ase Requirements ------------ * Python_ 3.6 or later * NumPy_ (base N-dimensional array package) * SciPy_ (library for scientific computing) Optional: * For ASE's GUI: Matplotlib_ (2D Plotting) * tkinter (for ase.gui) * Flask (for ase.db web-interface) Installation ------------ Add ``~/ase`` to your $PYTHONPATH environment variable and add ``~/ase/bin`` to $PATH (assuming ``~/ase`` is where your ASE folder is). Testing ------- Please run the tests:: $ ase test # takes 1 min. and send us the output if there are failing tests. Contact ------- * Mailing list: ase-users_ * IRC_: #ase on freenode.net Please send us bug-reports, patches, code, ideas and questions. Example ------- Geometry optimization of hydrogen molecule with NWChem: >>> from ase import Atoms >>> from ase.optimize import BFGS >>> from ase.calculators.nwchem import NWChem >>> from ase.io import write >>> h2 = Atoms('H2', positions=[[0, 0, 0], [0, 0, 0.7]]) >>> h2.calc = NWChem(xc='PBE') >>> opt = BFGS(h2, trajectory='h2.traj') >>> opt.run(fmax=0.02) BFGS: 0 19:10:49 -31.435229 2.2691 BFGS: 1 19:10:50 -31.490773 0.3740 BFGS: 2 19:10:50 -31.492791 0.0630 BFGS: 3 19:10:51 -31.492848 0.0023 >>> write('H2.xyz', h2) >>> h2.get_potential_energy() # ASE's units are eV and Ang -31.492847800329216 This example requires NWChem to be installed. :: $ ase gui h2.traj .. _Python: http://www.python.org/ .. _NumPy: http://docs.scipy.org/doc/numpy/reference/ .. 
_SciPy: http://docs.scipy.org/doc/scipy/reference/ .. _Matplotlib: http://matplotlib.org/ .. _ase-users: https://listserv.fysik.dtu.dk/mailman/listinfo/ase-users .. _IRC: http://webchat.freenode.net/?randomnick=0&channels=ase ase-3.22.1/appveyor.yml000066400000000000000000000037001415166253600147500ustar00rootroot00000000000000environment: matrix: # For Python versions available on Appveyor, see # http://www.appveyor.com/docs/installed-software#python # # Python 3.6 #- PYTHON: "C:\\Python36" # Python 3.6 - 64-bit - PYTHON: "C:\\Python36-x64" # # Conda 3.6 #- PYTHON: "C:\\Miniconda36" # # Conda 3.6 64-bit #- PYTHON: "C:\\Miniconda36-x64" install: # Prepend chosen Python to the PATH of this build - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" # Check that we have the expected version and architecture for Python - "python --version" - "python -c \"import struct; print(struct.calcsize('P') * 8)\"" # Install the conda supplied packages if using conda, otherwise use pip # The wheel package is needed for 'pip wheel' # Turn off progressbars '-q' otherwise PowerShell thinks there are errors - "echo %PYTHON%" - ps: | if($env:PYTHON -match "conda") { echo "install with conda" conda update -yq conda conda install -yq pip=21.0.1 wheel numpy scipy pyflakes matplotlib flask pytest pytest-mock } else { echo "install with pip" #pip install --upgrade pip python.exe -m pip install --upgrade pip==21.0.1 pip install wheel pytest --disable-pip-version-check } # install ase into the current python - "echo %cd%" - "where pip" - "pip install .[test] --disable-pip-version-check" build: off test_script: # run tests from temp dir so source tree doesn't interfere - "cd %TEMP%" - "ase info" - "ase -T test" after_test: # This step builds distribution. - "cd %APPVEYOR_BUILD_FOLDER%" - "pip wheel -w dist --no-deps ." artifacts: # bdist_wheel puts your built wheel in the dist directory - path: dist\* #on_success: # You can use this step to upload your artifacts to a public website. # See Appveyor's documentation for more details. Or you can simply # access your wheels from the Appveyor "artifacts" tab for your build. ase-3.22.1/ase/000077500000000000000000000000001415166253600131305ustar00rootroot00000000000000ase-3.22.1/ase/__init__.py000066400000000000000000000007751415166253600152520ustar00rootroot00000000000000# Copyright 2008, 2009 CAMd # (see accompanying license files for details). """Atomic Simulation Environment.""" import sys if sys.version_info[0] == 2: raise ImportError('ASE requires Python3. 
This is Python2.') __all__ = ['Atoms', 'Atom'] __version__ = '3.22.1' from ase.atom import Atom from ase.atoms import Atoms # import ase.parallel early to avoid circular import problems when # ase.parallel does "from gpaw.mpi import world": import ase.parallel # noqa ase.parallel # silence pyflakes ase-3.22.1/ase/__main__.py000066400000000000000000000000451415166253600152210ustar00rootroot00000000000000from ase.cli.main import main main() ase-3.22.1/ase/atom.py000066400000000000000000000150071415166253600144450ustar00rootroot00000000000000"""This module defines the Atom object.""" import numpy as np from ase.data import atomic_numbers, chemical_symbols, atomic_masses # Singular, plural, default value: names = {'position': ('positions', np.zeros(3)), 'number': ('numbers', 0), 'tag': ('tags', 0), 'momentum': ('momenta', np.zeros(3)), 'mass': ('masses', None), 'magmom': ('initial_magmoms', 0.0), 'charge': ('initial_charges', 0.0)} def atomproperty(name, doc): """Helper function to easily create Atom attribute property.""" def getter(self): return self.get(name) def setter(self, value): self.set(name, value) def deleter(self): self.delete(name) return property(getter, setter, deleter, doc) def abcproperty(index): """Helper function to easily create Atom ABC-property.""" def getter(self): return self.scaled_position[index] def setter(self, value): # We can't just do self.scaled_position[i] = value # because scaled_position is a new buffer, not a view into # something we can write back to. # This is a clear bug! spos = self.scaled_position spos[index] = value self.scaled_position = spos return property(getter, setter, doc='ABC'[index] + '-coordinate') def xyzproperty(index): """Helper function to easily create Atom XYZ-property.""" def getter(self): return self.position[index] def setter(self, value): self.position[index] = value return property(getter, setter, doc='XYZ'[index] + '-coordinate') class Atom: """Class for representing a single atom. Parameters: symbol: str or int Can be a chemical symbol (str) or an atomic number (int). position: sequence of 3 floats Atomic position. tag: int Special purpose tag. momentum: sequence of 3 floats Momentum for atom. mass: float Atomic mass in atomic units. magmom: float or 3 floats Magnetic moment. charge: float Atomic charge. 
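Example (a minimal illustrative sketch using only the keywords documented above; the element and values are arbitrary):

>>> a = Atom('O', position=(0.0, 0.0, 0.0), charge=-0.2)  # illustrative values
>>> a.symbol
'O'
>>> a.charge
-0.2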
""" __slots__ = ['data', 'atoms', 'index'] def __init__(self, symbol='X', position=(0, 0, 0), tag=None, momentum=None, mass=None, magmom=None, charge=None, atoms=None, index=None): self.data = d = {} if atoms is None: # This atom is not part of any Atoms object: if isinstance(symbol, str): d['number'] = atomic_numbers[symbol] else: d['number'] = symbol d['position'] = np.array(position, float) d['tag'] = tag if momentum is not None: momentum = np.array(momentum, float) d['momentum'] = momentum d['mass'] = mass if magmom is not None: magmom = np.array(magmom, float) d['magmom'] = magmom d['charge'] = charge self.index = index self.atoms = atoms @property def scaled_position(self): pos = self.position spos = self.atoms.cell.scaled_positions(pos[np.newaxis]) return spos[0] @scaled_position.setter def scaled_position(self, value): pos = self.atoms.cell.cartesian_positions(value) self.position = pos def __repr__(self): s = "Atom('%s', %s" % (self.symbol, list(self.position)) for name in ['tag', 'momentum', 'mass', 'magmom', 'charge']: value = self.get_raw(name) if value is not None: if isinstance(value, np.ndarray): value = value.tolist() s += ', %s=%s' % (name, value) if self.atoms is None: s += ')' else: s += ', index=%d)' % self.index return s def cut_reference_to_atoms(self): """Cut reference to atoms object.""" for name in names: self.data[name] = self.get_raw(name) self.index = None self.atoms = None def get_raw(self, name): """Get name attribute, return None if not explicitly set.""" if name == 'symbol': return chemical_symbols[self.get_raw('number')] if self.atoms is None: return self.data[name] plural = names[name][0] if plural in self.atoms.arrays: return self.atoms.arrays[plural][self.index] else: return None def get(self, name): """Get name attribute, return default if not explicitly set.""" value = self.get_raw(name) if value is None: if name == 'mass': value = atomic_masses[self.number] else: value = names[name][1] return value def set(self, name, value): """Set name attribute to value.""" if name == 'symbol': name = 'number' value = atomic_numbers[value] if self.atoms is None: assert name in names self.data[name] = value else: plural, default = names[name] if plural in self.atoms.arrays: array = self.atoms.arrays[plural] if name == 'magmom' and array.ndim == 2: assert len(value) == 3 array[self.index] = value else: if name == 'magmom' and np.asarray(value).ndim == 1: array = np.zeros((len(self.atoms), 3)) elif name == 'mass': array = self.atoms.get_masses() else: default = np.asarray(default) array = np.zeros((len(self.atoms),) + default.shape, default.dtype) array[self.index] = value self.atoms.new_array(plural, array) def delete(self, name): """Delete name attribute.""" assert self.atoms is None assert name not in ['number', 'symbol', 'position'] self.data[name] = None symbol = atomproperty('symbol', 'Chemical symbol') number = atomproperty('number', 'Atomic number') position = atomproperty('position', 'XYZ-coordinates') tag = atomproperty('tag', 'Integer tag') momentum = atomproperty('momentum', 'XYZ-momentum') mass = atomproperty('mass', 'Atomic mass') magmom = atomproperty('magmom', 'Initial magnetic moment') charge = atomproperty('charge', 'Initial atomic charge') x = xyzproperty(0) y = xyzproperty(1) z = xyzproperty(2) a = abcproperty(0) b = abcproperty(1) c = abcproperty(2) ase-3.22.1/ase/atoms.py000066400000000000000000002126641415166253600146400ustar00rootroot00000000000000# Copyright 2008, 2009 CAMd # (see accompanying license files for details). 
"""Definition of the Atoms class. This module defines the central object in the ASE package: the Atoms object. """ import copy import numbers from math import cos, sin, pi import numpy as np import ase.units as units from ase.atom import Atom from ase.cell import Cell from ase.stress import voigt_6_to_full_3x3_stress, full_3x3_to_voigt_6_stress from ase.data import atomic_masses, atomic_masses_common from ase.geometry import (wrap_positions, find_mic, get_angles, get_distances, get_dihedrals) from ase.symbols import Symbols, symbols2numbers from ase.utils import deprecated class Atoms: """Atoms object. The Atoms object can represent an isolated molecule, or a periodically repeated structure. It has a unit cell and there may be periodic boundary conditions along any of the three unit cell axes. Information about the atoms (atomic numbers and position) is stored in ndarrays. Optionally, there can be information about tags, momenta, masses, magnetic moments and charges. In order to calculate energies, forces and stresses, a calculator object has to attached to the atoms object. Parameters: symbols: str (formula) or list of str Can be a string formula, a list of symbols or a list of Atom objects. Examples: 'H2O', 'COPt12', ['H', 'H', 'O'], [Atom('Ne', (x, y, z)), ...]. positions: list of xyz-positions Atomic positions. Anything that can be converted to an ndarray of shape (n, 3) will do: [(x1,y1,z1), (x2,y2,z2), ...]. scaled_positions: list of scaled-positions Like positions, but given in units of the unit cell. Can not be set at the same time as positions. numbers: list of int Atomic numbers (use only one of symbols/numbers). tags: list of int Special purpose tags. momenta: list of xyz-momenta Momenta for all atoms. masses: list of float Atomic masses in atomic units. magmoms: list of float or list of xyz-values Magnetic moments. Can be either a single value for each atom for collinear calculations or three numbers for each atom for non-collinear calculations. charges: list of float Initial atomic charges. cell: 3x3 matrix or length 3 or 6 vector Unit cell vectors. Can also be given as just three numbers for orthorhombic cells, or 6 numbers, where first three are lengths of unit cell vectors, and the other three are angles between them (in degrees), in following order: [len(a), len(b), len(c), angle(b,c), angle(a,c), angle(a,b)]. First vector will lie in x-direction, second in xy-plane, and the third one in z-positive subspace. Default value: [0, 0, 0]. celldisp: Vector Unit cell displacement vector. To visualize a displaced cell around the center of mass of a Systems of atoms. Default value = (0,0,0) pbc: one or three bool Periodic boundary conditions flags. Examples: True, False, 0, 1, (1, 1, 0), (True, False, False). Default value: False. constraint: constraint object(s) Used for applying one or more constraints during structure optimization. calculator: calculator object Used to attach a calculator for calculating energies and atomic forces. info: dict of key-value pairs Dictionary of key-value pairs with additional information about the system. The following keys may be used by ase: - spacegroup: Spacegroup instance - unit_cell: 'conventional' | 'primitive' | int | 3 ints - adsorbate_info: Information about special adsorption sites Items in the info attribute survives copy and slicing and can be stored in and retrieved from trajectory files given that the key is a string, the value is JSON-compatible and, if the value is a user-defined object, its base class is importable. 
One should not make any assumptions about the existence of keys. Examples: These three are equivalent: >>> d = 1.104 # N2 bondlength >>> a = Atoms('N2', [(0, 0, 0), (0, 0, d)]) >>> a = Atoms(numbers=[7, 7], positions=[(0, 0, 0), (0, 0, d)]) >>> a = Atoms([Atom('N', (0, 0, 0)), Atom('N', (0, 0, d))]) FCC gold: >>> a = 4.05 # Gold lattice constant >>> b = a / 2 >>> fcc = Atoms('Au', ... cell=[(0, b, b), (b, 0, b), (b, b, 0)], ... pbc=True) Hydrogen wire: >>> d = 0.9 # H-H distance >>> h = Atoms('H', positions=[(0, 0, 0)], ... cell=(d, 0, 0), ... pbc=(1, 0, 0)) """ ase_objtype = 'atoms' # For JSONability def __init__(self, symbols=None, positions=None, numbers=None, tags=None, momenta=None, masses=None, magmoms=None, charges=None, scaled_positions=None, cell=None, pbc=None, celldisp=None, constraint=None, calculator=None, info=None, velocities=None): self._cellobj = Cell.new() self._pbc = np.zeros(3, bool) atoms = None if hasattr(symbols, 'get_positions'): atoms = symbols symbols = None elif (isinstance(symbols, (list, tuple)) and len(symbols) > 0 and isinstance(symbols[0], Atom)): # Get data from a list or tuple of Atom objects: data = [[atom.get_raw(name) for atom in symbols] for name in ['position', 'number', 'tag', 'momentum', 'mass', 'magmom', 'charge']] atoms = self.__class__(None, *data) symbols = None if atoms is not None: # Get data from another Atoms object: if scaled_positions is not None: raise NotImplementedError if symbols is None and numbers is None: numbers = atoms.get_atomic_numbers() if positions is None: positions = atoms.get_positions() if tags is None and atoms.has('tags'): tags = atoms.get_tags() if momenta is None and atoms.has('momenta'): momenta = atoms.get_momenta() if magmoms is None and atoms.has('initial_magmoms'): magmoms = atoms.get_initial_magnetic_moments() if masses is None and atoms.has('masses'): masses = atoms.get_masses() if charges is None and atoms.has('initial_charges'): charges = atoms.get_initial_charges() if cell is None: cell = atoms.get_cell() if celldisp is None: celldisp = atoms.get_celldisp() if pbc is None: pbc = atoms.get_pbc() if constraint is None: constraint = [c.copy() for c in atoms.constraints] if calculator is None: calculator = atoms.calc if info is None: info = copy.deepcopy(atoms.info) self.arrays = {} if symbols is None: if numbers is None: if positions is not None: natoms = len(positions) elif scaled_positions is not None: natoms = len(scaled_positions) else: natoms = 0 numbers = np.zeros(natoms, int) self.new_array('numbers', numbers, int) else: if numbers is not None: raise TypeError( 'Use only one of "symbols" and "numbers".') else: self.new_array('numbers', symbols2numbers(symbols), int) if self.numbers.ndim != 1: raise ValueError('"numbers" must be 1-dimensional.') if cell is None: cell = np.zeros((3, 3)) self.set_cell(cell) if celldisp is None: celldisp = np.zeros(shape=(3, 1)) self.set_celldisp(celldisp) if positions is None: if scaled_positions is None: positions = np.zeros((len(self.arrays['numbers']), 3)) else: assert self.cell.rank == 3 positions = np.dot(scaled_positions, self.cell) else: if scaled_positions is not None: raise TypeError( 'Use only one of "symbols" and "numbers".') self.new_array('positions', positions, float, (3,)) self.set_constraint(constraint) self.set_tags(default(tags, 0)) self.set_masses(default(masses, None)) self.set_initial_magnetic_moments(default(magmoms, 0.0)) self.set_initial_charges(default(charges, 0.0)) if pbc is None: pbc = False self.set_pbc(pbc) self.set_momenta(default(momenta, 
(0.0, 0.0, 0.0)), apply_constraint=False) if velocities is not None: if momenta is None: self.set_velocities(velocities) else: raise TypeError( 'Use only one of "momenta" and "velocities".') if info is None: self.info = {} else: self.info = dict(info) self.calc = calculator @property def symbols(self): """Get chemical symbols as a :class:`ase.symbols.Symbols` object. The object works like ``atoms.numbers`` except its values are strings. It supports in-place editing.""" return Symbols(self.numbers) @symbols.setter def symbols(self, obj): new_symbols = Symbols.fromsymbols(obj) self.numbers[:] = new_symbols.numbers @deprecated(DeprecationWarning('Please use atoms.calc = calc')) def set_calculator(self, calc=None): """Attach calculator object. Please use the equivalent atoms.calc = calc instead of this method.""" self.calc = calc @deprecated(DeprecationWarning('Please use atoms.calc')) def get_calculator(self): """Get currently attached calculator object. Please use the equivalent atoms.calc instead of atoms.get_calculator().""" return self.calc @property def calc(self): """Calculator object.""" return self._calc @calc.setter def calc(self, calc): self._calc = calc if hasattr(calc, 'set_atoms'): calc.set_atoms(self) @calc.deleter # type: ignore @deprecated(DeprecationWarning('Please use atoms.calc = None')) def calc(self): self._calc = None @property # type: ignore @deprecated('Please use atoms.cell.rank instead') def number_of_lattice_vectors(self): """Number of (non-zero) lattice vectors.""" return self.cell.rank def set_constraint(self, constraint=None): """Apply one or more constrains. The *constraint* argument must be one constraint object or a list of constraint objects.""" if constraint is None: self._constraints = [] else: if isinstance(constraint, list): self._constraints = constraint elif isinstance(constraint, tuple): self._constraints = list(constraint) else: self._constraints = [constraint] def _get_constraints(self): return self._constraints def _del_constraints(self): self._constraints = [] constraints = property(_get_constraints, set_constraint, _del_constraints, 'Constraints of the atoms.') def set_cell(self, cell, scale_atoms=False, apply_constraint=True): """Set unit cell vectors. Parameters: cell: 3x3 matrix or length 3 or 6 vector Unit cell. A 3x3 matrix (the three unit cell vectors) or just three numbers for an orthorhombic cell. Another option is 6 numbers, which describes unit cell with lengths of unit cell vectors and with angles between them (in degrees), in following order: [len(a), len(b), len(c), angle(b,c), angle(a,c), angle(a,b)]. First vector will lie in x-direction, second in xy-plane, and the third one in z-positive subspace. scale_atoms: bool Fix atomic positions or move atoms with the unit cell? Default behavior is to *not* move the atoms (scale_atoms=False). apply_constraint: bool Whether to apply constraints to the given cell. 
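The effect of ``scale_atoms`` can be sketched as follows (illustrative cell and position values; see also the examples below):

>>> atoms = Atoms('H', positions=[(1.0, 0.0, 0.0)], cell=[2, 2, 2])
>>> atoms.set_cell([4, 2, 2], scale_atoms=True)
>>> atoms.positions[0, 0]
2.0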
Examples: Two equivalent ways to define an orthorhombic cell: >>> atoms = Atoms('He') >>> a, b, c = 7, 7.5, 8 >>> atoms.set_cell([a, b, c]) >>> atoms.set_cell([(a, 0, 0), (0, b, 0), (0, 0, c)]) FCC unit cell: >>> atoms.set_cell([(0, b, b), (b, 0, b), (b, b, 0)]) Hexagonal unit cell: >>> atoms.set_cell([a, a, c, 90, 90, 120]) Rhombohedral unit cell: >>> alpha = 77 >>> atoms.set_cell([a, a, a, alpha, alpha, alpha]) """ # Override pbcs if and only if given a Cell object: cell = Cell.new(cell) # XXX not working well during initialize due to missing _constraints if apply_constraint and hasattr(self, '_constraints'): for constraint in self.constraints: if hasattr(constraint, 'adjust_cell'): constraint.adjust_cell(self, cell) if scale_atoms: M = np.linalg.solve(self.cell.complete(), cell.complete()) self.positions[:] = np.dot(self.positions, M) self.cell[:] = cell def set_celldisp(self, celldisp): """Set the unit cell displacement vectors.""" celldisp = np.array(celldisp, float) self._celldisp = celldisp def get_celldisp(self): """Get the unit cell displacement vectors.""" return self._celldisp.copy() def get_cell(self, complete=False): """Get the three unit cell vectors as a `class`:ase.cell.Cell` object. The Cell object resembles a 3x3 ndarray, and cell[i, j] is the jth Cartesian coordinate of the ith cell vector.""" if complete: cell = self.cell.complete() else: cell = self.cell.copy() return cell @deprecated('Please use atoms.cell.cellpar() instead') def get_cell_lengths_and_angles(self): """Get unit cell parameters. Sequence of 6 numbers. First three are unit cell vector lengths and second three are angles between them:: [len(a), len(b), len(c), angle(b,c), angle(a,c), angle(a,b)] in degrees. """ return self.cell.cellpar() @deprecated('Please use atoms.cell.reciprocal()') def get_reciprocal_cell(self): """Get the three reciprocal lattice vectors as a 3x3 ndarray. Note that the commonly used factor of 2 pi for Fourier transforms is not included here.""" return self.cell.reciprocal() @property def pbc(self): """Reference to pbc-flags for in-place manipulations.""" return self._pbc @pbc.setter def pbc(self, pbc): self._pbc[:] = pbc def set_pbc(self, pbc): """Set periodic boundary condition flags.""" self.pbc = pbc def get_pbc(self): """Get periodic boundary condition flags.""" return self.pbc.copy() def new_array(self, name, a, dtype=None, shape=None): """Add new array. If *shape* is not *None*, the shape of *a* will be checked.""" if dtype is not None: a = np.array(a, dtype, order='C') if len(a) == 0 and shape is not None: a.shape = (-1,) + shape else: if not a.flags['C_CONTIGUOUS']: a = np.ascontiguousarray(a) else: a = a.copy() if name in self.arrays: raise RuntimeError('Array {} already present'.format(name)) for b in self.arrays.values(): if len(a) != len(b): raise ValueError('Array "%s" has wrong length: %d != %d.' % (name, len(a), len(b))) break if shape is not None and a.shape[1:] != shape: raise ValueError('Array "%s" has wrong shape %s != %s.' % (name, a.shape, (a.shape[0:1] + shape))) self.arrays[name] = a def get_array(self, name, copy=True): """Get an array. Returns a copy unless the optional argument copy is false. """ if copy: return self.arrays[name].copy() else: return self.arrays[name] def set_array(self, name, a, dtype=None, shape=None): """Update array. If *shape* is not *None*, the shape of *a* will be checked. 
If *a* is *None*, then the array is deleted.""" b = self.arrays.get(name) if b is None: if a is not None: self.new_array(name, a, dtype, shape) else: if a is None: del self.arrays[name] else: a = np.asarray(a) if a.shape != b.shape: raise ValueError('Array "%s" has wrong shape %s != %s.' % (name, a.shape, b.shape)) b[:] = a def has(self, name): """Check for existence of array. name must be one of: 'tags', 'momenta', 'masses', 'initial_magmoms', 'initial_charges'.""" # XXX extend has to calculator properties return name in self.arrays def set_atomic_numbers(self, numbers): """Set atomic numbers.""" self.set_array('numbers', numbers, int, ()) def get_atomic_numbers(self): """Get integer array of atomic numbers.""" return self.arrays['numbers'].copy() def get_chemical_symbols(self): """Get list of chemical symbol strings. Equivalent to ``list(atoms.symbols)``.""" return list(self.symbols) def set_chemical_symbols(self, symbols): """Set chemical symbols.""" self.set_array('numbers', symbols2numbers(symbols), int, ()) def get_chemical_formula(self, mode='hill', empirical=False): """Get the chemical formula as a string based on the chemical symbols. Parameters: mode: str There are four different modes available: 'all': The list of chemical symbols are contracted to a string, e.g. ['C', 'H', 'H', 'H', 'O', 'H'] becomes 'CHHHOH'. 'reduce': The same as 'all' where repeated elements are contracted to a single symbol and a number, e.g. 'CHHHOCHHH' is reduced to 'CH3OCH3'. 'hill': The list of chemical symbols are contracted to a string following the Hill notation (alphabetical order with C and H first), e.g. 'CHHHOCHHH' is reduced to 'C2H6O' and 'SOOHOHO' to 'H2O4S'. This is default. 'metal': The list of chemical symbols (alphabetical metals, and alphabetical non-metals) empirical, bool (optional, default=False) Divide the symbol counts by their greatest common divisor to yield an empirical formula. Only for mode `metal` and `hill`. """ return self.symbols.get_chemical_formula(mode, empirical) def set_tags(self, tags): """Set tags for all atoms. If only one tag is supplied, it is applied to all atoms.""" if isinstance(tags, int): tags = [tags] * len(self) self.set_array('tags', tags, int, ()) def get_tags(self): """Get integer array of tags.""" if 'tags' in self.arrays: return self.arrays['tags'].copy() else: return np.zeros(len(self), int) def set_momenta(self, momenta, apply_constraint=True): """Set momenta.""" if (apply_constraint and len(self.constraints) > 0 and momenta is not None): momenta = np.array(momenta) # modify a copy for constraint in self.constraints: if hasattr(constraint, 'adjust_momenta'): constraint.adjust_momenta(self, momenta) self.set_array('momenta', momenta, float, (3,)) def set_velocities(self, velocities): """Set the momenta by specifying the velocities.""" self.set_momenta(self.get_masses()[:, np.newaxis] * velocities) def get_momenta(self): """Get array of momenta.""" if 'momenta' in self.arrays: return self.arrays['momenta'].copy() else: return np.zeros((len(self), 3)) def set_masses(self, masses='defaults'): """Set atomic masses in atomic mass units. The array masses should contain a list of masses. 
In case the masses argument is not given or for those elements of the masses list that are None, standard values are set.""" if isinstance(masses, str): if masses == 'defaults': masses = atomic_masses[self.arrays['numbers']] elif masses == 'most_common': masses = atomic_masses_common[self.arrays['numbers']] elif masses is None: pass elif not isinstance(masses, np.ndarray): masses = list(masses) for i, mass in enumerate(masses): if mass is None: masses[i] = atomic_masses[self.numbers[i]] self.set_array('masses', masses, float, ()) def get_masses(self): """Get array of masses in atomic mass units.""" if 'masses' in self.arrays: return self.arrays['masses'].copy() else: return atomic_masses[self.arrays['numbers']] def set_initial_magnetic_moments(self, magmoms=None): """Set the initial magnetic moments. Use either one or three numbers for every atom (collinear or non-collinear spins).""" if magmoms is None: self.set_array('initial_magmoms', None) else: magmoms = np.asarray(magmoms) self.set_array('initial_magmoms', magmoms, float, magmoms.shape[1:]) def get_initial_magnetic_moments(self): """Get array of initial magnetic moments.""" if 'initial_magmoms' in self.arrays: return self.arrays['initial_magmoms'].copy() else: return np.zeros(len(self)) def get_magnetic_moments(self): """Get calculated local magnetic moments.""" if self._calc is None: raise RuntimeError('Atoms object has no calculator.') return self._calc.get_magnetic_moments(self) def get_magnetic_moment(self): """Get calculated total magnetic moment.""" if self._calc is None: raise RuntimeError('Atoms object has no calculator.') return self._calc.get_magnetic_moment(self) def set_initial_charges(self, charges=None): """Set the initial charges.""" if charges is None: self.set_array('initial_charges', None) else: self.set_array('initial_charges', charges, float, ()) def get_initial_charges(self): """Get array of initial charges.""" if 'initial_charges' in self.arrays: return self.arrays['initial_charges'].copy() else: return np.zeros(len(self)) def get_charges(self): """Get calculated charges.""" if self._calc is None: raise RuntimeError('Atoms object has no calculator.') try: return self._calc.get_charges(self) except AttributeError: from ase.calculators.calculator import PropertyNotImplementedError raise PropertyNotImplementedError def set_positions(self, newpositions, apply_constraint=True): """Set positions, honoring any constraints. To ignore constraints, use *apply_constraint=False*.""" if self.constraints and apply_constraint: newpositions = np.array(newpositions, float) for constraint in self.constraints: constraint.adjust_positions(self, newpositions) self.set_array('positions', newpositions, shape=(3,)) def get_positions(self, wrap=False, **wrap_kw): """Get array of positions. Parameters: wrap: bool wrap atoms back to the cell before returning positions wrap_kw: (keyword=value) pairs optional keywords `pbc`, `center`, `pretty_translation`, `eps`, see :func:`ase.geometry.wrap_positions` """ if wrap: if 'pbc' not in wrap_kw: wrap_kw['pbc'] = self.pbc return wrap_positions(self.positions, self.cell, **wrap_kw) else: return self.arrays['positions'].copy() def get_potential_energy(self, force_consistent=False, apply_constraint=True): """Calculate potential energy. Ask the attached calculator to calculate the potential energy and apply constraints. Use *apply_constraint=False* to get the raw forces. 
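A minimal sketch using the built-in EMT calculator (illustrative only; element and lattice constant are chosen arbitrarily, energies are in eV):

>>> from ase.calculators.emt import EMT
>>> atoms = Atoms('Cu', cell=[3.6, 3.6, 3.6], pbc=True)
>>> atoms.calc = EMT()
>>> energy = atoms.get_potential_energy()  # illustrative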
When supported by the calculator, either the energy extrapolated to zero Kelvin or the energy consistent with the forces (the free energy) can be returned. """ if self._calc is None: raise RuntimeError('Atoms object has no calculator.') if force_consistent: energy = self._calc.get_potential_energy( self, force_consistent=force_consistent) else: energy = self._calc.get_potential_energy(self) if apply_constraint: for constraint in self.constraints: if hasattr(constraint, 'adjust_potential_energy'): energy += constraint.adjust_potential_energy(self) return energy def get_properties(self, properties): """This method is experimental; currently for internal use.""" # XXX Something about constraints. if self._calc is None: raise RuntimeError('Atoms object has no calculator.') return self._calc.calculate_properties(self, properties) def get_potential_energies(self): """Calculate the potential energies of all the atoms. Only available with calculators supporting per-atom energies (e.g. classical potentials). """ if self._calc is None: raise RuntimeError('Atoms object has no calculator.') return self._calc.get_potential_energies(self) def get_kinetic_energy(self): """Get the kinetic energy.""" momenta = self.arrays.get('momenta') if momenta is None: return 0.0 return 0.5 * np.vdot(momenta, self.get_velocities()) def get_velocities(self): """Get array of velocities.""" momenta = self.get_momenta() masses = self.get_masses() return momenta / masses[:, np.newaxis] def get_total_energy(self): """Get the total energy - potential plus kinetic energy.""" return self.get_potential_energy() + self.get_kinetic_energy() def get_forces(self, apply_constraint=True, md=False): """Calculate atomic forces. Ask the attached calculator to calculate the forces and apply constraints. Use *apply_constraint=False* to get the raw forces. For molecular dynamics (md=True) we don't apply the constraint to the forces but to the momenta. When holonomic constraints for rigid linear triatomic molecules are present, ask the constraints to redistribute the forces within each triple defined in the constraints (required for molecular dynamics with this type of constraints).""" if self._calc is None: raise RuntimeError('Atoms object has no calculator.') forces = self._calc.get_forces(self) if apply_constraint: # We need a special md flag here because for MD we want # to skip real constraints but include special "constraints" # Like Hookean. for constraint in self.constraints: if md and hasattr(constraint, 'redistribute_forces_md'): constraint.redistribute_forces_md(self, forces) if not md or hasattr(constraint, 'adjust_potential_energy'): constraint.adjust_forces(self, forces) return forces # Informs calculators (e.g. Asap) that ideal gas contribution is added here. _ase_handles_dynamic_stress = True def get_stress(self, voigt=True, apply_constraint=True, include_ideal_gas=False): """Calculate stress tensor. Returns an array of the six independent components of the symmetric stress tensor, in the traditional Voigt order (xx, yy, zz, yz, xz, xy) or as a 3x3 matrix. Default is Voigt order. The ideal gas contribution to the stresses is added if the atoms have momenta and ``include_ideal_gas`` is set to True. """ if self._calc is None: raise RuntimeError('Atoms object has no calculator.') stress = self._calc.get_stress(self) shape = stress.shape if shape == (3, 3): # Convert to the Voigt form before possibly applying # constraints and adding the dynamic part of the stress # (the "ideal gas contribution"). 
stress = full_3x3_to_voigt_6_stress(stress) else: assert shape == (6,) if apply_constraint: for constraint in self.constraints: if hasattr(constraint, 'adjust_stress'): constraint.adjust_stress(self, stress) # Add ideal gas contribution, if applicable if include_ideal_gas and self.has('momenta'): stresscomp = np.array([[0, 5, 4], [5, 1, 3], [4, 3, 2]]) p = self.get_momenta() masses = self.get_masses() invmass = 1.0 / masses invvol = 1.0 / self.get_volume() for alpha in range(3): for beta in range(alpha, 3): stress[stresscomp[alpha, beta]] -= ( p[:, alpha] * p[:, beta] * invmass).sum() * invvol if voigt: return stress else: return voigt_6_to_full_3x3_stress(stress) def get_stresses(self, include_ideal_gas=False, voigt=True): """Calculate the stress-tensor of all the atoms. Only available with calculators supporting per-atom energies and stresses (e.g. classical potentials). Even for such calculators there is a certain arbitrariness in defining per-atom stresses. The ideal gas contribution to the stresses is added if the atoms have momenta and ``include_ideal_gas`` is set to True. """ if self._calc is None: raise RuntimeError('Atoms object has no calculator.') stresses = self._calc.get_stresses(self) # make sure `stresses` are in voigt form if np.shape(stresses)[1:] == (3, 3): stresses_voigt = [full_3x3_to_voigt_6_stress(s) for s in stresses] stresses = np.array(stresses_voigt) # REMARK: The ideal gas contribution is intensive, i.e., the volume # is divided out. We currently don't check if `stresses` are intensive # as well, i.e., if `a.get_stresses.sum(axis=0) == a.get_stress()`. # It might be good to check this here, but adds computational overhead. if include_ideal_gas and self.has('momenta'): stresscomp = np.array([[0, 5, 4], [5, 1, 3], [4, 3, 2]]) if hasattr(self._calc, 'get_atomic_volumes'): invvol = 1.0 / self._calc.get_atomic_volumes() else: invvol = self.get_global_number_of_atoms() / self.get_volume() p = self.get_momenta() invmass = 1.0 / self.get_masses() for alpha in range(3): for beta in range(alpha, 3): stresses[:, stresscomp[alpha, beta]] -= ( p[:, alpha] * p[:, beta] * invmass * invvol) if voigt: return stresses else: stresses_3x3 = [voigt_6_to_full_3x3_stress(s) for s in stresses] return np.array(stresses_3x3) def get_dipole_moment(self): """Calculate the electric dipole moment for the atoms object. Only available for calculators which has a get_dipole_moment() method.""" if self._calc is None: raise RuntimeError('Atoms object has no calculator.') return self._calc.get_dipole_moment(self) def copy(self): """Return a copy.""" atoms = self.__class__(cell=self.cell, pbc=self.pbc, info=self.info, celldisp=self._celldisp.copy()) atoms.arrays = {} for name, a in self.arrays.items(): atoms.arrays[name] = a.copy() atoms.constraints = copy.deepcopy(self.constraints) return atoms def todict(self): """For basic JSON (non-database) support.""" d = dict(self.arrays) d['cell'] = np.asarray(self.cell) d['pbc'] = self.pbc if self._celldisp.any(): d['celldisp'] = self._celldisp if self.constraints: d['constraints'] = self.constraints if self.info: d['info'] = self.info # Calculator... trouble. 
return d @classmethod def fromdict(cls, dct): """Rebuild atoms object from dictionary representation (todict).""" dct = dct.copy() kw = {} for name in ['numbers', 'positions', 'cell', 'pbc']: kw[name] = dct.pop(name) constraints = dct.pop('constraints', None) if constraints: from ase.constraints import dict2constraint constraints = [dict2constraint(d) for d in constraints] info = dct.pop('info', None) atoms = cls(constraint=constraints, celldisp=dct.pop('celldisp', None), info=info, **kw) natoms = len(atoms) # Some arrays are named differently from the atoms __init__ keywords. # Also, there may be custom arrays. Hence we set them directly: for name, arr in dct.items(): assert len(arr) == natoms, name assert isinstance(arr, np.ndarray) atoms.arrays[name] = arr return atoms def __len__(self): return len(self.arrays['positions']) def get_number_of_atoms(self): """Deprecated, please do not use. You probably want len(atoms). Or if your atoms are distributed, use (and see) get_global_number_of_atoms().""" import warnings warnings.warn('Use get_global_number_of_atoms() instead', np.VisibleDeprecationWarning) return len(self) def get_global_number_of_atoms(self): """Returns the global number of atoms in a distributed-atoms parallel simulation. DO NOT USE UNLESS YOU KNOW WHAT YOU ARE DOING! Equivalent to len(atoms) in the standard ASE Atoms class. You should normally use len(atoms) instead. This function's only purpose is to make compatibility between ASE and Asap easier to maintain by having a few places in ASE use this function instead. It is typically only when counting the global number of degrees of freedom or in similar situations. """ return len(self) def __repr__(self): tokens = [] N = len(self) if N <= 60: symbols = self.get_chemical_formula('reduce') else: symbols = self.get_chemical_formula('hill') tokens.append("symbols='{0}'".format(symbols)) if self.pbc.any() and not self.pbc.all(): tokens.append('pbc={0}'.format(self.pbc.tolist())) else: tokens.append('pbc={0}'.format(self.pbc[0])) cell = self.cell if cell: if cell.orthorhombic: cell = cell.lengths().tolist() else: cell = cell.tolist() tokens.append('cell={0}'.format(cell)) for name in sorted(self.arrays): if name in ['numbers', 'positions']: continue tokens.append('{0}=...'.format(name)) if self.constraints: if len(self.constraints) == 1: constraint = self.constraints[0] else: constraint = self.constraints tokens.append('constraint={0}'.format(repr(constraint))) if self._calc is not None: tokens.append('calculator={0}(...)' .format(self._calc.__class__.__name__)) return '{0}({1})'.format(self.__class__.__name__, ', '.join(tokens)) def __add__(self, other): atoms = self.copy() atoms += other return atoms def extend(self, other): """Extend atoms object by appending atoms from *other*.""" if isinstance(other, Atom): other = self.__class__([other]) n1 = len(self) n2 = len(other) for name, a1 in self.arrays.items(): a = np.zeros((n1 + n2,) + a1.shape[1:], a1.dtype) a[:n1] = a1 if name == 'masses': a2 = other.get_masses() else: a2 = other.arrays.get(name) if a2 is not None: a[n1:] = a2 self.arrays[name] = a for name, a2 in other.arrays.items(): if name in self.arrays: continue a = np.empty((n1 + n2,) + a2.shape[1:], a2.dtype) a[n1:] = a2 if name == 'masses': a[:n1] = self.get_masses()[:n1] else: a[:n1] = 0 self.set_array(name, a) def __iadd__(self, other): self.extend(other) return self def append(self, atom): """Append atom to end.""" self.extend(self.__class__([atom])) def __iter__(self): for i in range(len(self)): yield self[i] def 
__getitem__(self, i): """Return a subset of the atoms. i -- scalar integer, list of integers, or slice object describing which atoms to return. If i is a scalar, return an Atom object. If i is a list or a slice, return an Atoms object with the same cell, pbc, and other associated info as the original Atoms object. The indices of the constraints will be shuffled so that they match the indexing in the subset returned. """ if isinstance(i, numbers.Integral): natoms = len(self) if i < -natoms or i >= natoms: raise IndexError('Index out of range.') return Atom(atoms=self, index=i) elif not isinstance(i, slice): i = np.array(i) # if i is a mask if i.dtype == bool: if len(i) != len(self): raise IndexError('Length of mask {} must equal ' 'number of atoms {}' .format(len(i), len(self))) i = np.arange(len(self))[i] import copy conadd = [] # Constraints need to be deepcopied, but only the relevant ones. for con in copy.deepcopy(self.constraints): try: con.index_shuffle(self, i) except (IndexError, NotImplementedError): pass else: conadd.append(con) atoms = self.__class__(cell=self.cell, pbc=self.pbc, info=self.info, # should be communicated to the slice as well celldisp=self._celldisp) # TODO: Do we need to shuffle indices in adsorbate_info too? atoms.arrays = {} for name, a in self.arrays.items(): atoms.arrays[name] = a[i].copy() atoms.constraints = conadd return atoms def __delitem__(self, i): from ase.constraints import FixAtoms for c in self._constraints: if not isinstance(c, FixAtoms): raise RuntimeError('Remove constraint using set_constraint() ' 'before deleting atoms.') if isinstance(i, list) and len(i) > 0: # Make sure a list of booleans will work correctly and not be # interpreted at 0 and 1 indices. i = np.array(i) if len(self._constraints) > 0: n = len(self) i = np.arange(n)[i] if isinstance(i, int): i = [i] constraints = [] for c in self._constraints: c = c.delete_atoms(i, n) if c is not None: constraints.append(c) self.constraints = constraints mask = np.ones(len(self), bool) mask[i] = False for name, a in self.arrays.items(): self.arrays[name] = a[mask] def pop(self, i=-1): """Remove and return atom at index *i* (default last).""" atom = self[i] atom.cut_reference_to_atoms() del self[i] return atom def __imul__(self, m): """In-place repeat of atoms.""" if isinstance(m, int): m = (m, m, m) for x, vec in zip(m, self.cell): if x != 1 and not vec.any(): raise ValueError('Cannot repeat along undefined lattice ' 'vector') M = np.product(m) n = len(self) for name, a in self.arrays.items(): self.arrays[name] = np.tile(a, (M,) + (1,) * (len(a.shape) - 1)) positions = self.arrays['positions'] i0 = 0 for m0 in range(m[0]): for m1 in range(m[1]): for m2 in range(m[2]): i1 = i0 + n positions[i0:i1] += np.dot((m0, m1, m2), self.cell) i0 = i1 if self.constraints is not None: self.constraints = [c.repeat(m, n) for c in self.constraints] self.cell = np.array([m[c] * self.cell[c] for c in range(3)]) return self def repeat(self, rep): """Create new repeated atoms object. The *rep* argument should be a sequence of three positive integers like *(2,3,1)* or a single integer (*r*) equivalent to *(r,r,r)*.""" atoms = self.copy() atoms *= rep return atoms def __mul__(self, rep): return self.repeat(rep) def translate(self, displacement): """Translate atomic positions. The displacement argument can be a float an xyz vector or an nx3 array (where n is the number of atoms).""" self.arrays['positions'] += np.array(displacement) def center(self, vacuum=None, axis=(0, 1, 2), about=None): """Center atoms in unit cell. 
Centers the atoms in the unit cell, so there is the same amount of vacuum on all sides. vacuum: float (default: None) If specified adjust the amount of vacuum when centering. If vacuum=10.0 there will thus be 10 Angstrom of vacuum on each side. axis: int or sequence of ints Axis or axes to act on. Default: Act on all axes. about: float or array (default: None) If specified, center the atoms about . I.e., about=(0., 0., 0.) (or just "about=0.", interpreted identically), to center about the origin. """ # Find the orientations of the faces of the unit cell cell = self.cell.complete() dirs = np.zeros_like(cell) lengths = cell.lengths() for i in range(3): dirs[i] = np.cross(cell[i - 1], cell[i - 2]) dirs[i] /= np.linalg.norm(dirs[i]) if dirs[i] @ cell[i] < 0.0: dirs[i] *= -1 if isinstance(axis, int): axes = (axis,) else: axes = axis # Now, decide how much each basis vector should be made longer pos = self.positions longer = np.zeros(3) shift = np.zeros(3) for i in axes: if len(pos): scalarprod = pos @ dirs[i] p0 = scalarprod.min() p1 = scalarprod.max() else: p0 = 0 p1 = 0 height = cell[i] @ dirs[i] if vacuum is not None: lng = (p1 - p0 + 2 * vacuum) - height else: lng = 0.0 # Do not change unit cell size! top = lng + height - p1 shf = 0.5 * (top - p0) cosphi = cell[i] @ dirs[i] / lengths[i] longer[i] = lng / cosphi shift[i] = shf / cosphi # Now, do it! translation = np.zeros(3) for i in axes: nowlen = lengths[i] if vacuum is not None: self.cell[i] = cell[i] * (1 + longer[i] / nowlen) translation += shift[i] * cell[i] / nowlen # We calculated translations using the completed cell, # so directions without cell vectors will have been centered # along a "fake" vector of length 1. # Therefore, we adjust by -0.5: if not any(self.cell[i]): translation[i] -= 0.5 # Optionally, translate to center about a point in space. if about is not None: for vector in self.cell: translation -= vector / 2.0 translation += about self.positions += translation def get_center_of_mass(self, scaled=False): """Get the center of mass. If scaled=True the center of mass in scaled coordinates is returned.""" masses = self.get_masses() com = masses @ self.positions / masses.sum() if scaled: return self.cell.scaled_positions(com) else: return com def set_center_of_mass(self, com, scaled=False): """Set the center of mass. If scaled=True the center of mass is expected in scaled coordinates. Constraints are considered for scaled=False. """ old_com = self.get_center_of_mass(scaled=scaled) difference = old_com - com if scaled: self.set_scaled_positions(self.get_scaled_positions() + difference) else: self.set_positions(self.get_positions() + difference) def get_moments_of_inertia(self, vectors=False): """Get the moments of inertia along the principal axes. The three principal moments of inertia are computed from the eigenvalues of the symmetric inertial tensor. Periodic boundary conditions are ignored. Units of the moments of inertia are amu*angstrom**2. 
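Example (an illustrative sketch; the returned values depend on the geometry, and ``ase.build.molecule`` is used only to obtain a small test system):

>>> from ase.build import molecule
>>> h2o = molecule('H2O')
>>> Ia, Ib, Ic = h2o.get_moments_of_inertia()  # amu*angstrom**2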
""" com = self.get_center_of_mass() positions = self.get_positions() positions -= com # translate center of mass to origin masses = self.get_masses() # Initialize elements of the inertial tensor I11 = I22 = I33 = I12 = I13 = I23 = 0.0 for i in range(len(self)): x, y, z = positions[i] m = masses[i] I11 += m * (y ** 2 + z ** 2) I22 += m * (x ** 2 + z ** 2) I33 += m * (x ** 2 + y ** 2) I12 += -m * x * y I13 += -m * x * z I23 += -m * y * z I = np.array([[I11, I12, I13], [I12, I22, I23], [I13, I23, I33]]) evals, evecs = np.linalg.eigh(I) if vectors: return evals, evecs.transpose() else: return evals def get_angular_momentum(self): """Get total angular momentum with respect to the center of mass.""" com = self.get_center_of_mass() positions = self.get_positions() positions -= com # translate center of mass to origin return np.cross(positions, self.get_momenta()).sum(0) def rotate(self, a, v, center=(0, 0, 0), rotate_cell=False): """Rotate atoms based on a vector and an angle, or two vectors. Parameters: a = None: Angle that the atoms is rotated around the vector 'v'. 'a' can also be a vector and then 'a' is rotated into 'v'. v: Vector to rotate the atoms around. Vectors can be given as strings: 'x', '-x', 'y', ... . center = (0, 0, 0): The center is kept fixed under the rotation. Use 'COM' to fix the center of mass, 'COP' to fix the center of positions or 'COU' to fix the center of cell. rotate_cell = False: If true the cell is also rotated. Examples: Rotate 90 degrees around the z-axis, so that the x-axis is rotated into the y-axis: >>> atoms = Atoms() >>> atoms.rotate(90, 'z') >>> atoms.rotate(90, (0, 0, 1)) >>> atoms.rotate(-90, '-z') >>> atoms.rotate('x', 'y') >>> atoms.rotate((1, 0, 0), (0, 1, 0)) """ if not isinstance(a, numbers.Real): a, v = v, a norm = np.linalg.norm v = string2vector(v) normv = norm(v) if normv == 0.0: raise ZeroDivisionError('Cannot rotate: norm(v) == 0') if isinstance(a, numbers.Real): a *= pi / 180 v /= normv c = cos(a) s = sin(a) else: v2 = string2vector(a) v /= normv normv2 = np.linalg.norm(v2) if normv2 == 0: raise ZeroDivisionError('Cannot rotate: norm(a) == 0') v2 /= norm(v2) c = np.dot(v, v2) v = np.cross(v, v2) s = norm(v) # In case *v* and *a* are parallel, np.cross(v, v2) vanish # and can't be used as a rotation axis. However, in this # case any rotation axis perpendicular to v2 will do. eps = 1e-7 if s < eps: v = np.cross((0, 0, 1), v2) if norm(v) < eps: v = np.cross((1, 0, 0), v2) assert norm(v) >= eps elif s > 0: v /= s center = self._centering_as_array(center) p = self.arrays['positions'] - center self.arrays['positions'][:] = (c * p - np.cross(p, s * v) + np.outer(np.dot(p, v), (1.0 - c) * v) + center) if rotate_cell: rotcell = self.get_cell() rotcell[:] = (c * rotcell - np.cross(rotcell, s * v) + np.outer(np.dot(rotcell, v), (1.0 - c) * v)) self.set_cell(rotcell) def _centering_as_array(self, center): if isinstance(center, str): if center.lower() == 'com': center = self.get_center_of_mass() elif center.lower() == 'cop': center = self.get_positions().mean(axis=0) elif center.lower() == 'cou': center = self.get_cell().sum(axis=0) / 2 else: raise ValueError('Cannot interpret center') else: center = np.array(center, float) return center def euler_rotate(self, phi=0.0, theta=0.0, psi=0.0, center=(0, 0, 0)): """Rotate atoms via Euler angles (in degrees). See e.g http://mathworld.wolfram.com/EulerAngles.html for explanation. Parameters: center : The point to rotate about. 
A sequence of length 3 with the coordinates, or 'COM' to select the center of mass, 'COP' to select center of positions or 'COU' to select center of cell. phi : The 1st rotation angle around the z axis. theta : Rotation around the x axis. psi : 2nd rotation around the z axis. """ center = self._centering_as_array(center) phi *= pi / 180 theta *= pi / 180 psi *= pi / 180 # First move the molecule to the origin In contrast to MATLAB, # numpy broadcasts the smaller array to the larger row-wise, # so there is no need to play with the Kronecker product. rcoords = self.positions - center # First Euler rotation about z in matrix form D = np.array(((cos(phi), sin(phi), 0.), (-sin(phi), cos(phi), 0.), (0., 0., 1.))) # Second Euler rotation about x: C = np.array(((1., 0., 0.), (0., cos(theta), sin(theta)), (0., -sin(theta), cos(theta)))) # Third Euler rotation, 2nd rotation about z: B = np.array(((cos(psi), sin(psi), 0.), (-sin(psi), cos(psi), 0.), (0., 0., 1.))) # Total Euler rotation A = np.dot(B, np.dot(C, D)) # Do the rotation rcoords = np.dot(A, np.transpose(rcoords)) # Move back to the rotation point self.positions = np.transpose(rcoords) + center def get_dihedral(self, a0, a1, a2, a3, mic=False): """Calculate dihedral angle. Calculate dihedral angle (in degrees) between the vectors a0->a1 and a2->a3. Use mic=True to use the Minimum Image Convention and calculate the angle across periodic boundaries. """ return self.get_dihedrals([[a0, a1, a2, a3]], mic=mic)[0] def get_dihedrals(self, indices, mic=False): """Calculate dihedral angles. Calculate dihedral angles (in degrees) between the list of vectors a0->a1 and a2->a3, where a0, a1, a2 and a3 are in each row of indices. Use mic=True to use the Minimum Image Convention and calculate the angles across periodic boundaries. """ indices = np.array(indices) assert indices.shape[1] == 4 a0s = self.positions[indices[:, 0]] a1s = self.positions[indices[:, 1]] a2s = self.positions[indices[:, 2]] a3s = self.positions[indices[:, 3]] # vectors 0->1, 1->2, 2->3 v0 = a1s - a0s v1 = a2s - a1s v2 = a3s - a2s cell = None pbc = None if mic: cell = self.cell pbc = self.pbc return get_dihedrals(v0, v1, v2, cell=cell, pbc=pbc) def _masked_rotate(self, center, axis, diff, mask): # do rotation of subgroup by copying it to temporary atoms object # and then rotating that # # recursive object definition might not be the most elegant thing, # more generally useful might be a rotation function with a mask? group = self.__class__() for i in range(len(self)): if mask[i]: group += self[i] group.translate(-center) group.rotate(diff * 180 / pi, axis) group.translate(center) # set positions in original atoms object j = 0 for i in range(len(self)): if mask[i]: self.positions[i] = group[j].position j += 1 def set_dihedral(self, a1, a2, a3, a4, angle, mask=None, indices=None): """Set the dihedral angle (degrees) between vectors a1->a2 and a3->a4 by changing the atom indexed by a4. If mask is not None, all the atoms described in mask (read: the entire subgroup) are moved. Alternatively to the mask, the indices of the atoms to be rotated can be supplied. If both *mask* and *indices* are given, *indices* overwrites *mask*. **Important**: If *mask* or *indices* is given and does not contain *a4*, *a4* will NOT be moved. In most cases you therefore want to include *a4* in *mask*/*indices*. Example: the following defines a very crude ethane-like molecule and twists one half of it by 30 degrees. >>> atoms = Atoms('HHCCHH', [[-1, 1, 0], [-1, -1, 0], [0, 0, 0], ... 
[1, 0, 0], [2, 1, 0], [2, -1, 0]]) >>> atoms.set_dihedral(1, 2, 3, 4, 210, mask=[0, 0, 0, 1, 1, 1]) """ angle *= pi / 180 # if not provided, set mask to the last atom in the # dihedral description if mask is None and indices is None: mask = np.zeros(len(self)) mask[a4] = 1 elif indices is not None: mask = [index in indices for index in range(len(self))] # compute necessary in dihedral change, from current value current = self.get_dihedral(a1, a2, a3, a4) * pi / 180 diff = angle - current axis = self.positions[a3] - self.positions[a2] center = self.positions[a3] self._masked_rotate(center, axis, diff, mask) def rotate_dihedral(self, a1, a2, a3, a4, angle=None, mask=None, indices=None): """Rotate dihedral angle. Same usage as in :meth:`ase.Atoms.set_dihedral`: Rotate a group by a predefined dihedral angle, starting from its current configuration. """ start = self.get_dihedral(a1, a2, a3, a4) self.set_dihedral(a1, a2, a3, a4, angle + start, mask, indices) def get_angle(self, a1, a2, a3, mic=False): """Get angle formed by three atoms. Calculate angle in degrees between the vectors a2->a1 and a2->a3. Use mic=True to use the Minimum Image Convention and calculate the angle across periodic boundaries. """ return self.get_angles([[a1, a2, a3]], mic=mic)[0] def get_angles(self, indices, mic=False): """Get angle formed by three atoms for multiple groupings. Calculate angle in degrees between vectors between atoms a2->a1 and a2->a3, where a1, a2, and a3 are in each row of indices. Use mic=True to use the Minimum Image Convention and calculate the angle across periodic boundaries. """ indices = np.array(indices) assert indices.shape[1] == 3 a1s = self.positions[indices[:, 0]] a2s = self.positions[indices[:, 1]] a3s = self.positions[indices[:, 2]] v12 = a1s - a2s v32 = a3s - a2s cell = None pbc = None if mic: cell = self.cell pbc = self.pbc return get_angles(v12, v32, cell=cell, pbc=pbc) def set_angle(self, a1, a2=None, a3=None, angle=None, mask=None, indices=None, add=False): """Set angle (in degrees) formed by three atoms. Sets the angle between vectors *a2*->*a1* and *a2*->*a3*. If *add* is `True`, the angle will be changed by the value given. Same usage as in :meth:`ase.Atoms.set_dihedral`. If *mask* and *indices* are given, *indices* overwrites *mask*. If *mask* and *indices* are not set, only *a3* is moved.""" if any(a is None for a in [a2, a3, angle]): raise ValueError('a2, a3, and angle must not be None') # If not provided, set mask to the last atom in the angle description if mask is None and indices is None: mask = np.zeros(len(self)) mask[a3] = 1 elif indices is not None: mask = [index in indices for index in range(len(self))] if add: diff = angle else: # Compute necessary in angle change, from current value diff = angle - self.get_angle(a1, a2, a3) diff *= pi / 180 # Do rotation of subgroup by copying it to temporary atoms object and # then rotating that v10 = self.positions[a1] - self.positions[a2] v12 = self.positions[a3] - self.positions[a2] v10 /= np.linalg.norm(v10) v12 /= np.linalg.norm(v12) axis = np.cross(v10, v12) center = self.positions[a2] self._masked_rotate(center, axis, diff, mask) def rattle(self, stdev=0.001, seed=None, rng=None): """Randomly displace atoms. This method adds random displacements to the atomic positions, taking a possible constraint into account. The random numbers are drawn from a normal distribution of standard deviation stdev. For a parallel calculation, it is important to use the same seed on all processors! 
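Example (illustrative; a fixed seed makes the displacements reproducible):

>>> atoms = Atoms('N2', positions=[(0, 0, 0), (0, 0, 1.1)])
>>> atoms.rattle(stdev=0.01, seed=1)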
""" if seed is not None and rng is not None: raise ValueError('Please do not provide both seed and rng.') if rng is None: if seed is None: seed = 42 rng = np.random.RandomState(seed) positions = self.arrays['positions'] self.set_positions(positions + rng.normal(scale=stdev, size=positions.shape)) def get_distance(self, a0, a1, mic=False, vector=False): """Return distance between two atoms. Use mic=True to use the Minimum Image Convention. vector=True gives the distance vector (from a0 to a1). """ return self.get_distances(a0, [a1], mic=mic, vector=vector)[0] def get_distances(self, a, indices, mic=False, vector=False): """Return distances of atom No.i with a list of atoms. Use mic=True to use the Minimum Image Convention. vector=True gives the distance vector (from a to self[indices]). """ R = self.arrays['positions'] p1 = [R[a]] p2 = R[indices] cell = None pbc = None if mic: cell = self.cell pbc = self.pbc D, D_len = get_distances(p1, p2, cell=cell, pbc=pbc) if vector: D.shape = (-1, 3) return D else: D_len.shape = (-1,) return D_len def get_all_distances(self, mic=False, vector=False): """Return distances of all of the atoms with all of the atoms. Use mic=True to use the Minimum Image Convention. """ R = self.arrays['positions'] cell = None pbc = None if mic: cell = self.cell pbc = self.pbc D, D_len = get_distances(R, cell=cell, pbc=pbc) if vector: return D else: return D_len def set_distance(self, a0, a1, distance, fix=0.5, mic=False, mask=None, indices=None, add=False, factor=False): """Set the distance between two atoms. Set the distance between atoms *a0* and *a1* to *distance*. By default, the center of the two atoms will be fixed. Use *fix=0* to fix the first atom, *fix=1* to fix the second atom and *fix=0.5* (default) to fix the center of the bond. If *mask* or *indices* are set (*mask* overwrites *indices*), only the atoms defined there are moved (see :meth:`ase.Atoms.set_dihedral`). When *add* is true, the distance is changed by the value given. In combination with *factor* True, the value given is a factor scaling the distance. It is assumed that the atoms in *mask*/*indices* move together with *a1*. If *fix=1*, only *a0* will therefore be moved.""" if a0 % len(self) == a1 % len(self): raise ValueError('a0 and a1 must not be the same') if add: oldDist = self.get_distance(a0, a1, mic=mic) if factor: newDist = oldDist * distance else: newDist = oldDist + distance self.set_distance(a0, a1, newDist, fix=fix, mic=mic, mask=mask, indices=indices, add=False, factor=False) return R = self.arrays['positions'] D = np.array([R[a1] - R[a0]]) if mic: D, D_len = find_mic(D, self.cell, self.pbc) else: D_len = np.array([np.sqrt((D**2).sum())]) x = 1.0 - distance / D_len[0] if mask is None and indices is None: indices = [a0, a1] elif mask: indices = [i for i in range(len(self)) if mask[i]] for i in indices: if i == a0: R[a0] += (x * fix) * D[0] else: R[i] -= (x * (1.0 - fix)) * D[0] def get_scaled_positions(self, wrap=True): """Get positions relative to unit cell. If wrap is True, atoms outside the unit cell will be wrapped into the cell in those directions with periodic boundary conditions so that the scaled coordinates are between zero and one. If any cell vectors are zero, the corresponding coordinates are evaluated as if the cell were completed using ``cell.complete()``. 
This means coordinates will be Cartesian as long as the non-zero cell vectors span a Cartesian axis or plane.""" fractional = self.cell.scaled_positions(self.positions) if wrap: for i, periodic in enumerate(self.pbc): if periodic: # Yes, we need to do it twice. # See the scaled_positions.py test. fractional[:, i] %= 1.0 fractional[:, i] %= 1.0 return fractional def set_scaled_positions(self, scaled): """Set positions relative to unit cell.""" self.positions[:] = self.cell.cartesian_positions(scaled) def wrap(self, **wrap_kw): """Wrap positions to unit cell. Parameters: wrap_kw: (keyword=value) pairs optional keywords `pbc`, `center`, `pretty_translation`, `eps`, see :func:`ase.geometry.wrap_positions` """ if 'pbc' not in wrap_kw: wrap_kw['pbc'] = self.pbc self.positions[:] = self.get_positions(wrap=True, **wrap_kw) def get_temperature(self): """Get the temperature in Kelvin.""" dof = len(self) * 3 for constraint in self._constraints: dof -= constraint.get_removed_dof(self) ekin = self.get_kinetic_energy() return 2 * ekin / (dof * units.kB) def __eq__(self, other): """Check for identity of two atoms objects. Identity means: same positions, atomic numbers, unit cell and periodic boundary conditions.""" if not isinstance(other, Atoms): return False a = self.arrays b = other.arrays return (len(self) == len(other) and (a['positions'] == b['positions']).all() and (a['numbers'] == b['numbers']).all() and (self.cell == other.cell).all() and (self.pbc == other.pbc).all()) def __ne__(self, other): """Check if two atoms objects are not equal. Any differences in positions, atomic numbers, unit cell or periodic boundary condtions make atoms objects not equal. """ eq = self.__eq__(other) if eq is NotImplemented: return eq else: return not eq # @deprecated('Please use atoms.cell.volume') # We kind of want to deprecate this, but the ValueError behaviour # might be desirable. Should we do this? def get_volume(self): """Get volume of unit cell.""" if self.cell.rank != 3: raise ValueError( 'You have {0} lattice vectors: volume not defined' .format(self.cell.rank)) return self.cell.volume def _get_positions(self): """Return reference to positions-array for in-place manipulations.""" return self.arrays['positions'] def _set_positions(self, pos): """Set positions directly, bypassing constraints.""" self.arrays['positions'][:] = pos positions = property(_get_positions, _set_positions, doc='Attribute for direct ' + 'manipulation of the positions.') def _get_atomic_numbers(self): """Return reference to atomic numbers for in-place manipulations.""" return self.arrays['numbers'] numbers = property(_get_atomic_numbers, set_atomic_numbers, doc='Attribute for direct ' + 'manipulation of the atomic numbers.') @property def cell(self): """The :class:`ase.cell.Cell` for direct manipulation.""" return self._cellobj @cell.setter def cell(self, cell): cell = Cell.ascell(cell) self._cellobj[:] = cell def write(self, filename, format=None, **kwargs): """Write atoms object to a file. see ase.io.write for formats. kwargs are passed to ase.io.write. """ from ase.io import write write(filename, self, format, **kwargs) def iterimages(self): yield self def edit(self): """Modify atoms interactively through ASE's GUI viewer. Conflicts leading to undesirable behaviour might arise when matplotlib has been pre-imported with certain incompatible backends and while trying to use the plot feature inside the interactive GUI. To circumvent, please set matplotlib.use('gtk') before calling this method. 
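Example (an illustrative sketch; it opens an interactive window, so it is shown as a plain snippet rather than a doctest)::

    from ase.build import molecule
    atoms = molecule('H2O')
    atoms.edit()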
""" from ase.gui.images import Images from ase.gui.gui import GUI images = Images([self]) gui = GUI(images) gui.run() def string2vector(v): if isinstance(v, str): if v[0] == '-': return -string2vector(v[1:]) w = np.zeros(3) w['xyz'.index(v)] = 1.0 return w return np.array(v, float) def default(data, dflt): """Helper function for setting default values.""" if data is None: return None elif isinstance(data, (list, tuple)): newdata = [] allnone = True for x in data: if x is None: newdata.append(dflt) else: newdata.append(x) allnone = False if allnone: return None return newdata else: return data ase-3.22.1/ase/autoneb.py000066400000000000000000000573341415166253600151530ustar00rootroot00000000000000from ase.io import Trajectory from ase.io import read from ase.neb import NEB from ase.optimize import BFGS from ase.optimize import FIRE from ase.calculators.singlepoint import SinglePointCalculator import ase.parallel as mpi import numpy as np import shutil import os import types from math import log from math import exp from contextlib import ExitStack class AutoNEB: """AutoNEB object. The AutoNEB algorithm streamlines the execution of NEB and CI-NEB calculations following the algorithm described in: E. L. Kolsbjerg, M. N. Groves, and B. Hammer, J. Chem. Phys, 145, 094107, 2016. (doi: 10.1063/1.4961868) The user supplies at minimum the two end-points and possibly also some intermediate images. The stages are: 1) Define a set of images and name them sequentially. Must at least have a relaxed starting and ending image User can supply intermediate guesses which do not need to have previously determined energies (probably from another NEB calculation with a lower level of theory) 2) AutoNEB will first evaluate the user provided intermediate images 3) AutoNEB will then add additional images dynamically until n_max is reached 4) A climbing image will attempt to locate the saddle point 5) All the images between the highest point and the starting point are further relaxed to smooth the path 6) All the images between the highest point and the ending point are further relaxed to smooth the path Step 4 and 5-6 are optional steps! Parameters: attach_calculators: Function which adds valid calculators to the list of images supplied. prefix: string All files that the AutoNEB method reads and writes are prefixed with this string n_simul: int The number of relaxations run in parallel. n_max: int The number of images along the NEB path when done. This number includes the two end-points. Important: due to the dynamic adding of images around the peak n_max must be updated if the NEB is restarted. climb: boolean Should a CI-NEB calculation be done at the top-point fmax: float or list of floats The maximum force along the NEB path maxsteps: int The maximum number of steps in each NEB relaxation. If a list is given the first number of steps is used in the build-up and final scan phase; the second number of steps is used in the CI step after all images have been inserted. k: float The spring constant along the NEB path method: str (see neb.py) Choice betweeen three method: 'aseneb', standard ase NEB implementation 'improvedtangent', published NEB implementation 'eb', full spring force implementation (default) optimizer: str Which optimizer to use in the relaxation. Valid values are 'BFGS' and 'FIRE' (default) space_energy_ratio: float The preference for new images to be added in a big energy gab with a preference around the peak or in the biggest geometric gab. 
A space_energy_ratio set to 1 will only consider geometric gaps, while one set to 0 will result in images chosen only for energy resolution. The AutoNEB method uses a fixed file-naming convention. The initial images should have the naming prefix000.traj, prefix001.traj, ... up until the final image in prefix00N.traj. Images are dynamically added in between the first and last image until n_max images have been reached. When doing the i'th NEB optimization a set of files prefixXXXiter00i.traj exists with XXX ranging from 000 to the N images currently in the NEB. The most recent NEB path can always be monitored by: $ ase-gui -n -1 neb???.traj """ def __init__(self, attach_calculators, prefix, n_simul, n_max, iter_folder='AutoNEB_iter', fmax=0.025, maxsteps=10000, k=0.1, climb=True, method='eb', optimizer='FIRE', remove_rotation_and_translation=False, space_energy_ratio=0.5, world=None, parallel=True, smooth_curve=False, interpolate_method='idpp'): self.attach_calculators = attach_calculators self.prefix = prefix self.n_simul = n_simul self.n_max = n_max self.climb = climb self.all_images = [] self.parallel = parallel self.maxsteps = maxsteps self.fmax = fmax self.k = k self.method = method self.remove_rotation_and_translation = remove_rotation_and_translation self.space_energy_ratio = space_energy_ratio if interpolate_method not in ['idpp', 'linear']: self.interpolate_method = 'idpp' print('Interpolation method not implemented.', 'Using the IDPP method.') else: self.interpolate_method = interpolate_method if world is None: world = mpi.world self.world = world self.smooth_curve = smooth_curve if optimizer == 'BFGS': self.optimizer = BFGS elif optimizer == 'FIRE': self.optimizer = FIRE else: raise Exception('Optimizer needs to be BFGS or FIRE') self.iter_folder = iter_folder if not os.path.exists(self.iter_folder) and self.world.rank == 0: os.makedirs(self.iter_folder) def execute_one_neb(self, n_cur, to_run, climb=False, many_steps=False): with ExitStack() as exitstack: self._execute_one_neb(exitstack, n_cur, to_run, climb=climb, many_steps=many_steps) def _execute_one_neb(self, exitstack, n_cur, to_run, climb=False, many_steps=False): '''Internal method which executes one NEB optimization.''' closelater = exitstack.enter_context self.iteration += 1 # First we copy around all the images we are not using in this # neb (for reproducibility purposes) if self.world.rank == 0: for i in range(n_cur): if i not in to_run[1: -1]: filename = '%s%03d.traj' % (self.prefix, i) with Trajectory(filename, mode='w', atoms=self.all_images[i]) as traj: traj.write() filename_ref = self.iter_folder + \ '/%s%03diter%03d.traj' % (self.prefix, i, self.iteration) if os.path.isfile(filename): shutil.copy2(filename, filename_ref) if self.world.rank == 0: print('Now starting iteration %d on ' % self.iteration, to_run) # Attach calculators to all the images we will include in the NEB self.attach_calculators([self.all_images[i] for i in to_run[1: -1]]) neb = NEB([self.all_images[i] for i in to_run], k=[self.k[i] for i in to_run[0:-1]], method=self.method, parallel=self.parallel, remove_rotation_and_translation=self .remove_rotation_and_translation, climb=climb) # Do the actual NEB calculation qn = closelater( self.optimizer(neb, logfile=self.iter_folder + '/%s_log_iter%03d.log' % (self.prefix, self.iteration)) ) # Find the ranks which are masters for their respective calculations if self.parallel: nneb = to_run[0] nim = len(to_run) - 2 n = self.world.size // nim # number of CPUs per image j = 1 + self.world.rank // n # my image number
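# Note on the layout used below: with n = world.size // nim, the ranks form nim consecutive blocks of n ranks, one block per moving image, and the first rank of each block (world.rank % n == 0) acts as the trajectory master for that image.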
assert nim * n == self.world.size traj = closelater(Trajectory( '%s%03d.traj' % (self.prefix, j + nneb), 'w', self.all_images[j + nneb], master=(self.world.rank % n == 0) )) filename_ref = self.iter_folder + \ '/%s%03diter%03d.traj' % (self.prefix, j + nneb, self.iteration) trajhist = closelater(Trajectory( filename_ref, 'w', self.all_images[j + nneb], master=(self.world.rank % n == 0) )) qn.attach(traj) qn.attach(trajhist) else: num = 1 for i, j in enumerate(to_run[1: -1]): filename_ref = self.iter_folder + \ '/%s%03diter%03d.traj' % (self.prefix, j, self.iteration) trajhist = closelater(Trajectory( filename_ref, 'w', self.all_images[j] )) qn.attach(seriel_writer(trajhist, i, num).write) traj = closelater(Trajectory( '%s%03d.traj' % (self.prefix, j), 'w', self.all_images[j] )) qn.attach(seriel_writer(traj, i, num).write) num += 1 if isinstance(self.maxsteps, (list, tuple)) and many_steps: steps = self.maxsteps[1] elif isinstance(self.maxsteps, (list, tuple)) and not many_steps: steps = self.maxsteps[0] else: steps = self.maxsteps if isinstance(self.fmax, (list, tuple)) and many_steps: fmax = self.fmax[1] elif isinstance(self.fmax, (list, tuple)) and not many_steps: fmax = self.fmax[0] else: fmax = self.fmax qn.run(fmax=fmax, steps=steps) # Remove the calculators and replace them with single # point calculators and update all the nodes for # preperration for next iteration neb.distribute = types.MethodType(store_E_and_F_in_spc, neb) neb.distribute() def run(self): '''Run the AutoNEB optimization algorithm.''' n_cur = self.__initialize__() while len(self.all_images) < self.n_simul + 2: if isinstance(self.k, (float, int)): self.k = [self.k] * (len(self.all_images) - 1) if self.world.rank == 0: print('Now adding images for initial run') # Insert a new image where the distance between two images is # the largest spring_lengths = [] for j in range(n_cur - 1): spring_vec = self.all_images[j + 1].get_positions() - \ self.all_images[j].get_positions() spring_lengths.append(np.linalg.norm(spring_vec)) jmax = np.argmax(spring_lengths) if self.world.rank == 0: print('Max length between images is at ', jmax) # The interpolation used to make initial guesses # If only start and end images supplied make all img at ones if len(self.all_images) == 2: n_between = self.n_simul else: n_between = 1 toInterpolate = [self.all_images[jmax]] for i in range(n_between): toInterpolate += [toInterpolate[0].copy()] toInterpolate += [self.all_images[jmax + 1]] neb = NEB(toInterpolate) neb.interpolate(method=self.interpolate_method) tmp = self.all_images[:jmax + 1] tmp += toInterpolate[1:-1] tmp.extend(self.all_images[jmax + 1:]) self.all_images = tmp # Expect springs to be in equilibrium k_tmp = self.k[:jmax] k_tmp += [self.k[jmax] * (n_between + 1)] * (n_between + 1) k_tmp.extend(self.k[jmax + 1:]) self.k = k_tmp # Run the NEB calculation with the new image included n_cur += n_between # Determine if any images do not have a valid energy yet energies = self.get_energies() n_non_valid_energies = len([e for e in energies if e != e]) if self.world.rank == 0: print('Start of evaluation of the initial images') while n_non_valid_energies != 0: if isinstance(self.k, (float, int)): self.k = [self.k] * (len(self.all_images) - 1) # First do one run since some energie are non-determined to_run, climb_safe = self.which_images_to_run_on() self.execute_one_neb(n_cur, to_run, climb=False) energies = self.get_energies() n_non_valid_energies = len([e for e in energies if e != e]) if self.world.rank == 0: print('Finished initialisation 
phase.') # Then add one image at a time until we have n_max images while n_cur < self.n_max: if isinstance(self.k, (float, int)): self.k = [self.k] * (len(self.all_images) - 1) # Insert a new image where the distance between two images # is the largest OR where a higher energy reselution is needed if self.world.rank == 0: print('****Now adding another image until n_max is reached', '({0}/{1})****'.format(n_cur, self.n_max)) spring_lengths = [] for j in range(n_cur - 1): spring_vec = self.all_images[j + 1].get_positions() - \ self.all_images[j].get_positions() spring_lengths.append(np.linalg.norm(spring_vec)) total_vec = self.all_images[0].get_positions() - \ self.all_images[-1].get_positions() tl = np.linalg.norm(total_vec) fR = max(spring_lengths) / tl e = self.get_energies() ed = [] emin = min(e) enorm = max(e) - emin for j in range(n_cur - 1): delta_E = (e[j + 1] - e[j]) * (e[j + 1] + e[j] - 2 * emin) / 2 / enorm ed.append(abs(delta_E)) gR = max(ed) / enorm if fR / gR > self.space_energy_ratio: jmax = np.argmax(spring_lengths) t = 'spring length!' else: jmax = np.argmax(ed) t = 'energy difference between neighbours!' if self.world.rank == 0: print('Adding image between {0} and'.format(jmax), '{0}. New image point is selected'.format(jmax + 1), 'on the basis of the biggest ' + t) toInterpolate = [self.all_images[jmax]] toInterpolate += [toInterpolate[0].copy()] toInterpolate += [self.all_images[jmax + 1]] neb = NEB(toInterpolate) neb.interpolate(method=self.interpolate_method) tmp = self.all_images[:jmax + 1] tmp += toInterpolate[1:-1] tmp.extend(self.all_images[jmax + 1:]) self.all_images = tmp # Expect springs to be in equilibrium k_tmp = self.k[:jmax] k_tmp += [self.k[jmax] * 2] * 2 k_tmp.extend(self.k[jmax + 1:]) self.k = k_tmp # Run the NEB calculation with the new image included n_cur += 1 to_run, climb_safe = self.which_images_to_run_on() self.execute_one_neb(n_cur, to_run, climb=False) if self.world.rank == 0: print('n_max images has been reached') # Do a single climb around the top-point if requested if self.climb: if isinstance(self.k, (float, int)): self.k = [self.k] * (len(self.all_images) - 1) if self.world.rank == 0: print('****Now doing the CI-NEB calculation****') to_run, climb_safe = self.which_images_to_run_on() assert climb_safe, 'climb_safe should be true at this point!' 
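# With all n_max images in place, a single climbing-image relaxation is run around the current peak; many_steps=True makes _execute_one_neb use the second entry of maxsteps/fmax when those are given as lists.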
self.execute_one_neb(n_cur, to_run, climb=True, many_steps=True) if not self.smooth_curve: return self.all_images # If a smooth_curve is requested, adjust the springs to follow two # Gaussian distributions e = self.get_energies() peak = self.get_highest_energy_index() k_max = 10 d1 = np.linalg.norm(self.all_images[peak].get_positions() - self.all_images[0].get_positions()) d2 = np.linalg.norm(self.all_images[peak].get_positions() - self.all_images[-1].get_positions()) l1 = -d1 ** 2 / log(0.2) l2 = -d2 ** 2 / log(0.2) x1 = [] x2 = [] for i in range(peak): v = (self.all_images[i].get_positions() + self.all_images[i + 1].get_positions()) / 2 - \ self.all_images[0].get_positions() x1.append(np.linalg.norm(v)) for i in range(peak, len(self.all_images) - 1): v = (self.all_images[i].get_positions() + self.all_images[i + 1].get_positions()) / 2 - \ self.all_images[0].get_positions() x2.append(np.linalg.norm(v)) k_tmp = [] for x in x1: k_tmp.append(k_max * exp(-((x - d1) ** 2) / l1)) for x in x2: k_tmp.append(k_max * exp(-((x - d1) ** 2) / l2)) self.k = k_tmp # Roll back to start from the top-point if self.world.rank == 0: print('Now moving from top to start') highest_energy_index = self.get_highest_energy_index() nneb = highest_energy_index - self.n_simul - 1 while nneb >= 0: self.execute_one_neb(n_cur, range(nneb, nneb + self.n_simul + 2), climb=False) nneb -= 1 # Roll forward from the top-point until the end nneb = self.get_highest_energy_index() if self.world.rank == 0: print('Now moving from top to end') while nneb <= self.n_max - self.n_simul - 2: self.execute_one_neb(n_cur, range(nneb, nneb + self.n_simul + 2), climb=False) nneb += 1 return self.all_images def __initialize__(self): '''Load files from the filesystem.''' if not os.path.isfile('%s000.traj' % self.prefix): raise IOError('No file with name %s000.traj' % self.prefix, 'was found.
Should contain initial image') # Find the images that exist index_exists = [i for i in range(self.n_max) if os.path.isfile('%s%03d.traj' % (self.prefix, i))] n_cur = index_exists[-1] + 1 if self.world.rank == 0: print('The NEB initially has %d images ' % len(index_exists), '(including the end-points)') if len(index_exists) == 1: raise Exception('Only a start point exists') for i in range(len(index_exists)): if i != index_exists[i]: raise Exception('Files must be ordered sequentially', 'without gaps.') if self.world.rank == 0: for i in index_exists: filename_ref = self.iter_folder + \ '/%s%03diter000.traj' % (self.prefix, i) if os.path.isfile(filename_ref): try: os.rename(filename_ref, filename_ref + '.bak') except IOError: pass filename = '%s%03d.traj' % (self.prefix, i) try: shutil.copy2(filename, filename_ref) except IOError: pass # Wait until the file system on all nodes is synchronized self.world.barrier() # And now let's read in the configurations for i in range(n_cur): if i in index_exists: filename = '%s%03d.traj' % (self.prefix, i) newim = read(filename) self.all_images.append(newim) else: self.all_images.append(self.all_images[0].copy()) self.iteration = 0 return n_cur def get_energies(self): """Utility method to extract all energies and insert np.NaN at invalid images.""" energies = [] for a in self.all_images: try: energies.append(a.get_potential_energy()) except RuntimeError: energies.append(np.NaN) return energies def get_energies_one_image(self, image): """Utility method to extract energy of an image and return np.NaN if invalid.""" try: energy = image.get_potential_energy() except RuntimeError: energy = np.NaN return energy def get_highest_energy_index(self): """Find the index of the image with the highest energy.""" energies = self.get_energies() valid_entries = [(i, e) for i, e in enumerate(energies) if e == e] highest_energy_index = max(valid_entries, key=lambda x: x[1])[0] return highest_energy_index def which_images_to_run_on(self): """Determine which set of images to do a NEB at.
The priority is to first include all images without valid energies, secondly include the highest energy image.""" n_cur = len(self.all_images) energies = self.get_energies() # Find out which image is the first one missing the energy and # which is the last one missing the energy first_missing = n_cur last_missing = 0 n_missing = 0 for i in range(1, n_cur - 1): if energies[i] != energies[i]: n_missing += 1 first_missing = min(first_missing, i) last_missing = max(last_missing, i) highest_energy_index = self.get_highest_energy_index() nneb = highest_energy_index - 1 - self.n_simul // 2 nneb = max(nneb, 0) nneb = min(nneb, n_cur - self.n_simul - 2) nneb = min(nneb, first_missing - 1) nneb = max(nneb + self.n_simul, last_missing) - self.n_simul to_use = range(nneb, nneb + self.n_simul + 2) while self.get_energies_one_image(self.all_images[to_use[0]]) != \ self.get_energies_one_image(self.all_images[to_use[0]]): to_use[0] -= 1 while self.get_energies_one_image(self.all_images[to_use[-1]]) != \ self.get_energies_one_image(self.all_images[to_use[-1]]): to_use[-1] += 1 return to_use, (highest_energy_index in to_use[1: -1]) class seriel_writer: def __init__(self, traj, i, num): self.traj = traj self.i = i self.num = num def write(self): if self.num % (self.i + 1) == 0: self.traj.write() def store_E_and_F_in_spc(self): """Collect the energies and forces on all nodes and store as single point calculators""" # Make sure energies and forces are known on all nodes self.get_forces() images = self.images if self.parallel: energy = np.empty(1) forces = np.empty((self.natoms, 3)) for i in range(1, self.nimages - 1): # Determine which node is the leading for image i root = (i - 1) * self.world.size // (self.nimages - 2) # If on this node, extract the calculated numbers if self.world.rank == root: energy[0] = images[i].get_potential_energy() forces = images[i].get_forces() # Distribute these numbers to other nodes self.world.broadcast(energy, root) self.world.broadcast(forces, root) # On all nodes, remove the calculator, keep only energy # and force in single point calculator self.images[i].calc = SinglePointCalculator( self.images[i], energy=energy[0], forces=forces) ase-3.22.1/ase/build/000077500000000000000000000000001415166253600142275ustar00rootroot00000000000000ase-3.22.1/ase/build/__init__.py000066400000000000000000000033601415166253600163420ustar00rootroot00000000000000from ase.build.rotate import minimize_rotation_and_translation from ase.build.surface import ( add_adsorbate, add_vacuum, bcc100, bcc110, bcc111, diamond100, diamond111, fcc100, fcc110, fcc111, fcc211, hcp0001, hcp10m10, mx2, graphene) from ase.build.bulk import bulk from ase.build.general_surface import surface from ase.build.molecule import molecule from ase.build.root import (hcp0001_root, fcc111_root, bcc111_root, root_surface, root_surface_analysis) from ase.build.tube import nanotube from ase.build.ribbon import graphene_nanoribbon from ase.build.tools import (cut, stack, sort, minimize_tilt, niggli_reduce, rotate) from ase.build.connected import (connected_atoms, connected_indices, separate, split_bond) from ase.build.supercells import ( get_deviation_from_optimal_cell_shape, find_optimal_cell_shape, make_supercell) __all__ = ['minimize_rotation_and_translation', 'add_adsorbate', 'add_vacuum', 'bcc100', 'bcc110', 'bcc111', 'diamond100', 'diamond111', 'fcc100', 'fcc110', 'fcc111', 'fcc211', 'hcp0001', 'hcp10m10', 'mx2', 'graphene', 'bulk', 'surface', 'molecule', 'hcp0001_root', 'fcc111_root', 'bcc111_root', 'root_surface', 
'root_surface_analysis', 'nanotube', 'graphene_nanoribbon', 'cut', 'stack', 'sort', 'minimize_tilt', 'niggli_reduce', 'rotate', 'connected_atoms', 'connected_indices', 'separate', 'split_bond', 'get_deviation_from_optimal_cell_shape', 'find_optimal_cell_shape', 'find_optimal_cell_shape_pure_python', 'make_supercell'] ase-3.22.1/ase/build/attach.py000066400000000000000000000067251415166253600160570ustar00rootroot00000000000000import numpy as np from ase.parallel import world, broadcast from ase.geometry import get_distances def random_unit_vector(rng): """Random unit vector equally distributed on the sphere Parameter --------- rng: random number generator object """ ct = -1 + 2 * rng.rand() phi = 2 * np.pi * rng.rand() st = np.sqrt(1 - ct**2) return np.array([st * np.cos(phi), st * np.sin(phi), ct]) def nearest(atoms1, atoms2, cell=None, pbc=None): """Return indices of nearest atoms""" p1 = atoms1.get_positions() p2 = atoms2.get_positions() vd_aac, d2_aa = get_distances(p1, p2, cell, pbc) i1, i2 = np.argwhere(d2_aa == d2_aa.min())[0] return i1, i2, vd_aac[i1, i2] def attach(atoms1, atoms2, distance, direction=(1, 0, 0), maxiter=50, accuracy=1e-5): """Attach two structures Parameters ---------- atoms1: Atoms cell and pbc of this object are used atoms2: Atoms distance: float minimal distance (Angstrom) direction: unit vector (3 floats) relative direction between center of masses maxiter: int maximal number of iterations to get required distance, default 100 accuracy: float required accuracy for minimal distance (Angstrom), default 1e-5 """ atoms = atoms1.copy() atoms2 = atoms2.copy() direction = np.array(direction, dtype=float) direction /= np.linalg.norm(direction) assert len(direction) == 3 dist2 = distance**2 i1, i2, dv_c = nearest(atoms, atoms2, atoms.cell, atoms.pbc) for i in range(maxiter): dv2 = (dv_c**2).sum() vcost = np.dot(dv_c, direction) a = np.sqrt(max(0, dist2 - dv2 + vcost**2)) move = a - vcost if abs(move) < accuracy: atoms += atoms2 return atoms # we need to move atoms2.translate(direction * move) i1, i2, dv_c = nearest(atoms, atoms2, atoms.cell, atoms.pbc) raise RuntimeError('attach did not converge') def attach_randomly(atoms1, atoms2, distance, rng=np.random): """Randomly attach two structures with a given minimal distance Parameters ---------- atoms1: Atoms object atoms2: Atoms object distance: float Required distance rng: random number generator object defaults to np.random.RandomState() Returns ------- Joined structure as an atoms object. """ atoms2 = atoms2.copy() atoms2.rotate('x', random_unit_vector(rng), center=atoms2.get_center_of_mass()) return attach(atoms1, atoms2, distance, direction=random_unit_vector(rng)) def attach_randomly_and_broadcast(atoms1, atoms2, distance, rng=np.random, comm=world): """Randomly attach two structures with a given minimal distance and ensure that these are distributed. Parameters ---------- atoms1: Atoms object atoms2: Atoms object distance: float Required distance rng: random number generator object defaults to np.random.RandomState() comm: communicator to distribute Communicator to distribute the structure, default: world Returns ------- Joined structure as an atoms object. 
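Example (an illustrative sketch; the molecule names and the 3 Angstrom target separation are arbitrary, and a serial communicator is assumed)::

    from ase.build import molecule
    pair = attach_randomly_and_broadcast(molecule('H2O'), molecule('NH3'), 3.0)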
""" if comm.rank == 0: joined = attach_randomly(atoms1, atoms2, distance, rng) broadcast(joined, 0, comm=comm) else: joined = broadcast(None, 0, comm) return joined ase-3.22.1/ase/build/bulk.py000066400000000000000000000303451415166253600155430ustar00rootroot00000000000000from math import sqrt from ase.atoms import Atoms from ase.symbols import string2symbols from ase.data import reference_states, atomic_numbers, chemical_symbols from ase.utils import plural def incompatible_cell(*, want, have): return RuntimeError('Cannot create {} cell for {} structure' .format(want, have)) def bulk(name, crystalstructure=None, a=None, b=None, c=None, *, alpha=None, covera=None, u=None, orthorhombic=False, cubic=False, basis=None): """Creating bulk systems. Crystal structure and lattice constant(s) will be guessed if not provided. name: str Chemical symbol or symbols as in 'MgO' or 'NaCl'. crystalstructure: str Must be one of sc, fcc, bcc, tetragonal, bct, hcp, rhombohedral, orthorhombic, mlc, diamond, zincblende, rocksalt, cesiumchloride, fluorite or wurtzite. a: float Lattice constant. b: float Lattice constant. If only a and b is given, b will be interpreted as c instead. c: float Lattice constant. alpha: float Angle in degrees for rhombohedral lattice. covera: float c/a ratio used for hcp. Default is ideal ratio: sqrt(8/3). u: float Internal coordinate for Wurtzite structure. orthorhombic: bool Construct orthorhombic unit cell instead of primitive cell which is the default. cubic: bool Construct cubic unit cell if possible. """ if c is None and b is not None: # If user passes (a, b) positionally, we want it as (a, c) instead: c, b = b, c if covera is not None and c is not None: raise ValueError("Don't specify both c and c/a!") xref = None ref = {} if name in chemical_symbols: Z = atomic_numbers[name] ref = reference_states[Z] if ref is not None: xref = ref['symmetry'] # If user did not specify crystal structure, and no basis # is given, and the reference state says we need one, but # does not have one, then we can't proceed. if (crystalstructure is None and basis is None and 'basis' in ref and ref['basis'] is None): # XXX This is getting much too complicated, we need to split # this function up. A lot. raise RuntimeError('This structure requires an atomic basis') if ref is None: ref = {} # easier to 'get' things from empty dictionary than None if xref == 'cubic': # P and Mn are listed as 'cubic' but the lattice constants # are 7 and 9. They must be something other than simple cubic # then. We used to just return the cubic one but that must # have been wrong somehow. --askhl raise RuntimeError('Only simple cubic ("sc") supported') # Mapping of name to number of atoms in primitive cell. structures = {'sc': 1, 'fcc': 1, 'bcc': 1, 'tetragonal': 1, 'bct': 1, 'hcp': 1, 'rhombohedral': 1, 'orthorhombic': 1, 'mcl': 1, 'diamond': 1, 'zincblende': 2, 'rocksalt': 2, 'cesiumchloride': 2, 'fluorite': 3, 'wurtzite': 2} if crystalstructure is None: crystalstructure = xref if crystalstructure not in structures: raise ValueError('No suitable reference data for bulk {}.' ' Reference data: {}' .format(name, ref)) if crystalstructure not in structures: raise ValueError('Unknown structure: {}.' 
.format(crystalstructure)) # Check name: natoms = len(string2symbols(name)) natoms0 = structures[crystalstructure] if natoms != natoms0: raise ValueError('Please specify {} for {} and not {}' .format(plural(natoms0, 'atom'), crystalstructure, natoms)) if alpha is None: alpha = ref.get('alpha') if a is None: if xref != crystalstructure: raise ValueError('You need to specify the lattice constant.') try: a = ref['a'] except KeyError: raise KeyError('No reference lattice parameter "a" for "{}"' .format(name)) if b is None: bovera = ref.get('b/a') if bovera is not None and a is not None: b = bovera * a if crystalstructure in ['hcp', 'wurtzite']: if cubic: raise incompatible_cell(want='cubic', have=crystalstructure) if c is not None: covera = c / a elif covera is None: if xref == crystalstructure: covera = ref['c/a'] else: covera = sqrt(8 / 3) if covera is None: covera = ref.get('c/a') if c is None and covera is not None: c = covera * a if orthorhombic and crystalstructure not in ['sc', 'tetragonal', 'orthorhombic']: return _orthorhombic_bulk(name, crystalstructure, a, covera, u) if cubic and crystalstructure in ['bcc', 'cesiumchloride']: return _orthorhombic_bulk(name, crystalstructure, a, covera) if cubic and crystalstructure != 'sc': return _cubic_bulk(name, crystalstructure, a) if crystalstructure == 'sc': atoms = Atoms(name, cell=(a, a, a), pbc=True) elif crystalstructure == 'fcc': b = a / 2 atoms = Atoms(name, cell=[(0, b, b), (b, 0, b), (b, b, 0)], pbc=True) elif crystalstructure == 'bcc': b = a / 2 atoms = Atoms(name, cell=[(-b, b, b), (b, -b, b), (b, b, -b)], pbc=True) elif crystalstructure == 'hcp': atoms = Atoms(2 * name, scaled_positions=[(0, 0, 0), (1 / 3, 2 / 3, 0.5)], cell=[(a, 0, 0), (-a / 2, a * sqrt(3) / 2, 0), (0, 0, covera * a)], pbc=True) elif crystalstructure == 'diamond': atoms = bulk(2 * name, 'zincblende', a) elif crystalstructure == 'zincblende': s1, s2 = string2symbols(name) atoms = bulk(s1, 'fcc', a) + bulk(s2, 'fcc', a) atoms.positions[1] += a / 4 elif crystalstructure == 'rocksalt': s1, s2 = string2symbols(name) atoms = bulk(s1, 'fcc', a) + bulk(s2, 'fcc', a) atoms.positions[1, 0] += a / 2 elif crystalstructure == 'cesiumchloride': s1, s2 = string2symbols(name) atoms = bulk(s1, 'sc', a) + bulk(s2, 'sc', a) atoms.positions[1, :] += a / 2 elif crystalstructure == 'fluorite': s1, s2, s3 = string2symbols(name) atoms = bulk(s1, 'fcc', a) + bulk(s2, 'fcc', a) + bulk(s3, 'fcc', a) atoms.positions[1, :] += a / 4 atoms.positions[2, :] += a * 3 / 4 elif crystalstructure == 'wurtzite': u = u or 0.25 + 1 / 3 / covera**2 atoms = Atoms(2 * name, scaled_positions=[(0, 0, 0), (1 / 3, 2 / 3, 0.5 - u), (1 / 3, 2 / 3, 0.5), (0, 0, 1 - u)], cell=[(a, 0, 0), (-a / 2, a * sqrt(3) / 2, 0), (0, 0, a * covera)], pbc=True) elif crystalstructure == 'bct': from ase.lattice import BCT if basis is None: basis = ref.get('basis') if basis is not None: natoms = len(basis) lat = BCT(a=a, c=c) atoms = Atoms([name] * natoms, cell=lat.tocell(), pbc=True, scaled_positions=basis) elif crystalstructure == 'rhombohedral': atoms = _build_rhl(name, a, alpha, basis) elif crystalstructure == 'orthorhombic': atoms = Atoms(name, cell=[a, b, c], pbc=True) else: raise ValueError('Unknown crystal structure: ' + crystalstructure) if orthorhombic: assert atoms.cell.orthorhombic if cubic: assert abs(atoms.cell.angles() - 90).all() < 1e-10 return atoms def _build_rhl(name, a, alpha, basis): from ase.lattice import RHL lat = RHL(a, alpha) cell = lat.tocell() if basis is None: # RHL: Given by A&M as scaled coordinates 
"x" of cell.sum(0): basis_x = reference_states[atomic_numbers[name]]['basis_x'] basis = basis_x[:, None].repeat(3, axis=1) natoms = len(basis) return Atoms([name] * natoms, cell=cell, scaled_positions=basis, pbc=True) def _orthorhombic_bulk(name, crystalstructure, a, covera=None, u=None): if crystalstructure == 'fcc': b = a / sqrt(2) atoms = Atoms(2 * name, cell=(b, b, a), pbc=True, scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)]) elif crystalstructure == 'bcc': atoms = Atoms(2 * name, cell=(a, a, a), pbc=True, scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)]) elif crystalstructure == 'hcp': atoms = Atoms(4 * name, cell=(a, a * sqrt(3), covera * a), scaled_positions=[(0, 0, 0), (0.5, 0.5, 0), (0.5, 1 / 6, 0.5), (0, 2 / 3, 0.5)], pbc=True) elif crystalstructure == 'diamond': atoms = _orthorhombic_bulk(2 * name, 'zincblende', a) elif crystalstructure == 'zincblende': s1, s2 = string2symbols(name) b = a / sqrt(2) atoms = Atoms(2 * name, cell=(b, b, a), pbc=True, scaled_positions=[(0, 0, 0), (0.5, 0, 0.25), (0.5, 0.5, 0.5), (0, 0.5, 0.75)]) elif crystalstructure == 'rocksalt': s1, s2 = string2symbols(name) b = a / sqrt(2) atoms = Atoms(2 * name, cell=(b, b, a), pbc=True, scaled_positions=[(0, 0, 0), (0.5, 0.5, 0), (0.5, 0.5, 0.5), (0, 0, 0.5)]) elif crystalstructure == 'cesiumchloride': atoms = Atoms(name, cell=(a, a, a), pbc=True, scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)]) elif crystalstructure == 'wurtzite': u = u or 0.25 + 1 / 3 / covera**2 atoms = Atoms(4 * name, cell=(a, a * 3**0.5, covera * a), scaled_positions=[(0, 0, 0), (0, 1 / 3, 0.5 - u), (0, 1 / 3, 0.5), (0, 0, 1 - u), (0.5, 0.5, 0), (0.5, 5 / 6, 0.5 - u), (0.5, 5 / 6, 0.5), (0.5, 0.5, 1 - u)], pbc=True) else: raise incompatible_cell(want='orthorhombic', have=crystalstructure) return atoms def _cubic_bulk(name, crystalstructure, a): if crystalstructure == 'fcc': atoms = Atoms(4 * name, cell=(a, a, a), pbc=True, scaled_positions=[(0, 0, 0), (0, 0.5, 0.5), (0.5, 0, 0.5), (0.5, 0.5, 0)]) elif crystalstructure == 'diamond': atoms = _cubic_bulk(2 * name, 'zincblende', a) elif crystalstructure == 'zincblende': atoms = Atoms(4 * name, cell=(a, a, a), pbc=True, scaled_positions=[(0, 0, 0), (0.25, 0.25, 0.25), (0, 0.5, 0.5), (0.25, 0.75, 0.75), (0.5, 0, 0.5), (0.75, 0.25, 0.75), (0.5, 0.5, 0), (0.75, 0.75, 0.25)]) elif crystalstructure == 'rocksalt': atoms = Atoms(4 * name, cell=(a, a, a), pbc=True, scaled_positions=[(0, 0, 0), (0.5, 0, 0), (0, 0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0, 0.5), (0, 0, 0.5), (0.5, 0.5, 0), (0, 0.5, 0)]) else: raise incompatible_cell(want='cubic', have=crystalstructure) return atoms ase-3.22.1/ase/build/connected.py000066400000000000000000000045621415166253600165520ustar00rootroot00000000000000from ase.atoms import Atoms from ase.data import covalent_radii from ase.neighborlist import NeighborList def connected_atoms(atoms, index, dmax=None, scale=1.5): """Find all atoms connected to atoms[index] and return them.""" return atoms[connected_indices(atoms, index, dmax, scale)] def connected_indices(atoms, index, dmax=None, scale=1.5): """Find atoms connected to atoms[index] and return their indices. If dmax is not None: Atoms are defined to be connected if they are nearer than dmax to each other. If dmax is None: Atoms are defined to be connected if they are nearer than the sum of their covalent radii * scale to each other. 
""" if index < 0: index = len(atoms) + index # set neighbor lists if dmax is None: # define neighbors according to covalent radii radii = scale * covalent_radii[atoms.get_atomic_numbers()] else: # define neighbors according to distance radii = [0.5 * dmax] * len(atoms) nl = NeighborList(radii, skin=0, self_interaction=False, bothways=True) nl.update(atoms) connected = [index] + list(nl.get_neighbors(index)[0]) isolated = False while not isolated: isolated = True for i in connected: for j in nl.get_neighbors(i)[0]: if j not in connected: connected.append(j) isolated = False return connected def separate(atoms, **kwargs): """Split atoms into separated entities Returns: List of Atoms object that connected_indices calls connected. """ indices = list(range(len(atoms))) separated = [] while indices: my_indcs = connected_indices(atoms, indices[0], **kwargs) separated.append(Atoms(cell=atoms.cell, pbc=atoms.pbc)) for i in my_indcs: separated[-1].append(atoms[i]) del indices[indices.index(i)] return separated def split_bond(atoms, index1, index2): """Split atoms by a bond specified by indices""" assert index1 != index2 if index2 > index1: shift = 0, 1 else: shift = 1, 0 atoms_copy = atoms.copy() del atoms_copy[index2] atoms1 = connected_atoms(atoms_copy, index1 - shift[0]) atoms_copy = atoms.copy() del atoms_copy[index1] atoms2 = connected_atoms(atoms_copy, index2 - shift[1]) return atoms1, atoms2 ase-3.22.1/ase/build/general_surface.py000066400000000000000000000070731415166253600177350ustar00rootroot00000000000000from math import gcd import numpy as np from numpy.linalg import norm, solve from ase.build import bulk def surface(lattice, indices, layers, vacuum=None, tol=1e-10, periodic=False): """Create surface from a given lattice and Miller indices. lattice: Atoms object or str Bulk lattice structure of alloy or pure metal. Note that the unit-cell must be the conventional cell - not the primitive cell. One can also give the chemical symbol as a string, in which case the correct bulk lattice will be generated automatically. indices: sequence of three int Surface normal in Miller indices (h,k,l). layers: int Number of equivalent layers of the slab. vacuum: float Amount of vacuum added on both sides of the slab. 
periodic: bool Whether the surface is periodic in the normal to the surface """ indices = np.asarray(indices) if indices.shape != (3,) or not indices.any() or indices.dtype != int: raise ValueError('%s is an invalid surface type' % indices) if isinstance(lattice, str): lattice = bulk(lattice, cubic=True) h, k, l = indices h0, k0, l0 = (indices == 0) if h0 and k0 or h0 and l0 or k0 and l0: # if two indices are zero if not h0: c1, c2, c3 = [(0, 1, 0), (0, 0, 1), (1, 0, 0)] if not k0: c1, c2, c3 = [(0, 0, 1), (1, 0, 0), (0, 1, 0)] if not l0: c1, c2, c3 = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] else: p, q = ext_gcd(k, l) a1, a2, a3 = lattice.cell # constants describing the dot product of basis c1 and c2: # dot(c1,c2) = k1+i*k2, i in Z k1 = np.dot(p * (k * a1 - h * a2) + q * (l * a1 - h * a3), l * a2 - k * a3) k2 = np.dot(l * (k * a1 - h * a2) - k * (l * a1 - h * a3), l * a2 - k * a3) if abs(k2) > tol: i = -int(round(k1 / k2)) # i corresponding to the optimal basis p, q = p + i * l, q - i * k a, b = ext_gcd(p * k + q * l, h) c1 = (p * k + q * l, -p * h, -q * h) c2 = np.array((0, l, -k)) // abs(gcd(l, k)) c3 = (b, a * p, a * q) surf = build(lattice, np.array([c1, c2, c3]), layers, tol, periodic) if vacuum is not None: surf.center(vacuum=vacuum, axis=2) return surf def build(lattice, basis, layers, tol, periodic): surf = lattice.copy() scaled = solve(basis.T, surf.get_scaled_positions().T).T scaled -= np.floor(scaled + tol) surf.set_scaled_positions(scaled) surf.set_cell(np.dot(basis, surf.cell), scale_atoms=True) surf *= (1, 1, layers) a1, a2, a3 = surf.cell surf.set_cell([a1, a2, np.cross(a1, a2) * np.dot(a3, np.cross(a1, a2)) / norm(np.cross(a1, a2))**2]) # Change unit cell to have the x-axis parallel with a surface vector # and z perpendicular to the surface: a1, a2, a3 = surf.cell surf.set_cell([(norm(a1), 0, 0), (np.dot(a1, a2) / norm(a1), np.sqrt(norm(a2)**2 - (np.dot(a1, a2) / norm(a1))**2), 0), (0, 0, norm(a3))], scale_atoms=True) surf.pbc = (True, True, periodic) # Move atoms into the unit cell: scaled = surf.get_scaled_positions() scaled[:, :2] %= 1 surf.set_scaled_positions(scaled) if not periodic: surf.cell[2] = 0.0 return surf def ext_gcd(a, b): if b == 0: return 1, 0 elif a % b == 0: return 0, 1 else: x, y = ext_gcd(b, a % b) return y, x - y * (a // b) ase-3.22.1/ase/build/molecule.py000066400000000000000000000200141415166253600164030ustar00rootroot00000000000000from ase.atoms import Atoms from ase.collections import g2 def molecule(name, vacuum=None, **kwargs): """Create an atomic structure from a database. This is a helper function to easily create molecules from the g2 and extra databases. Parameters ---------- name : str Name of the molecule to build. vacuum : float, optional Amount of vacuum to pad the molecule with on all sides. Additional keyword arguments (kwargs) can be supplied, which are passed to ase.Atoms. Returns ------- ase.atoms.Atoms An ASE Atoms object corresponding to the specified molecule. 
Notes ----- To see a list of allowed names, try: >>> from ase.collections import g2 >>> print(g2.names) >>> from ase.build.molecule import extra >>> print(extra.keys()) Examples -------- >>> from ase.build import molecule >>> atoms = molecule('H2O') """ if name in extra: kwargs.update(extra[name]) mol = Atoms(**kwargs) else: mol = g2[name] if kwargs: mol = Atoms(mol, **kwargs) if vacuum is not None: mol.center(vacuum=vacuum) return mol extra = { 'Be2': { 'symbols': 'BeBe', 'positions': [[0, 0, 1.0106], [0, 0, -1.0106]]}, 'C7NH5': { 'symbols': 'C7NH5', 'positions': [[-1.593581, -1.142601, 0.], [-2.235542, 0.095555, 0.], [-0.204885, -1.210726, 0.], [0.549645, -0.025355, 0.], [1.976332, -0.085321, 0.], [-0.099258, 1.220706, 0.], [-1.488628, 1.273345, 0.], [3.136871, -0.128138, 0.], [-2.177996, -2.060896, 0.], [-3.323594, 0.141242, 0.], [0.301694, -2.173705, 0.], [0.488716, 2.136782, 0.], [-1.987765, 2.240495, 0.]]}, 'BDA': { # 1,4-Benzodiamine # aka p-Aminoaniline; p-Benzenediamine; p-Diaminobenzene; # p-Phenylenediamine; Paraphenylen-diamine # PBE-gpaw relaxed 'symbols': 'C6H4N2H4', 'positions': [[0.004212, 1.406347, 0.061073], [1.193490, 0.687096, 0.029481], [1.190824, -0.690400, -0.028344], [0.000295, -1.406191, -0.059503], [-1.186974, -0.685668, -0.045413], [-1.185376, 0.690203, 0.009452], [2.147124, 1.219997, 0.064477], [2.141593, -1.227477, -0.054266], [-2.138408, -1.222814, -0.095050], [-2.137740, 1.226930, 0.023036], [-0.006314, 2.776024, 0.186278], [-0.007340, -2.777839, -0.159936], [0.844710, -3.256543, 0.110098], [-0.854965, -3.253324, 0.130125], [0.845826, 3.267270, -0.055549], [-0.854666, 3.254654, -0.092676]]}, 'biphenyl': { # PBE-gpaw relaxed 'symbols': 'C6H5C6H5', 'positions': [[-0.74081, -0.00000, -0.00003], [-1.46261, -1.20370, -0.00993], [-2.85531, -1.20350, -0.00663], [-3.55761, -0.00000, -0.00003], [-2.85531, 1.20350, 0.00667], [-1.46261, 1.20370, 0.00997], [-0.92071, -2.14850, 0.00967], [-3.38981, -2.15110, -0.00083], [-4.64571, -0.00000, -0.00003], [-3.38981, 2.15110, 0.00077], [-0.92071, 2.14850, -0.00963], [3.55849, -0.00000, -0.00003], [2.85509, -0.86640, -0.83553], [1.46289, -0.87000, -0.83153], [0.73969, -0.00000, -0.00003], [1.46289, 0.87000, 0.83157], [2.85509, 0.86640, 0.83547], [4.64659, -0.00000, -0.00003], [3.39189, -1.53770, -1.50253], [0.91869, -1.53310, -1.50263], [0.91869, 1.53310, 1.50267], [3.39189, 1.53770, 1.50257]]}, 'C60': { # Buckminsterfullerene, I*h symm. # The Buckyball has two degrees of freedom, the C-C bond, and the # C=C bond. This is an LDA-gpaw relaxed structure with bond lengths # 1.437 and 1.385. # Experimentally, the two bond lengths are 1.45 and 1.40 Angstrom. 
'symbols': 'C60', 'positions': [[2.2101953, 0.5866631, 2.6669504], [3.1076393, 0.1577008, 1.6300286], [1.3284430, -0.3158939, 3.2363232], [3.0908709, -1.1585005, 1.2014240], [3.1879245, -1.4574599, -0.1997005], [3.2214623, 1.2230966, 0.6739440], [3.3161210, 0.9351586, -0.6765151], [3.2984981, -0.4301142, -1.1204138], [-0.4480842, 1.3591484, 3.2081020], [0.4672056, 2.2949830, 2.6175264], [-0.0256575, 0.0764219, 3.5086259], [1.7727917, 1.9176584, 2.3529691], [2.3954623, 2.3095689, 1.1189539], [-0.2610195, 3.0820935, 1.6623117], [0.3407726, 3.4592388, 0.4745968], [1.6951171, 3.0692446, 0.1976623], [-2.1258394, -0.8458853, 2.6700963], [-2.5620990, 0.4855202, 2.3531715], [-0.8781521, -1.0461985, 3.2367302], [-1.7415096, 1.5679963, 2.6197333], [-1.6262468, 2.6357030, 1.6641811], [-3.2984810, 0.4301871, 1.1204208], [-3.1879469, 1.4573895, 0.1996030], [-2.3360261, 2.5813627, 0.4760912], [-0.5005210, -2.9797771, 1.7940308], [-1.7944338, -2.7729087, 1.2047891], [-0.0514245, -2.1328841, 2.7938830], [-2.5891471, -1.7225828, 1.6329715], [-3.3160705, -0.9350636, 0.6765268], [-1.6951919, -3.0692581, -0.1976564], [-2.3954901, -2.3096853, -1.1189862], [-3.2214182, -1.2231835, -0.6739581], [2.1758234, -2.0946263, 1.7922529], [1.7118619, -2.9749681, 0.7557198], [1.3130656, -1.6829416, 2.7943892], [0.3959024, -3.4051395, 0.7557638], [-0.3408219, -3.4591883, -0.4745610], [2.3360057, -2.5814499, -0.4761050], [1.6263757, -2.6357349, -1.6642309], [0.2611352, -3.0821271, -1.6622618], [-2.2100844, -0.5868636, -2.6670300], [-1.7726970, -1.9178969, -2.3530466], [-0.4670723, -2.2950509, -2.6175105], [-1.3283500, 0.3157683, -3.2362375], [-2.1759882, 2.0945383, -1.7923294], [-3.0909663, 1.1583472, -1.2015749], [-3.1076090, -0.1578453, -1.6301627], [-1.3131365, 1.6828292, -2.7943639], [0.5003224, 2.9799637, -1.7940203], [-0.3961148, 3.4052817, -0.7557272], [-1.7120629, 2.9749122, -0.7557988], [0.0512824, 2.1329478, -2.7937450], [2.1258630, 0.8460809, -2.6700534], [2.5891853, 1.7227742, -1.6329562], [1.7943010, 2.7730684, -1.2048262], [0.8781323, 1.0463514, -3.2365313], [0.4482452, -1.3591061, -3.2080510], [1.7416948, -1.5679557, -2.6197714], [2.5621724, -0.4853529, -2.3532026], [0.0257904, -0.0763567, -3.5084446]]}} ase-3.22.1/ase/build/ribbon.py000066400000000000000000000132651415166253600160630ustar00rootroot00000000000000from math import sqrt import numpy as np from ase.atoms import Atoms def graphene_nanoribbon(n, m, type='zigzag', saturated=False, C_H=1.09, C_C=1.42, vacuum=None, magnetic=False, initial_mag=1.12, sheet=False, main_element='C', saturate_element='H'): """Create a graphene nanoribbon. Creates a graphene nanoribbon in the x-z plane, with the nanoribbon running along the z axis. Parameters: n: int The width of the nanoribbon. For armchair nanoribbons, this n may be half-integer to repeat by half a cell. m: int The length of the nanoribbon. type: str The orientation of the ribbon. Must be either 'zigzag' or 'armchair'. saturated: bool If true, hydrogen atoms are placed along the edge. C_H: float Carbon-hydrogen bond length. Default: 1.09 Angstrom. C_C: float Carbon-carbon bond length. Default: 1.42 Angstrom. vacuum: None (default) or float Amount of vacuum added to non-periodic directions, if present. magnetic: bool Make the edges magnetic. initial_mag: float Magnitude of magnetic moment if magnetic. 
sheet: bool If true, make an infinite sheet instead of a ribbon (default: False) """ if m % 1 != 0: raise ValueError('m must be integer') if type == 'zigzag' and n % 1 != 0: raise ValueError('n must be an integer for zigzag ribbons') b = sqrt(3) * C_C / 4 arm_unit = Atoms(main_element + '4', pbc=(1, 0, 1), cell=[4 * b, 0, 3 * C_C]) arm_unit.positions = [[0, 0, 0], [b * 2, 0, C_C / 2.], [b * 2, 0, 3 * C_C / 2.], [0, 0, 2 * C_C]] arm_unit_half = Atoms(main_element + '2', pbc=(1, 0, 1), cell=[2 * b, 0, 3 * C_C]) arm_unit_half.positions = [[b * 2, 0, C_C / 2.], [b * 2, 0, 3 * C_C / 2.]] zz_unit = Atoms(main_element + '2', pbc=(1, 0, 1), cell=[3 * C_C / 2.0, 0, b * 4]) zz_unit.positions = [[0, 0, 0], [C_C / 2.0, 0, b * 2]] atoms = Atoms() if type == 'zigzag': edge_index0 = np.arange(m) * 2 edge_index1 = (n - 1) * m * 2 + np.arange(m) * 2 + 1 if magnetic: mms = np.zeros(m * n * 2) for i in edge_index0: mms[i] = initial_mag for i in edge_index1: mms[i] = -initial_mag for i in range(n): layer = zz_unit.repeat((1, 1, m)) layer.positions[:, 0] += 3 * C_C / 2 * i if i % 2 == 1: layer.positions[:, 2] += 2 * b layer[-1].position[2] -= b * 4 * m atoms += layer xmin = atoms.positions[0, 0] if magnetic: atoms.set_initial_magnetic_moments(mms) if saturated: H_atoms0 = Atoms(saturate_element + str(m)) H_atoms0.positions = atoms[edge_index0].positions H_atoms0.positions[:, 0] -= C_H H_atoms1 = Atoms(saturate_element + str(m)) H_atoms1.positions = atoms[edge_index1].positions H_atoms1.positions[:, 0] += C_H atoms += H_atoms0 + H_atoms1 atoms.cell = [n * 3 * C_C / 2, 0, m * 4 * b] elif type == 'armchair': n *= 2 n_int = int(round(n)) if abs(n_int - n) > 1e-10: raise ValueError( 'The argument n has to be half-integer for armchair ribbons.') n = n_int for i in range(n // 2): layer = arm_unit.repeat((1, 1, m)) layer.positions[:, 0] -= 4 * b * i atoms += layer if n % 2: layer = arm_unit_half.repeat((1, 1, m)) layer.positions[:, 0] -= 4 * b * (n // 2) atoms += layer xmin = atoms.positions[-1, 0] if saturated: if n % 2: arm_right_saturation = Atoms(saturate_element + '2', pbc=(1, 0, 1), cell=[2 * b, 0, 3 * C_C]) arm_right_saturation.positions = [ [- sqrt(3) / 2 * C_H, 0, C_C / 2 - C_H * 0.5], [- sqrt(3) / 2 * C_H, 0, 3 * C_C / 2.0 + C_H * 0.5]] else: arm_right_saturation = Atoms(saturate_element + '2', pbc=(1, 0, 1), cell=[4 * b, 0, 3 * C_C]) arm_right_saturation.positions = [ [- sqrt(3) / 2 * C_H, 0, C_H * 0.5], [- sqrt(3) / 2 * C_H, 0, 2 * C_C - C_H * 0.5]] arm_left_saturation = Atoms(saturate_element + '2', pbc=(1, 0, 1), cell=[4 * b, 0, 3 * C_C]) arm_left_saturation.positions = [ [b * 2 + sqrt(3) / 2 * C_H, 0, C_C / 2 - C_H * 0.5], [b * 2 + sqrt(3) / 2 * C_H, 0, 3 * C_C / 2.0 + C_H * 0.5]] arm_right_saturation.positions[:, 0] -= 4 * b * (n / 2.0 - 1) atoms += arm_right_saturation.repeat((1, 1, m)) atoms += arm_left_saturation.repeat((1, 1, m)) atoms.cell = [b * 4 * n / 2.0, 0, 3 * C_C * m] atoms.set_pbc([sheet, False, True]) # The ribbon was 'built' from x=0 towards negative x. 
# Move the ribbon to positive x: atoms.positions[:, 0] -= xmin if not sheet: atoms.cell[0] = 0.0 if vacuum is not None: atoms.center(vacuum, axis=1) if not sheet: atoms.center(vacuum, axis=0) return atoms ase-3.22.1/ase/build/root.py000066400000000000000000000161011415166253600155630ustar00rootroot00000000000000from math import log10, atan2, cos, sin from ase.build import hcp0001, fcc111, bcc111 import numpy as np def hcp0001_root(symbol, root, size, a=None, c=None, vacuum=None, orthogonal=False): """HCP(0001) surface maniupulated to have a x unit side length of *root* before repeating. This also results in *root* number of repetitions of the cell. The first 20 valid roots for nonorthogonal are... 1, 3, 4, 7, 9, 12, 13, 16, 19, 21, 25, 27, 28, 31, 36, 37, 39, 43, 48, 49""" atoms = hcp0001(symbol=symbol, size=(1, 1, size[2]), a=a, c=c, vacuum=vacuum, orthogonal=orthogonal) atoms = root_surface(atoms, root) atoms *= (size[0], size[1], 1) return atoms def fcc111_root(symbol, root, size, a=None, vacuum=None, orthogonal=False): """FCC(111) surface maniupulated to have a x unit side length of *root* before repeating. This also results in *root* number of repetitions of the cell. The first 20 valid roots for nonorthogonal are... 1, 3, 4, 7, 9, 12, 13, 16, 19, 21, 25, 27, 28, 31, 36, 37, 39, 43, 48, 49""" atoms = fcc111(symbol=symbol, size=(1, 1, size[2]), a=a, vacuum=vacuum, orthogonal=orthogonal) atoms = root_surface(atoms, root) atoms *= (size[0], size[1], 1) return atoms def bcc111_root(symbol, root, size, a=None, vacuum=None, orthogonal=False): """BCC(111) surface maniupulated to have a x unit side length of *root* before repeating. This also results in *root* number of repetitions of the cell. The first 20 valid roots for nonorthogonal are... 1, 3, 4, 7, 9, 12, 13, 16, 19, 21, 25, 27, 28, 31, 36, 37, 39, 43, 48, 49""" atoms = bcc111(symbol=symbol, size=(1, 1, size[2]), a=a, vacuum=vacuum, orthogonal=orthogonal) atoms = root_surface(atoms, root) atoms *= (size[0], size[1], 1) return atoms def point_in_cell_2d(point, cell, eps=1e-8): """This function takes a 2D slice of the cell in the XY plane and calculates if a point should lie in it. This is used as a more accurate method of ensuring we find all of the correct cell repetitions in the root surface code. The Z axis is totally ignored but for most uses this should be fine. """ # Define area of a triangle def tri_area(t1, t2, t3): t1x, t1y = t1[0:2] t2x, t2y = t2[0:2] t3x, t3y = t3[0:2] return abs(t1x * (t2y - t3y) + t2x * (t3y - t1y) + t3x * (t1y - t2y)) / 2 # c0, c1, c2, c3 define a parallelogram c0 = (0, 0) c1 = cell[0, 0:2] c2 = cell[1, 0:2] c3 = c1 + c2 # Get area of parallelogram cA = tri_area(c0, c1, c2) + tri_area(c1, c2, c3) # Get area of triangles formed from adjacent vertices of parallelogram and # point in question. pA = tri_area(point, c0, c1) + tri_area(point, c1, c2) + tri_area(point, c2, c3) + tri_area(point, c3, c0) # If combined area of triangles from point is larger than area of # parallelogram, point is not inside parallelogram. return pA <= cA + eps def _root_cell_normalization(primitive_slab): """Returns the scaling factor for x axis and cell normalized by that factor""" xscale = np.linalg.norm(primitive_slab.cell[0, 0:2]) cell_vectors = primitive_slab.cell[0:2, 0:2] / xscale return xscale, cell_vectors def _root_surface_analysis(primitive_slab, root, eps=1e-8): """A tool to analyze a slab and look for valid roots that exist, up to the given root. This is useful for generating all possible cells without prior knowledge. 
*primitive slab* is the primitive cell to analyze. *root* is the desired root to find, and all below. This is the internal function which gives extra data to root_surface. """ # Setup parameters for cell searching logeps = int(-log10(eps)) xscale, cell_vectors = _root_cell_normalization(primitive_slab) # Allocate grid for cell search search points = np.indices((root + 1, root + 1)).T.reshape(-1, 2) # Find points corresponding to full cells cell_points = [cell_vectors[0] * x + cell_vectors[1] * y for x, y in points] # Find point close to the desired cell (floating point error possible) roots = np.around(np.linalg.norm(cell_points, axis=1)**2, logeps) valid_roots = np.nonzero(roots == root)[0] if len(valid_roots) == 0: raise ValueError("Invalid root {} for cell {}".format(root, cell_vectors)) int_roots = np.array([int(this_root) for this_root in roots if this_root.is_integer() and this_root <= root]) return cell_points, cell_points[np.nonzero(roots == root)[0][0]], set(int_roots[1:]) def root_surface_analysis(primitive_slab, root, eps=1e-8): """A tool to analyze a slab and look for valid roots that exist, up to the given root. This is useful for generating all possible cells without prior knowledge. *primitive slab* is the primitive cell to analyze. *root* is the desired root to find, and all below.""" return _root_surface_analysis(primitive_slab=primitive_slab, root=root, eps=eps)[2] def root_surface(primitive_slab, root, eps=1e-8): """Creates a cell from a primitive cell that repeats along the x and y axis in a way consisent with the primitive cell, that has been cut to have a side length of *root*. *primitive cell* should be a primitive 2d cell of your slab, repeated as needed in the z direction. *root* should be determined using an analysis tool such as the root_surface_analysis function, or prior knowledge. It should always be a whole number as it represents the number of repetitions.""" atoms = primitive_slab.copy() xscale, cell_vectors = _root_cell_normalization(primitive_slab) # Do root surface analysis cell_points, root_point, roots = _root_surface_analysis(primitive_slab, root, eps=eps) # Find new cell root_angle = -atan2(root_point[1], root_point[0]) root_rotation = [[cos(root_angle), -sin(root_angle)], [sin(root_angle), cos(root_angle)]] root_scale = np.linalg.norm(root_point) cell = np.array([np.dot(x, root_rotation) * root_scale for x in cell_vectors]) # Find all cell centers within the cell shift = cell_vectors.sum(axis=0) / 2 cell_points = [point for point in cell_points if point_in_cell_2d(point+shift, cell, eps=eps)] # Setup new cell atoms.rotate(root_angle, v="z") atoms *= (root, root, 1) atoms.cell[0:2, 0:2] = cell * xscale atoms.center() # Remove all extra atoms del atoms[[atom.index for atom in atoms if not point_in_cell_2d(atom.position, atoms.cell, eps=eps)]] # Rotate cell back to original orientation standard_rotation = [[cos(-root_angle), -sin(-root_angle), 0], [sin(-root_angle), cos(-root_angle), 0], [0, 0, 1]] new_cell = np.array([np.dot(x, standard_rotation) for x in atoms.cell]) new_positions = np.array([np.dot(x, standard_rotation) for x in atoms.positions]) atoms.cell = new_cell atoms.positions = new_positions return atoms ase-3.22.1/ase/build/rotate.py000066400000000000000000000046301415166253600161020ustar00rootroot00000000000000import numpy as np def rotation_matrix_from_points(m0, m1): """Returns a rigid transformation/rotation matrix that minimizes the RMSD between two set of points. 
m0 and m1 should be (3, npoints) numpy arrays with coordinates as columns:: (x1 x2 x3 ... xN y1 y2 y3 ... yN z1 z2 z3 ... zN) The centeroids should be set to origin prior to computing the rotation matrix. The rotation matrix is computed using quaternion algebra as detailed in:: Melander et al. J. Chem. Theory Comput., 2015, 11,1055 """ v0 = np.copy(m0) v1 = np.copy(m1) # compute the rotation quaternion R11, R22, R33 = np.sum(v0 * v1, axis=1) R12, R23, R31 = np.sum(v0 * np.roll(v1, -1, axis=0), axis=1) R13, R21, R32 = np.sum(v0 * np.roll(v1, -2, axis=0), axis=1) f = [[R11 + R22 + R33, R23 - R32, R31 - R13, R12 - R21], [R23 - R32, R11 - R22 - R33, R12 + R21, R13 + R31], [R31 - R13, R12 + R21, -R11 + R22 - R33, R23 + R32], [R12 - R21, R13 + R31, R23 + R32, -R11 - R22 + R33]] F = np.array(f) w, V = np.linalg.eigh(F) # eigenvector corresponding to the most # positive eigenvalue q = V[:, np.argmax(w)] # Rotation matrix from the quaternion q R = quaternion_to_matrix(q) return R def quaternion_to_matrix(q): """Returns a rotation matrix. Computed from a unit quaternion Input as (4,) numpy array. """ q0, q1, q2, q3 = q R_q = [[q0**2 + q1**2 - q2**2 - q3**2, 2 * (q1 * q2 - q0 * q3), 2 * (q1 * q3 + q0 * q2)], [2 * (q1 * q2 + q0 * q3), q0**2 - q1**2 + q2**2 - q3**2, 2 * (q2 * q3 - q0 * q1)], [2 * (q1 * q3 - q0 * q2), 2 * (q2 * q3 + q0 * q1), q0**2 - q1**2 - q2**2 + q3**2]] return np.array(R_q) def minimize_rotation_and_translation(target, atoms): """Minimize RMSD between atoms and target. Rotate and translate atoms to best match target. For more details, see:: Melander et al. J. Chem. Theory Comput., 2015, 11,1055 """ p = atoms.get_positions() p0 = target.get_positions() # centeroids to origin c = np.mean(p, axis=0) p -= c c0 = np.mean(p0, axis=0) p0 -= c0 # Compute rotation matrix R = rotation_matrix_from_points(p.T, p0.T) atoms.set_positions(np.dot(p, R.T) + c0) ase-3.22.1/ase/build/supercells.py000066400000000000000000000166411415166253600167720ustar00rootroot00000000000000"""Helper functions for creating supercells.""" import numpy as np from ase import Atoms class SupercellError(Exception): """Use if construction of supercell fails""" def get_deviation_from_optimal_cell_shape(cell, target_shape="sc", norm=None): r""" Calculates the deviation of the given cell metric from the ideal cell metric defining a certain shape. Specifically, the function evaluates the expression `\Delta = || Q \mathbf{h} - \mathbf{h}_{target}||_2`, where `\mathbf{h}` is the input metric (*cell*) and `Q` is a normalization factor (*norm*) while the target metric `\mathbf{h}_{target}` (via *target_shape*) represent simple cubic ('sc') or face-centered cubic ('fcc') cell shapes. Parameters: cell: 2D array of floats Metric given as a (3x3 matrix) of the input structure. target_shape: str Desired supercell shape. Can be 'sc' for simple cubic or 'fcc' for face-centered cubic. norm: float Specify the normalization factor. This is useful to avoid recomputing the normalization factor when computing the deviation for a series of P matrices. 
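
    A quick illustrative check: the identity metric is already perfectly
    cubic, so its deviation from the 'sc' target vanishes.

    >>> import numpy as np
    >>> from ase.build.supercells import get_deviation_from_optimal_cell_shape
    >>> float(get_deviation_from_optimal_cell_shape(np.eye(3), 'sc'))
    0.0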
""" if target_shape in ["sc", "simple-cubic"]: target_metric = np.eye(3) elif target_shape in ["fcc", "face-centered cubic"]: target_metric = 0.5 * np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]) if not norm: norm = (np.linalg.det(cell) / np.linalg.det(target_metric)) ** ( -1.0 / 3 ) return np.linalg.norm(norm * cell - target_metric) def find_optimal_cell_shape( cell, target_size, target_shape, lower_limit=-2, upper_limit=2, verbose=False, ): """Returns the transformation matrix that produces a supercell corresponding to *target_size* unit cells with metric *cell* that most closely approximates the shape defined by *target_shape*. Parameters: cell: 2D array of floats Metric given as a (3x3 matrix) of the input structure. target_size: integer Size of desired super cell in number of unit cells. target_shape: str Desired supercell shape. Can be 'sc' for simple cubic or 'fcc' for face-centered cubic. lower_limit: int Lower limit of search range. upper_limit: int Upper limit of search range. verbose: bool Set to True to obtain additional information regarding construction of transformation matrix. """ # Set up target metric if target_shape in ["sc", "simple-cubic"]: target_metric = np.eye(3) elif target_shape in ["fcc", "face-centered cubic"]: target_metric = 0.5 * np.array( [[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=float ) if verbose: print("target metric (h_target):") print(target_metric) # Normalize cell metric to reduce computation time during looping norm = ( target_size * np.linalg.det(cell) / np.linalg.det(target_metric) ) ** (-1.0 / 3) norm_cell = norm * cell if verbose: print("normalization factor (Q): %g" % norm) # Approximate initial P matrix ideal_P = np.dot(target_metric, np.linalg.inv(norm_cell)) if verbose: print("idealized transformation matrix:") print(ideal_P) starting_P = np.array(np.around(ideal_P, 0), dtype=int) if verbose: print("closest integer transformation matrix (P_0):") print(starting_P) # Prepare run. from itertools import product best_score = 1e6 optimal_P = None for dP in product(range(lower_limit, upper_limit + 1), repeat=9): dP = np.array(dP, dtype=int).reshape(3, 3) P = starting_P + dP if int(np.around(np.linalg.det(P), 0)) != target_size: continue score = get_deviation_from_optimal_cell_shape( np.dot(P, norm_cell), target_shape=target_shape, norm=1.0 ) if score < best_score: best_score = score optimal_P = P if optimal_P is None: print("Failed to find a transformation matrix.") return None # Finalize. if verbose: print("smallest score (|Q P h_p - h_target|_2): %f" % best_score) print("optimal transformation matrix (P_opt):") print(optimal_P) print("supercell metric:") print(np.round(np.dot(optimal_P, cell), 4)) print( "determinant of optimal transformation matrix: %g" % np.linalg.det(optimal_P) ) return optimal_P def make_supercell(prim, P, wrap=True, tol=1e-5): r"""Generate a supercell by applying a general transformation (*P*) to the input configuration (*prim*). The transformation is described by a 3x3 integer matrix `\mathbf{P}`. Specifically, the new cell metric `\mathbf{h}` is given in terms of the metric of the input configuration `\mathbf{h}_p` by `\mathbf{P h}_p = \mathbf{h}`. Parameters: prim: ASE Atoms object Input configuration. P: 3x3 integer matrix Transformation matrix `\mathbf{P}`. 
wrap: bool wrap in the end tol: float tolerance for wrapping """ supercell_matrix = P supercell = clean_matrix(supercell_matrix @ prim.cell) # cartesian lattice points lattice_points_frac = lattice_points_in_supercell(supercell_matrix) lattice_points = np.dot(lattice_points_frac, supercell) superatoms = Atoms(cell=supercell, pbc=prim.pbc) for lp in lattice_points: shifted_atoms = prim.copy() shifted_atoms.positions += lp superatoms.extend(shifted_atoms) # check number of atoms is correct n_target = int(np.round(np.linalg.det(supercell_matrix) * len(prim))) if n_target != len(superatoms): msg = "Number of atoms in supercell: {}, expected: {}".format( n_target, len(superatoms) ) raise SupercellError(msg) if wrap: superatoms.wrap(eps=tol) return superatoms def lattice_points_in_supercell(supercell_matrix): """Find all lattice points contained in a supercell. Adapted from pymatgen, which is available under MIT license: The MIT License (MIT) Copyright (c) 2011-2012 MIT & The Regents of the University of California, through Lawrence Berkeley National Laboratory """ diagonals = np.array( [ [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ] ) d_points = np.dot(diagonals, supercell_matrix) mins = np.min(d_points, axis=0) maxes = np.max(d_points, axis=0) + 1 ar = np.arange(mins[0], maxes[0])[:, None] * np.array([1, 0, 0])[None, :] br = np.arange(mins[1], maxes[1])[:, None] * np.array([0, 1, 0])[None, :] cr = np.arange(mins[2], maxes[2])[:, None] * np.array([0, 0, 1])[None, :] all_points = ar[:, None, None] + br[None, :, None] + cr[None, None, :] all_points = all_points.reshape((-1, 3)) frac_points = np.dot(all_points, np.linalg.inv(supercell_matrix)) tvects = frac_points[ np.all(frac_points < 1 - 1e-10, axis=1) & np.all(frac_points >= -1e-10, axis=1) ] assert len(tvects) == round(abs(np.linalg.det(supercell_matrix))) return tvects def clean_matrix(matrix, eps=1e-12): """ clean from small values""" matrix = np.array(matrix) for ij in np.ndindex(matrix.shape): if abs(matrix[ij]) < eps: matrix[ij] = 0 return matrix ase-3.22.1/ase/build/surface.py000066400000000000000000000445151415166253600162420ustar00rootroot00000000000000"""Helper functions for creating the most common surfaces and related tasks. The helper functions can create the most common low-index surfaces, add vacuum layers and add adsorbates. """ from math import sqrt from operator import itemgetter import numpy as np from ase.atom import Atom from ase.atoms import Atoms from ase.data import reference_states, atomic_numbers from ase.lattice.cubic import FaceCenteredCubic def fcc100(symbol, size, a=None, vacuum=None, orthogonal=True, periodic=False): """FCC(100) surface. Supported special adsorption sites: 'ontop', 'bridge', 'hollow'.""" if not orthogonal: raise NotImplementedError("Can't do non-orthogonal cell yet!") return _surface(symbol, 'fcc', '100', size, a, None, vacuum, periodic=periodic, orthogonal=orthogonal) def fcc110(symbol, size, a=None, vacuum=None, orthogonal=True, periodic=False): """FCC(110) surface. Supported special adsorption sites: 'ontop', 'longbridge', 'shortbridge', 'hollow'.""" if not orthogonal: raise NotImplementedError("Can't do non-orthogonal cell yet!") return _surface(symbol, 'fcc', '110', size, a, None, vacuum, periodic=periodic, orthogonal=orthogonal) def bcc100(symbol, size, a=None, vacuum=None, orthogonal=True, periodic=False): """BCC(100) surface. 
Supported special adsorption sites: 'ontop', 'bridge', 'hollow'.""" if not orthogonal: raise NotImplementedError("Can't do non-orthogonal cell yet!") return _surface(symbol, 'bcc', '100', size, a, None, vacuum, periodic=periodic, orthogonal=orthogonal) def bcc110(symbol, size, a=None, vacuum=None, orthogonal=False, periodic=False): """BCC(110) surface. Supported special adsorption sites: 'ontop', 'longbridge', 'shortbridge', 'hollow'. Use *orthogonal=True* to get an orthogonal unit cell - works only for size=(i,j,k) with j even.""" return _surface(symbol, 'bcc', '110', size, a, None, vacuum, periodic=periodic, orthogonal=orthogonal) def bcc111(symbol, size, a=None, vacuum=None, orthogonal=False, periodic=False): """BCC(111) surface. Supported special adsorption sites: 'ontop'. Use *orthogonal=True* to get an orthogonal unit cell - works only for size=(i,j,k) with j even.""" return _surface(symbol, 'bcc', '111', size, a, None, vacuum, periodic=periodic, orthogonal=orthogonal) def fcc111(symbol, size, a=None, vacuum=None, orthogonal=False, periodic=False): """FCC(111) surface. Supported special adsorption sites: 'ontop', 'bridge', 'fcc' and 'hcp'. Use *orthogonal=True* to get an orthogonal unit cell - works only for size=(i,j,k) with j even.""" return _surface(symbol, 'fcc', '111', size, a, None, vacuum, periodic=periodic, orthogonal=orthogonal) def hcp0001(symbol, size, a=None, c=None, vacuum=None, orthogonal=False, periodic=False): """HCP(0001) surface. Supported special adsorption sites: 'ontop', 'bridge', 'fcc' and 'hcp'. Use *orthogonal=True* to get an orthogonal unit cell - works only for size=(i,j,k) with j even.""" return _surface(symbol, 'hcp', '0001', size, a, c, vacuum, periodic=periodic, orthogonal=orthogonal) def hcp10m10(symbol, size, a=None, c=None, vacuum=None, orthogonal=True, periodic=False): """HCP(10m10) surface. Supported special adsorption sites: 'ontop'. Works only for size=(i,j,k) with j even.""" if not orthogonal: raise NotImplementedError("Can't do non-orthogonal cell yet!") return _surface(symbol, 'hcp', '10m10', size, a, c, vacuum, periodic=periodic, orthogonal=orthogonal) def diamond100(symbol, size, a=None, vacuum=None, orthogonal=True, periodic=False): """DIAMOND(100) surface. Supported special adsorption sites: 'ontop'.""" if not orthogonal: raise NotImplementedError("Can't do non-orthogonal cell yet!") return _surface(symbol, 'diamond', '100', size, a, None, vacuum, periodic=periodic, orthogonal=orthogonal) def diamond111(symbol, size, a=None, vacuum=None, orthogonal=False, periodic=False): """DIAMOND(111) surface. Supported special adsorption sites: 'ontop'.""" if orthogonal: raise NotImplementedError("Can't do orthogonal cell yet!") return _surface(symbol, 'diamond', '111', size, a, None, vacuum, periodic=periodic, orthogonal=orthogonal) def add_adsorbate(slab, adsorbate, height, position=(0, 0), offset=None, mol_index=0): """Add an adsorbate to a surface. This function adds an adsorbate to a slab. If the slab is produced by one of the utility functions in ase.build, it is possible to specify the position of the adsorbate by a keyword (the supported keywords depend on which function was used to create the slab). If the adsorbate is a molecule, the atom indexed by the mol_index optional argument is positioned on top of the adsorption position on the surface, and it is the responsibility of the user to orient the adsorbate in a sensible way. This function can be called multiple times to add more than one adsorbate. 
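
    A minimal illustration (values are only placeholders): put a hydrogen
    atom on the 'ontop' site of an Al(111) slab built with fcc111 from
    this module.

    >>> from ase.build import fcc111, add_adsorbate
    >>> slab = fcc111('Al', size=(2, 2, 3), vacuum=10.0)
    >>> add_adsorbate(slab, 'H', height=1.5, position='ontop')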
Parameters: slab: The surface onto which the adsorbate should be added. adsorbate: The adsorbate. Must be one of the following three types: A string containing the chemical symbol for a single atom. An atom object. An atoms object (for a molecular adsorbate). height: Height above the surface. position: The x-y position of the adsorbate, either as a tuple of two numbers or as a keyword (if the surface is produced by one of the functions in ase.build). offset (default: None): Offsets the adsorbate by a number of unit cells. Mostly useful when adding more than one adsorbate. mol_index (default: 0): If the adsorbate is a molecule, index of the atom to be positioned above the location specified by the position argument. Note *position* is given in absolute xy coordinates (or as a keyword), whereas offset is specified in unit cells. This can be used to give the positions in units of the unit cell by using *offset* instead. """ info = slab.info.get('adsorbate_info', {}) pos = np.array([0.0, 0.0]) # (x, y) part spos = np.array([0.0, 0.0]) # part relative to unit cell if offset is not None: spos += np.asarray(offset, float) if isinstance(position, str): # A site-name: if 'sites' not in info: raise TypeError('If the atoms are not made by an ' + 'ase.build function, ' + 'position cannot be a name.') if position not in info['sites']: raise TypeError('Adsorption site %s not supported.' % position) spos += info['sites'][position] else: pos += position if 'cell' in info: cell = info['cell'] else: cell = slab.get_cell()[:2, :2] pos += np.dot(spos, cell) # Convert the adsorbate to an Atoms object if isinstance(adsorbate, Atoms): ads = adsorbate elif isinstance(adsorbate, Atom): ads = Atoms([adsorbate]) else: # Assume it is a string representing a single Atom ads = Atoms([Atom(adsorbate)]) # Get the z-coordinate: if 'top layer atom index' in info: a = info['top layer atom index'] else: a = slab.positions[:, 2].argmax() if 'adsorbate_info' not in slab.info: slab.info['adsorbate_info'] = {} slab.info['adsorbate_info']['top layer atom index'] = a z = slab.positions[a, 2] + height # Move adsorbate into position ads.translate([pos[0], pos[1], z] - ads.positions[mol_index]) # Attach the adsorbate slab.extend(ads) def add_vacuum(atoms, vacuum): """Add vacuum layer to the atoms. Parameters: atoms: Atoms object Most likely created by one of the surface functions. vacuum: float The thickness of the vacuum layer (in Angstrom). """ uc = atoms.get_cell() normal = np.cross(uc[0], uc[1]) costheta = np.dot(normal, uc[2]) / np.sqrt(np.dot(normal, normal) * np.dot(uc[2], uc[2])) length = np.sqrt(np.dot(uc[2], uc[2])) newlength = length + vacuum / costheta uc[2] *= newlength / length atoms.set_cell(uc) def _surface(symbol, structure, face, size, a, c, vacuum, periodic, orthogonal=True): """Function to build often used surfaces. Don't call this function directly - use fcc100, fcc110, bcc111, ...""" Z = atomic_numbers[symbol] if a is None: sym = reference_states[Z]['symmetry'] if sym != structure: raise ValueError("Can't guess lattice constant for %s-%s!" 
% (structure, symbol)) a = reference_states[Z]['a'] if structure == 'hcp' and c is None: if reference_states[Z]['symmetry'] == 'hcp': c = reference_states[Z]['c/a'] * a else: c = sqrt(8 / 3.0) * a positions = np.empty((size[2], size[1], size[0], 3)) positions[..., 0] = np.arange(size[0]).reshape((1, 1, -1)) positions[..., 1] = np.arange(size[1]).reshape((1, -1, 1)) positions[..., 2] = np.arange(size[2]).reshape((-1, 1, 1)) numbers = np.ones(size[0] * size[1] * size[2], int) * Z tags = np.empty((size[2], size[1], size[0]), int) tags[:] = np.arange(size[2], 0, -1).reshape((-1, 1, 1)) slab = Atoms(numbers, tags=tags.ravel(), pbc=(True, True, periodic), cell=size) surface_cell = None sites = {'ontop': (0, 0)} surf = structure + face if surf == 'fcc100': cell = (sqrt(0.5), sqrt(0.5), 0.5) positions[-2::-2, ..., :2] += 0.5 sites.update({'hollow': (0.5, 0.5), 'bridge': (0.5, 0)}) elif surf == 'diamond100': cell = (sqrt(0.5), sqrt(0.5), 0.5 / 2) positions[-4::-4, ..., :2] += (0.5, 0.5) positions[-3::-4, ..., :2] += (0.0, 0.5) positions[-2::-4, ..., :2] += (0.0, 0.0) positions[-1::-4, ..., :2] += (0.5, 0.0) elif surf == 'fcc110': cell = (1.0, sqrt(0.5), sqrt(0.125)) positions[-2::-2, ..., :2] += 0.5 sites.update({'hollow': (0.5, 0.5), 'longbridge': (0.5, 0), 'shortbridge': (0, 0.5)}) elif surf == 'bcc100': cell = (1.0, 1.0, 0.5) positions[-2::-2, ..., :2] += 0.5 sites.update({'hollow': (0.5, 0.5), 'bridge': (0.5, 0)}) else: if orthogonal and size[1] % 2 == 1: raise ValueError(("Can't make orthorhombic cell with size=%r. " % (tuple(size),)) + 'Second number in size must be even.') if surf == 'fcc111': cell = (sqrt(0.5), sqrt(0.375), 1 / sqrt(3)) if orthogonal: positions[-1::-3, 1::2, :, 0] += 0.5 positions[-2::-3, 1::2, :, 0] += 0.5 positions[-3::-3, 1::2, :, 0] -= 0.5 positions[-2::-3, ..., :2] += (0.0, 2.0 / 3) positions[-3::-3, ..., :2] += (0.5, 1.0 / 3) else: positions[-2::-3, ..., :2] += (-1.0 / 3, 2.0 / 3) positions[-3::-3, ..., :2] += (1.0 / 3, 1.0 / 3) sites.update({'bridge': (0.5, 0), 'fcc': (1.0 / 3, 1.0 / 3), 'hcp': (2.0 / 3, 2.0 / 3)}) elif surf == 'diamond111': cell = (sqrt(0.5), sqrt(0.375), 1 / sqrt(3) / 2) assert not orthogonal positions[-1::-6, ..., :3] += (0.0, 0.0, 0.5) positions[-2::-6, ..., :2] += (0.0, 0.0) positions[-3::-6, ..., :3] += (-1.0 / 3, 2.0 / 3, 0.5) positions[-4::-6, ..., :2] += (-1.0 / 3, 2.0 / 3) positions[-5::-6, ..., :3] += (1.0 / 3, 1.0 / 3, 0.5) positions[-6::-6, ..., :2] += (1.0 / 3, 1.0 / 3) elif surf == 'hcp0001': cell = (1.0, sqrt(0.75), 0.5 * c / a) if orthogonal: positions[:, 1::2, :, 0] += 0.5 positions[-2::-2, ..., :2] += (0.0, 2.0 / 3) else: positions[-2::-2, ..., :2] += (-1.0 / 3, 2.0 / 3) sites.update({'bridge': (0.5, 0), 'fcc': (1.0 / 3, 1.0 / 3), 'hcp': (2.0 / 3, 2.0 / 3)}) elif surf == 'hcp10m10': cell = (1.0, 0.5 * c / a, sqrt(0.75)) assert orthogonal positions[-2::-2, ..., 0] += 0.5 positions[:, ::2, :, 2] += 2.0 / 3 elif surf == 'bcc110': cell = (1.0, sqrt(0.5), sqrt(0.5)) if orthogonal: positions[:, 1::2, :, 0] += 0.5 positions[-2::-2, ..., :2] += (0.0, 1.0) else: positions[-2::-2, ..., :2] += (-0.5, 1.0) sites.update({'shortbridge': (0, 0.5), 'longbridge': (0.5, 0), 'hollow': (0.375, 0.25)}) elif surf == 'bcc111': cell = (sqrt(2), sqrt(1.5), sqrt(3) / 6) if orthogonal: positions[-1::-3, 1::2, :, 0] += 0.5 positions[-2::-3, 1::2, :, 0] += 0.5 positions[-3::-3, 1::2, :, 0] -= 0.5 positions[-2::-3, ..., :2] += (0.0, 2.0 / 3) positions[-3::-3, ..., :2] += (0.5, 1.0 / 3) else: positions[-2::-3, ..., :2] += (-1.0 / 3, 2.0 / 3) positions[-3::-3, 
..., :2] += (1.0 / 3, 1.0 / 3) sites.update({'hollow': (1.0 / 3, 1.0 / 3)}) else: 2 / 0 surface_cell = a * np.array([(cell[0], 0), (cell[0] / 2, cell[1])]) if not orthogonal: cell = np.array([(cell[0], 0, 0), (cell[0] / 2, cell[1], 0), (0, 0, cell[2])]) if surface_cell is None: surface_cell = a * np.diag(cell[:2]) if isinstance(cell, tuple): cell = np.diag(cell) slab.set_positions(positions.reshape((-1, 3))) slab.set_cell([a * v * n for v, n in zip(cell, size)], scale_atoms=True) if not periodic: slab.cell[2] = 0.0 if vacuum is not None: slab.center(vacuum, axis=2) if 'adsorbate_info' not in slab.info: slab.info.update({'adsorbate_info': {}}) slab.info['adsorbate_info']['cell'] = surface_cell slab.info['adsorbate_info']['sites'] = sites return slab def fcc211(symbol, size, a=None, vacuum=None, orthogonal=True): """FCC(211) surface. Does not currently support special adsorption sites. Currently only implemented for *orthogonal=True* with size specified as (i, j, k), where i, j, and k are number of atoms in each direction. i must be divisible by 3 to accommodate the step width. """ if not orthogonal: raise NotImplementedError('Only implemented for orthogonal ' 'unit cells.') if size[0] % 3 != 0: raise NotImplementedError('First dimension of size must be ' 'divisible by 3.') atoms = FaceCenteredCubic(symbol, directions=[[1, -1, -1], [0, 2, -2], [2, 1, 1]], miller=(None, None, (2, 1, 1)), latticeconstant=a, size=(1, 1, 1), pbc=True) z = (size[2] + 1) // 2 atoms = atoms.repeat((size[0] // 3, size[1], z)) if size[2] % 2: # Odd: remove bottom layer and shrink cell. remove_list = [atom.index for atom in atoms if atom.z < atoms[1].z] del atoms[remove_list] dz = atoms[0].z atoms.translate((0., 0., -dz)) atoms.cell[2][2] -= dz atoms.cell[2] = 0.0 atoms.pbc[2] = False if vacuum: atoms.center(vacuum, axis=2) # Renumber systematically from top down. orders = [(atom.index, round(atom.x, 3), round(atom.y, 3), -round(atom.z, 3), atom.index) for atom in atoms] orders.sort(key=itemgetter(3, 1, 2)) newatoms = atoms.copy() for index, order in enumerate(orders): newatoms[index].position = atoms[order[0]].position.copy() # Add empty 'sites' dictionary for consistency with other functions newatoms.info['adsorbate_info'] = {'sites': {}} return newatoms def mx2(formula='MoS2', kind='2H', a=3.18, thickness=3.19, size=(1, 1, 1), vacuum=None): """Create three-layer 2D materials with hexagonal structure. For metal dichalcogenites, etc. The kind argument accepts '2H', which gives a mirror plane symmetry and '1T', which gives an inversion symmetry.""" if kind == '2H': basis = [(0, 0, 0), (2 / 3, 1 / 3, 0.5 * thickness), (2 / 3, 1 / 3, -0.5 * thickness)] elif kind == '1T': basis = [(0, 0, 0), (2 / 3, 1 / 3, 0.5 * thickness), (1 / 3, 2 / 3, -0.5 * thickness)] else: raise ValueError('Structure not recognized:', kind) cell = [[a, 0, 0], [-a / 2, a * 3**0.5 / 2, 0], [0, 0, 0]] atoms = Atoms(formula, cell=cell, pbc=(1, 1, 0)) atoms.set_scaled_positions(basis) if vacuum is not None: atoms.center(vacuum, axis=2) atoms = atoms.repeat(size) return atoms def graphene(formula='C2', a=2.460, size=(1, 1, 1), vacuum=None): """Create a graphene monolayer structure.""" cell = [[a, 0, 0], [-a / 2, a * 3**0.5 / 2, 0], [0, 0, 0]] basis = [[0, 0, 0], [2 / 3, 1 / 3, 0]] atoms = Atoms(formula, cell=cell, pbc=(1, 1, 0)) atoms.set_scaled_positions(basis) if vacuum is not None: atoms.center(vacuum, axis=2) atoms = atoms.repeat(size) return atoms def _all_surface_functions(): # Convenient for debugging. 
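    # Returns a mapping from builder name to the builder function itself,
    # e.g. d['fcc111'] is the fcc111 helper defined above.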
d = {} for func in [fcc100, fcc110, bcc100, bcc110, bcc111, fcc111, hcp0001, hcp10m10, diamond100, diamond111, fcc111, mx2, graphene]: d[func.__name__] = func return d ase-3.22.1/ase/build/surfaces_with_termination.py000066400000000000000000000143441415166253600220660ustar00rootroot00000000000000import numpy as np from ase.build.general_surface import surface from ase.geometry import get_layers from ase.symbols import string2symbols def surfaces_with_termination(lattice, indices, layers, vacuum=None, tol=1e-10, termination=None, return_all=False, verbose=False): """Create surface from a given lattice and Miller indices with a given termination Parameters ========== lattice: Atoms object or str Bulk lattice structure of alloy or pure metal. Note that the unit-cell must be the conventional cell - not the primitive cell. One can also give the chemical symbol as a string, in which case the correct bulk lattice will be generated automatically. indices: sequence of three int Surface normal in Miller indices (h,k,l). layers: int Number of equivalent layers of the slab. (not the same as the layers you choose from for terminations) vacuum: float Amount of vacuum added on both sides of the slab. termination: str the atoms you wish to be in the top layer. There may be many such terminations, this function returns all terminations with the same atomic composition. e.g. 'O' will return oxygen terminated surfaces. e.g.'TiO' will return surfaces terminated with layers containing both O and Ti Returns: return_surfs: List a list of surfaces that match the specifications given """ lats = translate_lattice(lattice, indices) return_surfs = [] check = [] check2 = [] for item in lats: too_similar = False surf = surface(item, indices, layers, vacuum=vacuum, tol=tol) surf.wrap(pbc=[True] * 3) # standardize slabs positions = surf.get_scaled_positions().flatten() for i, value in enumerate(positions): if value >= 1 - tol: # move things closer to zero within tol positions[i] -= 1 surf.set_scaled_positions(np.reshape(positions, (len(surf), 3))) #rep = find_z_layers(surf) z_layers, hs = get_layers(surf, (0, 0, 1)) # just z layers matter # get the indicies of the atoms in the highest layer top_layer = [i for i, val in enumerate(z_layers == max(z_layers)) if val] if termination is not None: comp = [surf.get_chemical_symbols()[a] for a in top_layer] term = string2symbols(termination) # list atoms in top layer and not in requested termination check = [a for a in comp if a not in term] # list of atoms in requested termination and not in top layer check2 = [a for a in term if a not in comp] if len(return_surfs) > 0: pos_diff = [a.get_positions() - surf.get_positions() for a in return_surfs] for i, su in enumerate(pos_diff): similarity_test = su.flatten() < tol * 1000 if similarity_test.all(): # checks if surface is too similar to another surface too_similar = True if too_similar: continue if return_all is True: pass elif check != [] or check2 != []: continue return_surfs.append(surf) return return_surfs def translate_lattice(lattice, indices, tol=10**-3): """translates a bulk unit cell along a normal vector given by the a set of miller indices to the next symetric position. This is used to control the termination of the surface in the smart_surface command Parameters: ========== lattice: Atoms object atoms object of the bulk unit cell indices: 1x3 list,tuple, or numpy array the miller indices you wish to cut along. 
returns: lattice_list: list of Atoms objects a list of all the different translations of the unit cell that will yield different terminations of a surface cut along the miller indices provided. """ lattice_list = [] cell = lattice.get_cell() pt = [0, 0, 0] h, k, l = indices millers = list(indices) for index, item in enumerate(millers): if item == 0: millers[index] = 10**9 # make zeros large numbers elif pt == [0, 0, 0]: # for numerical stability pt = list(cell[index] / float(item) / np.linalg.norm(cell[index])) h1, k1, l1 = millers N = np.array(cell[0] / h1 + cell[1] / k1 + cell[2] / l1) n = N / np.linalg.norm(N) # making a unit vector normal to cut plane # finding distance from cut plan vector d = [np.round(np.dot(n, (a - pt)) * n, 5) for a in lattice.get_scaled_positions()] duplicates = [] for i, item in enumerate(d): g = [True for a in d[i + 1:] if np.linalg.norm(a - item) < tol] if g != []: duplicates.append(i) duplicates.reverse() for i in duplicates: del d[i] # put distance to the plane at the end of the array for i, item in enumerate(d): d[i] = np.append(item, np.dot(n, (lattice.get_scaled_positions()[i] - pt))) d = np.array(d) d = d[d[:, 3].argsort()] # sort by distance to the plane d = [a[:3] for a in d] # remove distance d = list(d) # make it a list again for i in d: """ The above method gives you the boundries of between terminations that will allow you to build a complete set of terminations. However, it does not return all the boundries. Thus you must check both above and below the boundary, and not stray too far from the boundary. If you move too far away, you risk hitting another boundary you did not find. """ lattice1 = lattice.copy() displacement = (h * cell[0] + k * cell[1] + l * cell[2]) \ * (i + 10 ** -8) lattice1.positions -= displacement lattice_list.append(lattice1) lattice1 = lattice.copy() displacement = (h * cell[0] + k * cell[1] + l * cell[2]) \ * (i - 10 ** -8) lattice1.positions -= displacement lattice_list.append(lattice1) return lattice_list ase-3.22.1/ase/build/tools.py000066400000000000000000000553351415166253600157540ustar00rootroot00000000000000import numpy as np def cut(atoms, a=(1, 0, 0), b=(0, 1, 0), c=None, clength=None, origo=(0, 0, 0), nlayers=None, extend=1.0, tolerance=0.01, maxatoms=None): """Cuts out a cell defined by *a*, *b*, *c* and *origo* from a sufficiently repeated copy of *atoms*. Typically, this function is used to create slabs of different sizes and orientations. The vectors *a*, *b* and *c* are in scaled coordinates and defines the returned cell and should normally be integer-valued in order to end up with a periodic structure. However, for systems with sub-translations, like fcc, integer multiples of 1/2 or 1/3 might also make sense for some directions (and will be treated correctly). Parameters: atoms: Atoms instance This should correspond to a repeatable unit cell. a: int | 3 floats The a-vector in scaled coordinates of the cell to cut out. If integer, the a-vector will be the scaled vector from *origo* to the atom with index *a*. b: int | 3 floats The b-vector in scaled coordinates of the cell to cut out. If integer, the b-vector will be the scaled vector from *origo* to the atom with index *b*. c: None | int | 3 floats The c-vector in scaled coordinates of the cell to cut out. if integer, the c-vector will be the scaled vector from *origo* to the atom with index *c*. If *None* it will be along cross(a, b) converted to real space and normalised with the cube root of the volume. 
Note that this in general is not perpendicular to a and b for non-cubic systems. For cubic systems however, this is redused to c = cross(a, b). clength: None | float If not None, the length of the c-vector will be fixed to *clength* Angstroms. Should not be used together with *nlayers*. origo: int | 3 floats Position of origo of the new cell in scaled coordinates. If integer, the position of the atom with index *origo* is used. nlayers: None | int If *nlayers* is not *None*, the returned cell will have *nlayers* atomic layers in the c-direction. extend: 1 or 3 floats The *extend* argument scales the effective cell in which atoms will be included. It must either be three floats or a single float scaling all 3 directions. By setting to a value just above one, e.g. 1.05, it is possible to all the corner and edge atoms in the returned cell. This will of cause make the returned cell non-repeatable, but is very useful for visualisation. tolerance: float Determines what is defined as a plane. All atoms within *tolerance* Angstroms from a given plane will be considered to belong to that plane. maxatoms: None | int This option is used to auto-tune *tolerance* when *nlayers* is given for high zone axis systems. For high zone axis one needs to reduce *tolerance* in order to distinguise the atomic planes, resulting in the more atoms will be added and eventually MemoryError. A too small *tolerance*, on the other hand, might result in inproper splitting of atomic planes and that too few layers are returned. If *maxatoms* is not None, *tolerance* will automatically be gradually reduced until *nlayers* atomic layers is obtained, when the number of atoms exceeds *maxatoms*. Example: >>> import ase >>> from ase.spacegroup import crystal >>> # Create an aluminium (111) slab with three layers # # First an unit cell of Al >>> a = 4.05 >>> aluminium = crystal('Al', [(0,0,0)], spacegroup=225, ... cellpar=[a, a, a, 90, 90, 90]) >>> # Then cut out the slab >>> al111 = cut(aluminium, (1,-1,0), (0,1,-1), nlayers=3) >>> # Visualisation of the skutterudite unit cell # # Again, create a skutterudite unit cell >>> a = 9.04 >>> skutterudite = crystal( ... ('Co', 'Sb'), ... basis=[(0.25,0.25,0.25), (0.0, 0.335, 0.158)], ... spacegroup=204, ... cellpar=[a, a, a, 90, 90, 90]) >>> # Then use *origo* to put 'Co' at the corners and *extend* to # include all corner and edge atoms. >>> s = cut(skutterudite, origo=(0.25, 0.25, 0.25), extend=1.01) >>> ase.view(s) # doctest: +SKIP """ atoms = atoms.copy() cell = atoms.cell if isinstance(origo, int): origo = atoms.get_scaled_positions()[origo] origo = np.array(origo, dtype=float) scaled = (atoms.get_scaled_positions() - origo) % 1.0 scaled %= 1.0 # needed to ensure that all numbers are *less* than one atoms.set_scaled_positions(scaled) if isinstance(a, int): a = scaled[a] - origo if isinstance(b, int): b = scaled[b] - origo if isinstance(c, int): c = scaled[c] - origo a = np.array(a, dtype=float) b = np.array(b, dtype=float) if c is None: metric = np.dot(cell, cell.T) vol = np.sqrt(np.linalg.det(metric)) h = np.cross(a, b) H = np.linalg.solve(metric.T, h.T) c = vol * H / vol**(1. / 3.) 
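    # At this point c is either the user-supplied vector or the default
    # derived above from cross(a, b); ensure it is a float array before
    # it is used (and possibly rescaled) below.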
c = np.array(c, dtype=float) if nlayers: # Recursive increase the length of c until we have at least # *nlayers* atomic layers parallel to the a-b plane while True: at = cut(atoms, a, b, c, origo=origo, extend=extend, tolerance=tolerance) scaled = at.get_scaled_positions() d = scaled[:, 2] keys = np.argsort(d) ikeys = np.argsort(keys) tol = tolerance while True: mask = np.concatenate(([True], np.diff(d[keys]) > tol)) tags = np.cumsum(mask)[ikeys] - 1 levels = d[keys][mask] if (maxatoms is None or len(at) < maxatoms or len(levels) > nlayers): break tol *= 0.9 if len(levels) > nlayers: break c *= 2 at.cell[2] *= levels[nlayers] return at[tags < nlayers] newcell = np.dot(np.array([a, b, c]), cell) if nlayers is None and clength is not None: newcell[2, :] *= clength / np.linalg.norm(newcell[2]) # Create a new atoms object, repeated and translated such that # it completely covers the new cell scorners_newcell = np.array([[0., 0., 0.], [0., 0., 1.], [0., 1., 0.], [0., 1., 1.], [1., 0., 0.], [1., 0., 1.], [1., 1., 0.], [1., 1., 1.]]) corners = np.dot(scorners_newcell, newcell * extend) scorners = np.linalg.solve(cell.T, corners.T).T rep = np.ceil(scorners.ptp(axis=0)).astype('int') + 1 trans = np.dot(np.floor(scorners.min(axis=0)), cell) atoms = atoms.repeat(rep) atoms.translate(trans) atoms.set_cell(newcell) # Mask out atoms outside new cell stol = 0.1 * tolerance # scaled tolerance, XXX maskcell = atoms.cell * extend sp = np.linalg.solve(maskcell.T, (atoms.positions).T).T mask = np.all(np.logical_and(-stol <= sp, sp < 1 - stol), axis=1) atoms = atoms[mask] return atoms class IncompatibleCellError(ValueError): """Exception raised if stacking fails due to incompatible cells between *atoms1* and *atoms2*.""" pass def stack(atoms1, atoms2, axis=2, cell=None, fix=0.5, maxstrain=0.5, distance=None, reorder=False, output_strained=False): """Return a new Atoms instance with *atoms2* stacked on top of *atoms1* along the given axis. Periodicity in all directions is ensured. The size of the final cell is determined by *cell*, except that the length alongh *axis* will be the sum of *atoms1.cell[axis]* and *atoms2.cell[axis]*. If *cell* is None, it will be interpolated between *atoms1* and *atoms2*, where *fix* determines their relative weight. Hence, if *fix* equals zero, the final cell will be determined purely from *atoms1* and if *fix* equals one, it will be determined purely from *atoms2*. An ase.geometry.IncompatibleCellError exception is raised if the cells of *atoms1* and *atoms2* are incompatible, e.g. if the far corner of the unit cell of either *atoms1* or *atoms2* is displaced more than *maxstrain*. Setting *maxstrain* to None disables this check. If *distance* is not None, the size of the final cell, along the direction perpendicular to the interface, will be adjusted such that the distance between the closest atoms in *atoms1* and *atoms2* will be equal to *distance*. This option uses scipy.optimize.fmin() and hence require scipy to be installed. If *reorder* is True, then the atoms will be reordered such that all atoms with the same symbol will follow sequencially after each other, eg: 'Al2MnAl10Fe' -> 'Al12FeMn'. If *output_strained* is True, then the strained versions of *atoms1* and *atoms2* are returned in addition to the stacked structure. Example: >>> import ase >>> from ase.spacegroup import crystal >>> # Create an Ag(110)-Si(110) interface with three atomic layers # on each side. >>> a_ag = 4.09 >>> ag = crystal(['Ag'], basis=[(0,0,0)], spacegroup=225, ... 
cellpar=[a_ag, a_ag, a_ag, 90., 90., 90.]) >>> ag110 = cut(ag, (0, 0, 3), (-1.5, 1.5, 0), nlayers=3) >>> >>> a_si = 5.43 >>> si = crystal(['Si'], basis=[(0,0,0)], spacegroup=227, ... cellpar=[a_si, a_si, a_si, 90., 90., 90.]) >>> si110 = cut(si, (0, 0, 2), (-1, 1, 0), nlayers=3) >>> >>> interface = stack(ag110, si110, maxstrain=1) >>> ase.view(interface) # doctest: +SKIP >>> # Once more, this time adjusted such that the distance between # the closest Ag and Si atoms will be 2.3 Angstrom (requires scipy). >>> interface2 = stack(ag110, si110, ... maxstrain=1, distance=2.3) # doctest:+ELLIPSIS Optimization terminated successfully. ... >>> ase.view(interface2) # doctest: +SKIP """ atoms1 = atoms1.copy() atoms2 = atoms2.copy() for atoms in [atoms1, atoms2]: if not atoms.cell[axis].any(): atoms.center(vacuum=0.0, axis=axis) if (np.sign(np.linalg.det(atoms1.cell)) != np.sign(np.linalg.det(atoms2.cell))): raise IncompatibleCellError('Cells of *atoms1* and *atoms2* must have ' 'same handedness.') c1 = np.linalg.norm(atoms1.cell[axis]) c2 = np.linalg.norm(atoms2.cell[axis]) if cell is None: cell1 = atoms1.cell.copy() cell2 = atoms2.cell.copy() cell1[axis] /= c1 cell2[axis] /= c2 cell = cell1 + fix * (cell2 - cell1) cell[axis] /= np.linalg.norm(cell[axis]) cell1 = cell.copy() cell2 = cell.copy() cell1[axis] *= c1 cell2[axis] *= c2 if maxstrain: strain1 = np.sqrt(((cell1 - atoms1.cell).sum(axis=0)**2).sum()) strain2 = np.sqrt(((cell2 - atoms2.cell).sum(axis=0)**2).sum()) if strain1 > maxstrain or strain2 > maxstrain: raise IncompatibleCellError( '*maxstrain* exceeded. *atoms1* strained %f and ' '*atoms2* strained %f.' % (strain1, strain2)) atoms1.set_cell(cell1, scale_atoms=True) atoms2.set_cell(cell2, scale_atoms=True) if output_strained: atoms1_strained = atoms1.copy() atoms2_strained = atoms2.copy() if distance is not None: from scipy.optimize import fmin def mindist(pos1, pos2): n1 = len(pos1) n2 = len(pos2) idx1 = np.arange(n1).repeat(n2) idx2 = np.tile(np.arange(n2), n1) return np.sqrt(((pos1[idx1] - pos2[idx2])**2).sum(axis=1).min()) def func(x): t1, t2, h1, h2 = x[0:3], x[3:6], x[6], x[7] pos1 = atoms1.positions + t1 pos2 = atoms2.positions + t2 d1 = mindist(pos1, pos2 + (h1 + 1.0) * atoms1.cell[axis]) d2 = mindist(pos2, pos1 + (h2 + 1.0) * atoms2.cell[axis]) return (d1 - distance)**2 + (d2 - distance)**2 atoms1.center() atoms2.center() x0 = np.zeros((8,)) x = fmin(func, x0) t1, t2, h1, h2 = x[0:3], x[3:6], x[6], x[7] atoms1.translate(t1) atoms2.translate(t2) atoms1.cell[axis] *= 1.0 + h1 atoms2.cell[axis] *= 1.0 + h2 atoms2.translate(atoms1.cell[axis]) atoms1.cell[axis] += atoms2.cell[axis] atoms1.extend(atoms2) if reorder: atoms1 = sort(atoms1) if output_strained: return atoms1, atoms1_strained, atoms2_strained else: return atoms1 def rotation_matrix(a1, a2, b1, b2): """Returns a rotation matrix that rotates the vectors *a1* in the direction of *a2* and *b1* in the direction of *b2*. In the case that the angle between *a2* and *b2* is not the same as between *a1* and *b1*, a proper rotation matrix will anyway be constructed by first rotate *b2* in the *b1*, *b2* plane. """ a1 = np.asarray(a1, dtype=float) / np.linalg.norm(a1) b1 = np.asarray(b1, dtype=float) / np.linalg.norm(b1) c1 = np.cross(a1, b1) c1 /= np.linalg.norm(c1) # clean out rounding errors... a2 = np.asarray(a2, dtype=float) / np.linalg.norm(a2) b2 = np.asarray(b2, dtype=float) / np.linalg.norm(b2) c2 = np.cross(a2, b2) c2 /= np.linalg.norm(c2) # clean out rounding errors... 
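    # If the (a2, b2) opening angle differs from the (a1, b1) one, b2 is
    # first rotated within the plane spanned by a2 and b2 (b3 below), as
    # described in the docstring.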
# Calculate rotated *b2* theta = np.arccos(np.dot(a2, b2)) - np.arccos(np.dot(a1, b1)) b3 = np.sin(theta) * a2 + np.cos(theta) * b2 b3 /= np.linalg.norm(b3) # clean out rounding errors... A1 = np.array([a1, b1, c1]) A2 = np.array([a2, b3, c2]) R = np.linalg.solve(A1, A2).T return R def rotate(atoms, a1, a2, b1, b2, rotate_cell=True, center=(0, 0, 0)): """Rotate *atoms*, such that *a1* will be rotated in the direction of *a2* and *b1* in the direction of *b2*. The point at *center* is fixed. Use *center='COM'* to fix the center of mass. If *rotate_cell* is true, the cell will be rotated together with the atoms. Note that the 000-corner of the cell is by definition fixed at origo. Hence, setting *center* to something other than (0, 0, 0) will rotate the atoms out of the cell, even if *rotate_cell* is True. """ if isinstance(center, str) and center.lower() == 'com': center = atoms.get_center_of_mass() R = rotation_matrix(a1, a2, b1, b2) atoms.positions[:] = np.dot(atoms.positions - center, R.T) + center if rotate_cell: atoms.cell[:] = np.dot(atoms.cell, R.T) def minimize_tilt_ij(atoms, modified=1, fixed=0, fold_atoms=True): """Minimize the tilt angle for two given axes. The problem is underdetermined. Therefore one can choose one axis that is kept fixed. """ orgcell_cc = atoms.get_cell() pbc_c = atoms.get_pbc() i = fixed j = modified if not (pbc_c[i] and pbc_c[j]): raise RuntimeError('Axes have to be periodic') prod_cc = np.dot(orgcell_cc, orgcell_cc.T) cell_cc = 1. * orgcell_cc nji = np.floor(- prod_cc[i, j] / prod_cc[i, i] + 0.5) cell_cc[j] = orgcell_cc[j] + nji * cell_cc[i] # sanity check def volume(cell): return np.abs(np.dot(cell[2], np.cross(cell[0], cell[1]))) V = volume(cell_cc) assert(abs(volume(orgcell_cc) - V) / V < 1.e-10) atoms.set_cell(cell_cc) if fold_atoms: atoms.wrap() def minimize_tilt(atoms, order=range(3), fold_atoms=True): """Minimize the tilt angles of the unit cell.""" pbc_c = atoms.get_pbc() for i1, c1 in enumerate(order): for c2 in order[i1 + 1:]: if pbc_c[c1] and pbc_c[c2]: minimize_tilt_ij(atoms, c1, c2, fold_atoms) def niggli_reduce_cell(cell, epsfactor=None): from ase.geometry import cellpar_to_cell if epsfactor is None: epsfactor = 1e-5 eps = epsfactor * abs(np.linalg.det(cell))**(1./3.) 
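    # eps sets the tolerance used by the lt/gt/eq comparisons below and
    # scales with the linear dimension of the cell, |det(cell)|**(1/3).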
cell = np.asarray(cell) I3 = np.eye(3, dtype=int) I6 = np.eye(6, dtype=int) C = I3.copy() D = I6.copy() g0 = np.zeros(6, dtype=float) g0[0] = np.dot(cell[0], cell[0]) g0[1] = np.dot(cell[1], cell[1]) g0[2] = np.dot(cell[2], cell[2]) g0[3] = 2 * np.dot(cell[1], cell[2]) g0[4] = 2 * np.dot(cell[0], cell[2]) g0[5] = 2 * np.dot(cell[0], cell[1]) g = np.dot(D, g0) def lt(x, y, eps=eps): return x < y - eps def gt(x, y, eps=eps): return lt(y, x, eps) def eq(x, y, eps=eps): return not (lt(x, y, eps) or gt(x, y, eps)) for _ in range(10000): if (gt(g[0], g[1]) or (eq(g[0], g[1]) and gt(abs(g[3]), abs(g[4])))): C = np.dot(C, -I3[[1, 0, 2]]) D = np.dot(I6[[1, 0, 2, 4, 3, 5]], D) g = np.dot(D, g0) continue elif (gt(g[1], g[2]) or (eq(g[1], g[2]) and gt(abs(g[4]), abs(g[5])))): C = np.dot(C, -I3[[0, 2, 1]]) D = np.dot(I6[[0, 2, 1, 3, 5, 4]], D) g = np.dot(D, g0) continue lmn = np.array(gt(g[3:], 0, eps=eps/2), dtype=int) lmn -= np.array(lt(g[3:], 0, eps=eps/2), dtype=int) if lmn.prod() == 1: ijk = lmn.copy() for idx in range(3): if ijk[idx] == 0: ijk[idx] = 1 else: ijk = np.ones(3, dtype=int) if np.any(lmn != -1): r = None for idx in range(3): if lmn[idx] == 1: ijk[idx] = -1 elif lmn[idx] == 0: r = idx if ijk.prod() == -1: ijk[r] = -1 C *= ijk[np.newaxis] D[3] *= ijk[1] * ijk[2] D[4] *= ijk[0] * ijk[2] D[5] *= ijk[0] * ijk[1] g = np.dot(D, g0) if (gt(abs(g[3]), g[1]) or (eq(g[3], g[1]) and lt(2 * g[4], g[5])) or (eq(g[3], -g[1]) and lt(g[5], 0))): s = int(np.sign(g[3])) A = I3.copy() A[1, 2] = -s C = np.dot(C, A) B = I6.copy() B[2, 1] = 1 B[2, 3] = -s B[3, 1] = -2 * s B[4, 5] = -s D = np.dot(B, D) g = np.dot(D, g0) elif (gt(abs(g[4]), g[0]) or (eq(g[4], g[0]) and lt(2 * g[3], g[5])) or (eq(g[4], -g[0]) and lt(g[5], 0))): s = int(np.sign(g[4])) A = I3.copy() A[0, 2] = -s C = np.dot(C, A) B = I6.copy() B[2, 0] = 1 B[2, 4] = -s B[3, 5] = -s B[4, 0] = -2 * s D = np.dot(B, D) g = np.dot(D, g0) elif (gt(abs(g[5]), g[0]) or (eq(g[5], g[0]) and lt(2 * g[3], g[4])) or (eq(g[5], -g[0]) and lt(g[4], 0))): s = int(np.sign(g[5])) A = I3.copy() A[0, 1] = -s C = np.dot(C, A) B = I6.copy() B[1, 0] = 1 B[1, 5] = -s B[3, 4] = -s B[5, 0] = -2 * s D = np.dot(B, D) g = np.dot(D, g0) elif (lt(g[[0, 1, 3, 4, 5]].sum(), 0) or (eq(g[[0, 1, 3, 4, 5]].sum(), 0) and gt(2 * (g[0] + g[4]) + g[5], 0))): A = I3.copy() A[:, 2] = 1 C = np.dot(C, A) B = I6.copy() B[2, :] = 1 B[3, 1] = 2 B[3, 5] = 1 B[4, 0] = 2 B[4, 5] = 1 D = np.dot(B, D) g = np.dot(D, g0) else: break else: raise RuntimeError('Niggli reduction not done in 10000 steps!\n' 'cell={}\n' 'operation={}' .format(cell.tolist(), C.tolist())) abc = np.sqrt(g[:3]) # Prevent division by zero e.g. for cell==zeros((3, 3)): abcprod = max(abc.prod(), 1e-100) cosangles = abc * g[3:] / (2 * abcprod) angles = 180 * np.arccos(cosangles) / np.pi newcell = np.array(cellpar_to_cell(np.concatenate([abc, angles])), dtype=float) return newcell, C def update_cell_and_positions(atoms, new_cell, op): """Helper method for transforming cell and positions of atoms object.""" scpos = np.linalg.solve(op, atoms.get_scaled_positions().T).T scpos %= 1.0 scpos %= 1.0 atoms.set_cell(new_cell) atoms.set_scaled_positions(scpos) def niggli_reduce(atoms): """Convert the supplied atoms object's unit cell into its maximally-reduced Niggli unit cell. Even if the unit cell is already maximally reduced, it will be converted into its unique Niggli unit cell. This will also wrap all atoms into the new unit cell. References: Niggli, P. "Krystallographische und strukturtheoretische Grundbegriffe. 
Handbuch der Experimentalphysik", 1928, Vol. 7, Part 1, 108-176. Krivy, I. and Gruber, B., "A Unified Algorithm for Determining the Reduced (Niggli) Cell", Acta Cryst. 1976, A32, 297-298. Grosse-Kunstleve, R.W.; Sauter, N. K.; and Adams, P. D. "Numerically stable algorithms for the computation of reduced unit cells", Acta Cryst. 2004, A60, 1-6. """ assert all(atoms.pbc), 'Can only reduce 3d periodic unit cells!' new_cell, op = niggli_reduce_cell(atoms.cell) update_cell_and_positions(atoms, new_cell, op) def reduce_lattice(atoms, eps=2e-4): """Reduce atoms object to canonical lattice. This changes the cell and positions such that the atoms object has the canonical form used for defining band paths but is otherwise physically equivalent. The eps parameter is used as a tolerance for determining the cell's Bravais lattice.""" from ase.geometry.bravais_type_engine import identify_lattice niggli_reduce(atoms) lat, op = identify_lattice(atoms.cell, eps=eps) update_cell_and_positions(atoms, lat.tocell(), np.linalg.inv(op)) def sort(atoms, tags=None): """Return a new Atoms object with sorted atomic order. The default is to order according to chemical symbols, but if *tags* is not None, it will be used instead. A stable sorting algorithm is used. Example: >>> from ase.build import bulk >>> # Two unit cells of NaCl: >>> a = 5.64 >>> nacl = bulk('NaCl', 'rocksalt', a=a) * (2, 1, 1) >>> nacl.get_chemical_symbols() ['Na', 'Cl', 'Na', 'Cl'] >>> nacl_sorted = sort(nacl) >>> nacl_sorted.get_chemical_symbols() ['Cl', 'Cl', 'Na', 'Na'] >>> np.all(nacl_sorted.cell == nacl.cell) True """ if tags is None: tags = atoms.get_chemical_symbols() else: tags = list(tags) deco = sorted([(tag, i) for i, tag in enumerate(tags)]) indices = [i for tag, i in deco] return atoms[indices] ase-3.22.1/ase/build/tube.py000066400000000000000000000111071415166253600155400ustar00rootroot00000000000000from math import sqrt, gcd import numpy as np from ase.atoms import Atoms def nanotube(n, m, length=1, bond=1.42, symbol='C', verbose=False, vacuum=None): """Create an atomic structure. Creates a single-walled nanotube whose structure is specified using the standardized (n, m) notation. Parameters ---------- n : int n in the (n, m) notation. m : int m in the (n, m) notation. length : int, optional Length (axial repetitions) of the nanotube. bond : float, optional Bond length between neighboring atoms. symbol : str, optional Chemical element to construct the nanotube from. verbose : bool, optional If True, will display key geometric parameters. Returns ------- ase.atoms.Atoms An ASE Atoms object corresponding to the specified molecule. 
Examples -------- >>> from ase.build import nanotube >>> atoms1 = nanotube(6, 0, length=4) >>> atoms2 = nanotube(3, 3, length=6, bond=1.4, symbol='Si') """ if n < m: m, n = n, m sign = -1 else: sign = 1 nk = 6000 sq3 = sqrt(3.0) a = sq3 * bond l2 = n * n + m * m + n * m l = sqrt(l2) nd = gcd(n, m) if (n - m) % (3 * nd) == 0: ndr = 3 * nd else: ndr = nd nr = (2 * m + n) // ndr ns = -(2 * n + m) // ndr nn = 2 * l2 // ndr ichk = 0 if nr == 0: n60 = 1 else: n60 = nr * 4 absn = abs(n60) nnp = [] nnq = [] for i in range(-absn, absn + 1): for j in range(-absn, absn + 1): j2 = nr * j - ns * i if j2 == 1: j1 = m * i - n * j if j1 > 0 and j1 < nn: ichk += 1 nnp.append(i) nnq.append(j) if ichk == 0: raise RuntimeError('not found p, q strange!!') if ichk >= 2: raise RuntimeError('more than 1 pair p, q strange!!') nnnp = nnp[0] nnnq = nnq[0] if verbose: print('the symmetry vector is', nnnp, nnnq) lp = nnnp * nnnp + nnnq * nnnq + nnnp * nnnq r = a * sqrt(lp) c = a * l t = sq3 * c / ndr if 2 * nn > nk: raise RuntimeError('parameter nk is too small!') rs = c / (2.0 * np.pi) if verbose: print('radius=', rs, t) q1 = np.arctan((sq3 * m) / (2 * n + m)) q2 = np.arctan((sq3 * nnnq) / (2 * nnnp + nnnq)) q3 = q1 - q2 q4 = 2.0 * np.pi / nn q5 = bond * np.cos((np.pi / 6.0) - q1) / c * 2.0 * np.pi h1 = abs(t) / abs(np.sin(q3)) h2 = bond * np.sin((np.pi / 6.0) - q1) ii = 0 x, y, z = [], [], [] for i in range(nn): x1, y1, z1 = 0, 0, 0 k = np.floor(i * abs(r) / h1) x1 = rs * np.cos(i * q4) y1 = rs * np.sin(i * q4) z1 = (i * abs(r) - k * h1) * np.sin(q3) kk2 = abs(np.floor((z1 + 0.0001) / t)) if z1 >= t - 0.0001: z1 -= t * kk2 elif z1 < 0: z1 += t * kk2 ii += 1 x.append(x1) y.append(y1) z.append(z1) z3 = (i * abs(r) - k * h1) * np.sin(q3) - h2 ii += 1 if z3 >= 0 and z3 < t: x2 = rs * np.cos(i * q4 + q5) y2 = rs * np.sin(i * q4 + q5) z2 = (i * abs(r) - k * h1) * np.sin(q3) - h2 x.append(x2) y.append(y2) z.append(z2) else: x2 = rs * np.cos(i * q4 + q5) y2 = rs * np.sin(i * q4 + q5) z2 = (i * abs(r) - (k + 1) * h1) * np.sin(q3) - h2 kk = abs(np.floor(z2 / t)) if z2 >= t - 0.0001: z2 -= t * kk elif z2 < 0: z2 += t * kk x.append(x2) y.append(y2) z.append(z2) ntotal = 2 * nn X = [] for i in range(ntotal): X.append([x[i], y[i], sign * z[i]]) if length > 1: xx = X[:] for mnp in range(2, length + 1): for i in range(len(xx)): X.append(xx[i][:2] + [xx[i][2] + (mnp - 1) * t]) transvec = t numatom = ntotal * length diameter = rs * 2 chiralangle = np.arctan((sq3 * n) / (2 * m + n)) / np.pi * 180 cell = [[0, 0, 0], [0, 0, 0], [0, 0, length * t]] atoms = Atoms(symbol + str(numatom), positions=X, cell=cell, pbc=[False, False, True]) if vacuum: atoms.center(vacuum, axis=(0, 1)) if verbose: print('translation vector =', transvec) print('diameter = ', diameter) print('chiral angle = ', chiralangle) return atoms ase-3.22.1/ase/calculators/000077500000000000000000000000001415166253600154445ustar00rootroot00000000000000ase-3.22.1/ase/calculators/__init__.py000066400000000000000000000001001415166253600175440ustar00rootroot00000000000000"""Interfaces to different ASE compatible force-calculators.""" ase-3.22.1/ase/calculators/abc.py000066400000000000000000000063231415166253600165470ustar00rootroot00000000000000""" This module defines abstract helper classes with the objective of reducing boilerplace method definitions (i.e. duplication) in calculators. """ from abc import ABC, abstractmethod from typing import Mapping, Any class GetPropertiesMixin(ABC): """Mixin class which provides get_forces(), get_stress() and so on. 
Inheriting class must implement get_property().""" @abstractmethod def get_property(self, name, atoms=None, allow_calculation=True): """Get the named property.""" def get_potential_energies(self, atoms=None): return self.get_property('energies', atoms) def get_forces(self, atoms=None): return self.get_property('forces', atoms) def get_stress(self, atoms=None): return self.get_property('stress', atoms) def get_stresses(self, atoms=None): """the calculator should return intensive stresses, i.e., such that stresses.sum(axis=0) == stress """ return self.get_property('stresses', atoms) def get_dipole_moment(self, atoms=None): return self.get_property('dipole', atoms) def get_charges(self, atoms=None): return self.get_property('charges', atoms) def get_magnetic_moment(self, atoms=None): return self.get_property('magmom', atoms) def get_magnetic_moments(self, atoms=None): """Calculate magnetic moments projected onto atoms.""" return self.get_property('magmoms', atoms) class GetOutputsMixin(ABC): """Mixin class for providing get_fermi_level() and others. Effectively this class expresses data in calc.results as methods such as get_fermi_level(). Inheriting class must implement _outputmixin_get_results(), typically returning self.results, which must be a mapping using the naming defined in ase.outputs.Properties. """ @abstractmethod def _outputmixin_get_results(self) -> Mapping[str, Any]: """Return Mapping of names to result value. This may be called many times and should hence not be expensive (except possibly the first time).""" def _get(self, name): # Cyclic import, should restructure. from ase.calculators.calculator import PropertyNotPresent dct = self._outputmixin_get_results() try: return dct[name] except KeyError: raise PropertyNotPresent(name) def get_fermi_level(self): return self._get('fermi_level') def get_ibz_k_points(self): return self._get('ibz_kpoints') def get_k_point_weights(self): return self._get('kpoint_weights') def get_eigenvalues(self, kpt=0, spin=0): eigs = self._get('eigenvalues') return eigs[kpt, spin] def _eigshape(self): # We don't need this if we already have a Properties object. return self._get('eigenvalues').shape def get_occupation_numbers(self, kpt=0, spin=0): occs = self._get('occupations') return occs[kpt, spin] def get_number_of_bands(self): return self._eigshape()[2] def get_number_of_spins(self): nspins = self._eigshape()[0] assert nspins in [1, 2] return nspins def get_spin_polarized(self): return self.get_number_of_spins() == 2 ase-3.22.1/ase/calculators/abinit.py000066400000000000000000000111361415166253600172660ustar00rootroot00000000000000"""This module defines an ASE interface to ABINIT. http://www.abinit.org/ """ import re import ase.io.abinit as io from ase.calculators.calculator import FileIOCalculator from subprocess import check_output def get_abinit_version(command): txt = check_output([command, '--version']).decode('ascii') # This allows trailing stuff like betas, rc and so m = re.match(r'\s*(\d\.\d\.\d)', txt) if m is None: raise RuntimeError('Cannot recognize abinit version. ' 'Start of output: {}' .format(txt[:40])) return m.group(1) class Abinit(FileIOCalculator): """Class for doing ABINIT calculations. The default parameters are very close to those that the ABINIT Fortran code would use. These are the exceptions:: calc = Abinit(label='abinit', xc='LDA', ecut=400, toldfe=1e-5) """ implemented_properties = ['energy', 'forces', 'stress', 'magmom'] ignored_changes = {'pbc'} # In abinit, pbc is always effectively True. 
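    # In the command template below, PREFIX is substituted with the
    # calculator's prefix (derived from *label*) by FileIOCalculator.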
command = 'abinit < PREFIX.files > PREFIX.log' discard_results_on_any_change = True default_parameters = dict( xc='LDA', smearing=None, kpts=None, raw=None, pps='fhi') def __init__(self, restart=None, ignore_bad_restart_file=FileIOCalculator._deprecated, label='abinit', atoms=None, pp_paths=None, v8_legacy_format=None, **kwargs): """Construct ABINIT-calculator object. Parameters ========== label: str Prefix to use for filenames (label.in, label.txt, ...). Default is 'abinit'. Examples ======== Use default values: >>> h = Atoms('H', calculator=Abinit(ecut=200, toldfe=0.001)) >>> h.center(vacuum=3.0) >>> e = h.get_potential_energy() """ self.v8_legacy_format = v8_legacy_format self.pp_paths = pp_paths FileIOCalculator.__init__(self, restart, ignore_bad_restart_file, label, atoms, **kwargs) def write_input(self, atoms, properties, system_changes): """Write input parameters to files-file.""" io.write_all_inputs( atoms, properties, parameters=self.parameters, pp_paths=self.pp_paths, label=self.label, v8_legacy_format=self.v8_legacy_format) def read(self, label): """Read results from ABINIT's text-output file.""" # XXX I think we should redo the concept of 'restarting'. # It makes sense to load a previous calculation as # # * static, calculator-independent results # * an actual calculator capable of calculating # # Either of which is simpler than our current mechanism which # implies both at the same time. Moreover, we don't need # something like calc.read(label). # # What we need for these two purposes is # # * calc = MyCalculator.read(basefile) # (or maybe it should return Atoms with calc attached) # * results = read_results(basefile, format='abinit') # # where basefile determines the file tree. FileIOCalculator.read(self, label) self.atoms, self.parameters = io.read_ase_and_abinit_inputs(self.label) self.results = io.read_results(self.label, self._output_filename()) def _output_filename(self): if self.v8_legacy_format: ext = '.txt' else: ext = '.abo' return self.label + ext def read_results(self): self.results = io.read_results(self.label, self._output_filename()) def get_number_of_iterations(self): return self.results['niter'] def get_electronic_temperature(self): return self.results['width'] def get_number_of_electrons(self): return self.results['nelect'] def get_number_of_bands(self): return self.results['nbands'] def get_k_point_weights(self): return self.results['kpoint_weights'] def get_bz_k_points(self): raise NotImplementedError def get_ibz_k_points(self): return self.results['ibz_kpoints'] def get_spin_polarized(self): return self.results['eigenvalues'].shape[0] == 2 def get_number_of_spins(self): return len(self.results['eigenvalues']) def get_fermi_level(self): return self.results['fermilevel'] def get_eigenvalues(self, kpt=0, spin=0): return self.results['eigenvalues'][spin, kpt] def get_occupations(self, kpt=0, spin=0): raise NotImplementedError ase-3.22.1/ase/calculators/acemolecule.py000066400000000000000000000263451415166253600203060ustar00rootroot00000000000000# type: ignore import os from copy import deepcopy from ase.io import read from ase.calculators.calculator import ReadError from ase.calculators.calculator import FileIOCalculator class ACE(FileIOCalculator): ''' ACE-Molecule logfile reader It has default parameters of each input section And parameters' type = list of dictionaries ''' name = 'ace' implemented_properties = ['energy', 'forces', 'excitation-energy'] basic_list = [{ 'Type': 'Scaling', 'Scaling': '0.35', 'Basis': 'Sinc', 'Grid': 'Sphere', 'KineticMatrix': 
'Finite_Difference', 'DerivativesOrder': '7', 'GeometryFilename': None, 'NumElectrons': None} ] scf_list = [{ 'ExchangeCorrelation': {'XFunctional': 'GGA_X_PBE', 'CFunctional': 'GGA_C_PBE'}, 'NumberOfEigenvalues': None, }] force_list = [{'ForceDerivative': 'Potential'}] tddft_list = [{ 'SortOrbital': 'Order', 'MaximumOrder': '10', 'ExchangeCorrelation': {'XFunctional': 'GGA_X_PBE', 'CFunctional': 'GGA_C_PBE'}, }] order_list = ['BasicInformation', 'Guess', 'Scf'] guess_list = [{}] default_parameters = {'BasicInformation': basic_list, 'Guess': guess_list, 'Scf': scf_list, 'Force': force_list, 'TDDFT': tddft_list, 'order': order_list} def __init__( self, restart=None, ignore_bad_restart_file=FileIOCalculator._deprecated, label='ace', atoms=None, command=None, basisfile=None, **kwargs): FileIOCalculator.__init__(self, restart, ignore_bad_restart_file, label, atoms, command=command, **kwargs) def set(self, **kwargs): '''Update parameters self.parameter member variable. 1. Add default values for repeated parameter sections with self.default_parameters using order. 2. Also add empty dictionary as an indicator for section existence if no relevant default_parameters exist. 3. Update parameters from arguments. Returns ======= Updated parameter ''' new_parameters = deepcopy(self.parameters) changed_parameters = FileIOCalculator.set(self, **kwargs) # Add default values for repeated parameter sections with self.default_parameters using order. # Also add empty dictionary as an indicator for section existence if no relevant default_parameters exist. if 'order' in kwargs: new_parameters['order'] = kwargs['order'] section_sets = set(kwargs['order']) for section_name in section_sets: repeat = kwargs['order'].count(section_name) if section_name in self.default_parameters.keys(): for i in range(repeat-1): new_parameters[section_name] += deepcopy(self.default_parameters[section_name]) else: new_parameters[section_name] = [] for i in range(repeat): new_parameters[section_name].append({}) # Update parameters for section in new_parameters['order']: if section in kwargs.keys(): if isinstance(kwargs[section], dict): kwargs[section] = [kwargs[section]] i = 0 for section_param in kwargs[section]: new_parameters[section][i] = update_parameter(new_parameters[section][i], section_param) i += 1 self.parameters = new_parameters return changed_parameters def read(self, label): FileIOCalculator.read(self, label) filename = self.label + ".log" with open(filename, 'r') as fd: lines = fd.readlines() if 'WARNING' in lines: raise ReadError("Not convergy energy in log file {}.".format(filename)) if '! total energy' not in lines: raise ReadError("Wrong ACE-Molecule log file {}.".format(filename)) if not os.path.isfile(filename): raise ReadError("Wrong ACE-Molecule input file {}.".format(filename)) self.read_results() def write_input(self, atoms, properties=None, system_changes=None): '''Initializes input parameters and xyz files. If force calculation is requested, add Force section to parameters if not exists. Parameters ========== atoms: ASE atoms object. properties: List of properties to be calculated. Should be element of self.implemented_properties. system_chages: Ignored. 
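        Example
        =======
        An illustrative sketch only: the ACE-Molecule executable path in
        'command' and the section values below are hypothetical placeholders.
        Repeated parameter sections are requested through the 'order'
        keyword, as described in set().

        >>> from ase.build import molecule
        >>> from ase.calculators.acemolecule import ACE
        >>> atoms = molecule('H2O')
        >>> atoms.calc = ACE(command='mpirun -np 4 ace PREFIX.inp > PREFIX.log',
        ...                  BasicInformation=[{'Cell': '12.0'}],
        ...                  Scf=[{'IterateMaxCycle': '100'}],
        ...                  order=['BasicInformation', 'Guess', 'Scf'])
        >>> energy = atoms.get_potential_energy()  # runs ACE-Molecule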
''' FileIOCalculator.write_input(self, atoms, properties, system_changes) with open(self.label + '.inp', 'w') as inputfile: xyz_name = "{}.xyz".format(self.label) atoms.write(xyz_name) run_parameters = self.prepare_input(xyz_name, properties) self.write_acemolecule_input(inputfile, run_parameters) def prepare_input(self, geometry_filename, properties): '''Initialize parameters dictionary based on geometry filename and calculated properties. Parameters ========== geometry_filename: Geometry (XYZ format) file path. properties: Properties to be calculated. Returns ======= Updated version of self.parameters; geometry file and optionally Force section are updated. ''' copied_parameters = deepcopy(self.parameters) if properties is not None and "forces" in properties and 'Force' not in copied_parameters['order']: copied_parameters['order'].append('Force') copied_parameters["BasicInformation"][0]["GeometryFilename"] = "{}.xyz".format(self.label) copied_parameters["BasicInformation"][0]["GeometryFormat"] = "xyz" return copied_parameters def read_results(self): '''Read calculation results, speficied by 'quantities' variable, from the log file. quantities ======= energy : obtaing single point energy(eV) from log file forces : obtaing force of each atom form log file excitation-energy : it able to calculate TDDFT. Return value is None. Result is not used. atoms : ASE atoms object ''' filename = self.label + '.log' #quantities = ['energy', 'forces', 'atoms', 'excitation-energy'] #for section_name in quantities: #self.results = read_acemolecule_out(filename) self.results = read(filename, format='acemolecule-out') def write_acemolecule_section(self, fpt, section, depth=0): '''Write parameters in each section of input Parameters ========== fpt: ACE-Moleucle input file object. Should be write mode. section: Dictionary of a parameter section. depth: Nested input depth. ''' for section, section_param in section.items(): if isinstance(section_param, str) or isinstance(section_param, int) or isinstance(section_param, float): fpt.write(' ' * depth + str(section) + " " + str(section_param) + "\n") else: if isinstance(section_param, dict): fpt.write(' ' * depth + "%% " + str(section) + "\n") self.write_acemolecule_section(fpt, section_param, depth + 1) fpt.write(' ' * depth + "%% End\n") if isinstance(section_param, list): for val in section_param: fpt.write(' ' * depth + str(section) + " " + str(val) + "\n") def write_acemolecule_input(self, fpt, param, depth=0): '''Write ACE-Molecule input ACE-Molecule input examples (not minimal) %% BasicInformation Type Scaling Scaling 0.4 Basis Sinc Cell 10.0 Grid Sphere GeometryFormat xyz SpinMultiplicity 3.0 Polarize 1 Centered 0 %% Pseudopotential Pseudopotential 1 UsingDoubleGrid 0 FilterType Sinc Format upf PSFilePath /PATH/TO/UPF PSFileSuffix .pbe-theos.UPF %% End GeometryFilename xyz/C.xyz %% End %% Guess InitialGuess 3 InitialFilenames 001.cube InitialFilenames 002.cube %% End %% Scf IterateMaxCycle 150 ConvergenceType Energy ConvergenceTolerance 0.00001 EnergyDecomposition 1 ComputeInitialEnergy 1 %% Diagonalize Tolerance 0.000001 %% End %% ExchangeCorrelation XFunctional GGA_X_PBE CFunctional GGA_C_PBE %% End %% Mixing MixingMethod 1 MixingType Density MixingParameter 0.5 PulayMixingParameter 0.1 %% End %% End Parameters ========== fpt: File object, should be write mode. param: Dictionary of parameters. Also should contain special 'order' section_name for parameter section ordering. depth: Nested input depth. 
Notes ===== - Order of parameter section (denoted using %% -- %% BasicInformation, %% Guess, etc.) is important, because it determines calculation order. For example, if Guess section comes after Scf section, calculation will not run because Scf will tries to run without initial Hamiltonian. - Order of each parameter section-section_name pair is not important unless their keys are the same. - Indentation unimportant and capital letters are important. ''' prefix = " " * depth for i in range(len(param['order'])): fpt.write(prefix + "%% " + param['order'][i] + "\n") section_list = param[param['order'][i]] if len(section_list) > 0: section = section_list.pop(0) self.write_acemolecule_section(fpt, section, 1) fpt.write("%% End\n") return def update_parameter(oldpar, newpar): '''Update each section of parameter (oldpar) using newpar keys and values. If section of newpar exist in oldpar, - Replace the section_name with newpar's section_name if oldvar section_name type is not dict. - Append the section_name with newpar's section_name if oldvar section_name type is list. - If oldpar section_name type is dict, it is subsection. So call update_parameter again. otherwise, add the parameter section and section_name from newpar. Parameters ========== oldpar: dictionary of original parameters to be updated. newpar: dictionary containing parameter section and values to update. Return ====== Updated parameter dictionary. ''' for section, section_param in newpar.items(): if section in oldpar: if isinstance(section_param, dict): oldpar[section] = update_parameter(oldpar[section], section_param) else: oldpar[section] = section_param else: oldpar[section] = section_param return oldpar ase-3.22.1/ase/calculators/acn.py000066400000000000000000000173521415166253600165670ustar00rootroot00000000000000import numpy as np import ase.units as units from ase.calculators.calculator import Calculator, all_changes from ase.data import atomic_masses from ase.geometry import find_mic # Electrostatic constant k_c = units.Hartree * units.Bohr # Force field parameters q_me = 0.206 q_c = 0.247 q_n = -0.453 sigma_me = 3.775 sigma_c = 3.650 sigma_n = 3.200 epsilon_me = 0.7824 * units.kJ / units.mol epsilon_c = 0.544 * units.kJ / units.mol epsilon_n = 0.6276 * units.kJ / units.mol r_mec = 1.458 r_cn = 1.157 r_men = r_mec + r_cn m_me = atomic_masses[6] + 3 * atomic_masses[1] m_c = atomic_masses[6] m_n = atomic_masses[7] def combine_lj_lorenz_berthelot(sigma, epsilon): """Combine LJ parameters according to the Lorenz-Berthelot rule""" sigma_c = np.zeros((len(sigma), len(sigma))) epsilon_c = np.zeros_like(sigma_c) for ii in range(len(sigma)): sigma_c[:, ii] = (sigma[ii] + sigma) / 2 epsilon_c[:, ii] = (epsilon[ii] * epsilon) ** 0.5 return sigma_c, epsilon_c class ACN(Calculator): implemented_properties = ['energy', 'forces'] nolabel = True def __init__(self, rc=5.0, width=1.0): """Three-site potential for acetonitrile. Atom sequence must be: MeCNMeCN ... MeCN or NCMeNCMe ... NCMe When performing molecular dynamics (MD), forces are redistributed and only Me and N sites propagated based on a scheme for MD of rigid triatomic molecules from Ciccotti et al. Molecular Physics 1982 (https://doi.org/10.1080/00268978200100942). Apply constraints using the FixLinearTriatomic to fix the geometry of the acetonitrile molecules. rc: float Cutoff radius for Coulomb interactions. width: float Width for cutoff function for Coulomb interactions. 
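        Example:

        An illustrative two-molecule sketch with an idealized gas-phase
        geometry (no periodic boundary conditions).  The constraint indices
        assume the Me-C-N site order described above; FixLinearTriatomic is
        imported from ase.constraints.

        >>> from ase import Atoms
        >>> from ase.calculators.acn import ACN, m_me, r_mec, r_cn
        >>> from ase.constraints import FixLinearTriatomic
        >>> pos = [[0, 0, -r_mec], [0, 0, 0], [0, 0, r_cn],
        ...        [4, 0, -r_mec], [4, 0, 0], [4, 0, r_cn]]
        >>> atoms = Atoms('CCNCCN', positions=pos)
        >>> masses = atoms.get_masses()
        >>> masses[::3] = m_me  # Me sites carry the mass of a CH3 group
        >>> atoms.set_masses(masses)
        >>> atoms.set_constraint(FixLinearTriatomic(triples=[(0, 1, 2),
        ...                                                  (3, 4, 5)]))
        >>> atoms.calc = ACN(rc=5.0)
        >>> energy = atoms.get_potential_energy()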
References: https://doi.org/10.1080/08927020108024509 """ self.rc = rc self.width = width self.forces = None Calculator.__init__(self) self.sites_per_mol = 3 self.pcpot = None def calculate(self, atoms=None, properties=['energy'], system_changes=all_changes): Calculator.calculate(self, atoms, properties, system_changes) Z = atoms.numbers masses = atoms.get_masses() if Z[0] == 7: n = 0 me = 2 sigma = np.array([sigma_n, sigma_c, sigma_me]) epsilon = np.array([epsilon_n, epsilon_c, epsilon_me]) else: n = 2 me = 0 sigma = np.array([sigma_me, sigma_c, sigma_n]) epsilon = np.array([epsilon_me, epsilon_c, epsilon_n]) assert (Z[n::3] == 7).all(), 'incorrect atoms sequence' assert (Z[1::3] == 6).all(), 'incorrect atoms sequence' assert (masses[n::3] == m_n).all(), 'incorrect masses' assert (masses[1::3] == m_c).all(), 'incorrect masses' assert (masses[me::3] == m_me).all(), 'incorrect masses' R = self.atoms.positions.reshape((-1, 3, 3)) pbc = self.atoms.pbc cd = self.atoms.cell.diagonal() nm = len(R) assert (self.atoms.cell == np.diag(cd)).all(), 'not orthorhombic' assert ((cd >= 2 * self.rc) | ~pbc).all(), 'cutoff too large' charges = self.get_virtual_charges(atoms[:3]) # LJ parameters sigma_co, epsilon_co = combine_lj_lorenz_berthelot(sigma, epsilon) energy = 0.0 self.forces = np.zeros((3 * nm, 3)) for m in range(nm - 1): Dmm = R[m + 1:, 1] - R[m, 1] # MIC PBCs Dmm_min, Dmm_min_len = find_mic(Dmm, atoms.cell, pbc) shift = Dmm_min - Dmm # Smooth cutoff cut, dcut = self.cutoff(Dmm_min_len) for j in range(3): D = R[m + 1:] - R[m, j] + shift[:, np.newaxis] D_len2 = (D**2).sum(axis=2) D_len = D_len2**0.5 # Coulomb interactions e = charges[j] * charges / D_len * k_c energy += np.dot(cut, e).sum() F = (e / D_len2 * cut[:, np.newaxis])[:, :, np.newaxis] * D Fmm = -(e.sum(1) * dcut / Dmm_min_len)[:, np.newaxis] * Dmm_min self.forces[(m + 1) * 3:] += F.reshape((-1, 3)) self.forces[m * 3 + j] -= F.sum(axis=0).sum(axis=0) self.forces[(m + 1) * 3 + 1::3] += Fmm self.forces[m * 3 + 1] -= Fmm.sum(0) # LJ interactions c6 = (sigma_co[:, j]**2 / D_len2)**3 c12 = c6**2 e = 4 * epsilon_co[:, j] * (c12 - c6) energy += np.dot(cut, e).sum() F = (24 * epsilon_co[:, j] * (2 * c12 - c6) / D_len2 * cut[:, np.newaxis])[:, :, np.newaxis] * D Fmm = -(e.sum(1) * dcut / Dmm_min_len)[:, np.newaxis] * Dmm_min self.forces[(m + 1) * 3:] += F.reshape((-1, 3)) self.forces[m * 3 + j] -= F.sum(axis=0).sum(axis=0) self.forces[(m + 1) * 3 + 1::3] += Fmm self.forces[m * 3 + 1] -= Fmm.sum(0) if self.pcpot: e, f = self.pcpot.calculate(np.tile(charges, nm), self.atoms.positions) energy += e self.forces += f self.results['energy'] = energy self.results['forces'] = self.forces def redistribute_forces(self, forces): return forces def get_molcoms(self, nm): molcoms = np.zeros((nm, 3)) for m in range(nm): molcoms[m] = self.atoms[m * 3:(m + 1) * 3].get_center_of_mass() return molcoms def cutoff(self, d): x1 = d > self.rc - self.width x2 = d < self.rc x12 = np.logical_and(x1, x2) y = (d[x12] - self.rc + self.width) / self.width cut = np.zeros(len(d)) # cutoff function cut[x2] = 1.0 cut[x12] -= y**2 * (3.0 - 2.0 * y) dtdd = np.zeros(len(d)) dtdd[x12] -= 6.0 / self.width * y * (1.0 - y) return cut, dtdd def embed(self, charges): """Embed atoms in point-charges.""" self.pcpot = PointChargePotential(charges) return self.pcpot def check_state(self, atoms, tol=1e-15): system_changes = Calculator.check_state(self, atoms, tol) if self.pcpot and self.pcpot.mmpositions is not None: system_changes.append('positions') return system_changes def 
add_virtual_sites(self, positions): return positions # no virtual sites def get_virtual_charges(self, atoms): charges = np.empty(len(atoms)) Z = atoms.numbers if Z[0] == 7: n = 0 me = 2 else: n = 2 me = 0 charges[me::3] = q_me charges[1::3] = q_c charges[n::3] = q_n return charges class PointChargePotential: def __init__(self, mmcharges): """Point-charge potential for ACN. Only used for testing QMMM. """ self.mmcharges = mmcharges self.mmpositions = None self.mmforces = None def set_positions(self, mmpositions): self.mmpositions = mmpositions def calculate(self, qmcharges, qmpositions): energy = 0.0 self.mmforces = np.zeros_like(self.mmpositions) qmforces = np.zeros_like(qmpositions) for C, R, F in zip(self.mmcharges, self.mmpositions, self.mmforces): d = qmpositions - R r2 = (d**2).sum(1) e = units.Hartree * units.Bohr * C * r2**-0.5 * qmcharges energy += e.sum() f = (e / r2)[:, np.newaxis] * d qmforces += f F -= f.sum(0) self.mmpositions = None return energy, qmforces def get_forces(self, calc): return self.mmforces ase-3.22.1/ase/calculators/aims.py000066400000000000000000001101771415166253600167560ustar00rootroot00000000000000"""This module defines an ASE interface to FHI-aims. Felix Hanke hanke@liverpool.ac.uk Jonas Bjork j.bjork@liverpool.ac.uk Simon P. Rittmeyer simon.rittmeyer@tum.de """ import os import warnings import time from typing import Optional import re import numpy as np from ase.units import Hartree from ase.io.aims import write_aims, read_aims from ase.data import atomic_numbers from ase.calculators.calculator import FileIOCalculator, Parameters, kpts2mp, \ ReadError, PropertyNotImplementedError def get_aims_version(string): match = re.search(r'\s*FHI-aims version\s*:\s*(\S+)', string, re.M) return match.group(1) float_keys = [ 'charge', 'charge_mix_param', 'default_initial_moment', 'fixed_spin_moment', 'hartree_convergence_parameter', 'harmonic_length_scale', 'ini_linear_mix_param', 'ini_spin_mix_parma', 'initial_moment', 'MD_MB_init', 'MD_time_step', 'prec_mix_param', 'set_vacuum_level', 'spin_mix_param', ] exp_keys = [ 'basis_threshold', 'occupation_thr', 'sc_accuracy_eev', 'sc_accuracy_etot', 'sc_accuracy_forces', 'sc_accuracy_rho', 'sc_accuracy_stress', ] string_keys = [ 'communication_type', 'density_update_method', 'KS_method', 'mixer', 'output_level', 'packed_matrix_format', 'relax_unit_cell', 'restart', 'restart_read_only', 'restart_write_only', 'spin', 'total_energy_method', 'qpe_calc', 'xc', 'species_dir', 'run_command', 'plus_u', ] int_keys = [ 'empty_states', 'ini_linear_mixing', 'max_relaxation_steps', 'max_zeroin', 'multiplicity', 'n_max_pulay', 'sc_iter_limit', 'walltime', ] bool_keys = [ 'collect_eigenvectors', 'compute_forces', 'compute_kinetic', 'compute_numerical_stress', 'compute_analytical_stress', 'compute_heat_flux', 'distributed_spline_storage', 'evaluate_work_function', 'final_forces_cleaned', 'hessian_to_restart_geometry', 'load_balancing', 'MD_clean_rotations', 'MD_restart', 'override_illconditioning', 'override_relativity', 'restart_relaxations', 'squeeze_memory', 'symmetry_reduced_k_grid', 'use_density_matrix', 'use_dipole_correction', 'use_local_index', 'use_logsbt', 'vdw_correction_hirshfeld', ] list_keys = [ 'init_hess', 'k_grid', 'k_offset', 'MD_run', 'MD_schedule', 'MD_segment', 'mixer_threshold', 'occupation_type', 'output', 'cube', 'preconditioner', 'relativistic', 'relax_geometry', ] class Aims(FileIOCalculator): # was "command" before the refactoring to dynamical commands __command_default = 'aims.version.serial.x > aims.out' 
__outfilename_default = 'aims.out' implemented_properties = ['energy', 'forces', 'stress', 'stresses', 'dipole', 'magmom'] def __init__(self, restart=None, ignore_bad_restart_file=FileIOCalculator._deprecated, label=os.curdir, atoms=None, cubes=None, radmul=None, tier=None, aims_command=None, outfilename=None, **kwargs): """Construct the FHI-aims calculator. The keyword arguments (kwargs) can be one of the ASE standard keywords: 'xc', 'kpts' and 'smearing' or any of FHI-aims' native keywords. .. note:: The behavior of command/run_command has been refactored ase X.X.X It is now possible to independently specify the command to call FHI-aims and the outputfile into which stdout is directed. In general, we replaced = + " > " + >> calc = Aims(run_command = "mpiexec -np 4 aims.x > aims.out") can now be achieved with the two arguments >>> calc = Aims(aims_command = "mpiexec -np 4 aims.x" >>> outfilename = "aims.out") Backward compatibility, however, is provided. Also, the command actually used to run FHI-aims is dynamically updated (i.e., the "command" member variable). That is, e.g., >>> calc = Aims() >>> print(calc.command) aims.version.serial.x > aims.out >>> calc.outfilename = "systemX.out" >>> print(calc.command) aims.version.serial.x > systemX.out >>> calc.aims_command = "mpiexec -np 4 aims.version.scalapack.mpi.x" >>> print(calc.command) mpiexec -np 4 aims.version.scalapack.mpi > systemX.out Arguments: cubes: AimsCube object Cube file specification. radmul: int Set radial multiplier for the basis set of all atomic species. tier: int or array of ints Set basis set tier for all atomic species. aims_command : str The full command as executed to run FHI-aims *without* the redirection to stdout. For instance "mpiexec -np 4 aims.x". Note that this is not the same as "command" or "run_command". .. note:: Added in ase X.X.X outfilename : str The file (incl. path) to which stdout is redirected. Defaults to "aims.out" .. note:: Added in ase X.X.X run_command : str, optional (default=None) Same as "command", see FileIOCalculator documentation. .. note:: Deprecated in ase X.X.X outfilename : str, optional (default=aims.out) File into which the stdout of the FHI aims run is piped into. Note that this will be only of any effect, if the does not yet contain a '>' directive. plus_u : dict For DFT+U. Adds a +U term to one specific shell of the species. kwargs : dict Any of the base class arguments. """ # yes, we pop the key and run it through our legacy filters command = kwargs.pop('command', None) # Check for the "run_command" (deprecated keyword) # Consistently, the "command" argument should be used as suggested by the FileIO base class. # For legacy reasons, however, we here also accept "run_command" run_command = kwargs.pop('run_command', None) if run_command: # this warning is debatable... in my eyes it is more consistent to # use 'command' warnings.warn('Argument "run_command" is deprecated and will be replaced with "command". Alternatively, use "aims_command" and "outfile". See documentation for more details.') if command: warnings.warn('Caution! 
Argument "command" overwrites "run_command.') else: command = run_command # this is the fallback to the default value for empty init if np.all([i is None for i in (command, aims_command, outfilename)]): # we go for the FileIOCalculator default way (env variable) with the former default as fallback command = os.environ.get('ASE_AIMS_COMMAND', Aims.__command_default) # filter the command and set the member variables "aims_command" and "outfilename" self.__init_command(command=command, aims_command=aims_command, outfilename=outfilename) FileIOCalculator.__init__(self, restart, ignore_bad_restart_file, label, atoms, # well, this is not nice, but cannot work around it... command=self.command, **kwargs) self.cubes = cubes self.radmul = radmul self.tier = tier # handling the filtering for dynamical commands with properties, @property # type: ignore def command(self) -> Optional[str]: # type: ignore return self.__command @command.setter def command(self, x): self.__update_command(command=x) @property def aims_command(self): return self.__aims_command @aims_command.setter def aims_command(self, x): self.__update_command(aims_command=x) @property def outfilename(self): return self.__outfilename @outfilename.setter def outfilename(self, x): self.__update_command(outfilename=x) def __init_command(self, command=None, aims_command=None, outfilename=None): """ Create the private variables for which properties are defines and set them accordingly. """ # new class variables due to dynamical command handling self.__aims_command = None self.__outfilename = None self.__command: Optional[str] = None # filter the command and set the member variables "aims_command" and "outfilename" self.__update_command(command=command, aims_command=aims_command, outfilename=outfilename) # legacy handling of the (run_)command behavior a.k.a. a universal setter routine def __update_command(self, command=None, aims_command=None, outfilename=None): """ Abstracted generic setter routine for a dynamic behavior of "command". The command that is actually called on the command line and enters the base class, is = > . This new scheme has been introduced in order to conveniently change the outfile name from the outside while automatically updating the member variable. Obiously, changing conflicts with changing and/or , which thus raises a . This should, however, not happen if this routine is not used outside the property definitions. Parameters ---------- command : str The full command as executed to run FHI-aims. This includes any potential mpiexec call, as well as the redirection of stdout. For instance "mpiexec -np 4 aims.x > aims.out". aims_command : str The full command as executed to run FHI-aims *without* the redirection to stdout. For instance "mpiexec -np 4 aims.x" outfilename : str The file (incl. path) to which stdout is redirected. 
""" # disentangle the command if given if command: if aims_command: raise ValueError('Cannot specify "command" and "aims_command" simultaneously.') if outfilename: raise ValueError('Cannot specify "command" and "outfilename" simultaneously.') # check if the redirection of stdout is included command_spl = command.split('>') if len(command_spl) > 1: self.__aims_command = command_spl[0].strip() self.__outfilename = command_spl[-1].strip() else: # this should not happen if used correctly # but just to ensure legacy behavior of how "run_command" was handled self.__aims_command = command.strip() self.__outfilename = Aims.__outfilename_default else: if aims_command is not None: self.__aims_command = aims_command elif outfilename is None: # nothing to do here, empty call with 3x None return if outfilename is not None: self.__outfilename = outfilename else: # default to 'aims.out' if not self.outfilename: self.__outfilename = Aims.__outfilename_default self.__command = '{0:s} > {1:s}'.format(self.aims_command, self.outfilename) def set_atoms(self, atoms): self.atoms = atoms def set_label(self, label, update_outfilename=False): msg = "Aims.set_label is not supported anymore, please use `directory`" raise RuntimeError(msg) @property def out(self): return os.path.join(self.label, self.outfilename) def check_state(self, atoms): system_changes = FileIOCalculator.check_state(self, atoms) # Ignore unit cell for molecules: if not atoms.pbc.any() and 'cell' in system_changes: system_changes.remove('cell') return system_changes def set(self, **kwargs): xc = kwargs.get('xc') if xc: kwargs['xc'] = {'LDA': 'pw-lda', 'PBE': 'pbe'}.get(xc, xc) changed_parameters = FileIOCalculator.set(self, **kwargs) if changed_parameters: self.reset() return changed_parameters def write_input(self, atoms, properties=None, system_changes=None, ghosts=None, geo_constrain=None, scaled=None, velocities=None): FileIOCalculator.write_input(self, atoms, properties, system_changes) if geo_constrain is None: geo_constrain = "relax_geometry" in self.parameters if scaled is None: scaled = np.all(atoms.get_pbc()) if velocities is None: velocities = atoms.has('momenta') have_lattice_vectors = atoms.pbc.any() have_k_grid = ('k_grid' in self.parameters or 'kpts' in self.parameters) if have_lattice_vectors and not have_k_grid: raise RuntimeError('Found lattice vectors but no k-grid!') if not have_lattice_vectors and have_k_grid: raise RuntimeError('Found k-grid but no lattice vectors!') write_aims( os.path.join(self.directory, 'geometry.in'), atoms, scaled, geo_constrain, velocities=velocities, ghosts=ghosts ) self.write_control(atoms, os.path.join(self.directory, 'control.in')) self.write_species(atoms, os.path.join(self.directory, 'control.in')) self.parameters.write(os.path.join(self.directory, 'parameters.ase')) def prepare_input_files(self): """ Wrapper function to prepare input filesi, e.g., to a run on a remote machine """ if self.atoms is None: raise ValueError('No atoms object attached') self.write_input(self.atoms) def write_control(self, atoms, filename, debug=False): lim = '#' + '='*79 output = open(filename, 'w') output.write(lim + '\n') for line in ['FHI-aims file: ' + filename, 'Created using the Atomic Simulation Environment (ASE)', time.asctime(), ]: output.write('# ' + line + '\n') if debug: output.write('# \n# List of parameters used to initialize the calculator:',) for p, v in self.parameters.items(): s = '# {} : {}\n'.format(p, v) output.write(s) output.write(lim + '\n') assert not ('kpts' in self.parameters and 'k_grid' in 
self.parameters) assert not ('smearing' in self.parameters and 'occupation_type' in self.parameters) for key, value in self.parameters.items(): if key == 'kpts': mp = kpts2mp(atoms, self.parameters.kpts) output.write('%-35s%d %d %d\n' % (('k_grid',) + tuple(mp))) dk = 0.5 - 0.5 / np.array(mp) output.write('%-35s%f %f %f\n' % (('k_offset',) + tuple(dk))) elif key == 'species_dir' or key == 'run_command': continue elif key == 'plus_u': continue elif key == 'smearing': name = self.parameters.smearing[0].lower() if name == 'fermi-dirac': name = 'fermi' width = self.parameters.smearing[1] output.write('%-35s%s %f' % ('occupation_type', name, width)) if name == 'methfessel-paxton': order = self.parameters.smearing[2] output.write(' %d' % order) output.write('\n' % order) elif key == 'output': for output_type in value: output.write('%-35s%s\n' % (key, output_type)) elif key == 'vdw_correction_hirshfeld' and value: output.write('%-35s\n' % key) elif key in bool_keys: output.write('%-35s.%s.\n' % (key, repr(bool(value)).lower())) elif isinstance(value, (tuple, list)): output.write('%-35s%s\n' % (key, ' '.join(str(x) for x in value))) elif isinstance(value, str): output.write('%-35s%s\n' % (key, value)) else: output.write('%-35s%r\n' % (key, value)) if self.cubes: self.cubes.write(output) output.write(lim + '\n\n') output.close() def read(self, label=None): if label is None: label = self.label FileIOCalculator.read(self, label) geometry = os.path.join(self.directory, 'geometry.in') control = os.path.join(self.directory, 'control.in') for filename in [geometry, control, self.out]: if not os.path.isfile(filename): raise ReadError self.atoms, symmetry_block = read_aims(geometry, True) self.parameters = Parameters.read(os.path.join(self.directory, 'parameters.ase')) if symmetry_block: self.parameters["symmetry_block"] = symmetry_block self.read_results() def read_results(self): converged = self.read_convergence() if not converged: os.system('tail -20 ' + self.out) raise RuntimeError('FHI-aims did not converge!\n' + 'The last lines of output are printed above ' + 'and should give an indication why.') self.read_energy() if ('compute_forces' in self.parameters or 'sc_accuracy_forces' in self.parameters): self.read_forces() if ('sc_accuracy_stress' in self.parameters or ('compute_numerical_stress' in self.parameters and self.parameters['compute_numerical_stress']) or ('compute_analytical_stress' in self.parameters and self.parameters['compute_analytical_stress']) or ('compute_heat_flux' in self.parameters and self.parameters['compute_heat_flux'])): self.read_stress() if ('compute_heat_flux' in self.parameters and self.parameters['compute_heat_flux']): self.read_stresses() if ('dipole' in self.parameters.get('output', []) and not self.atoms.pbc.any()): self.read_dipole() def write_species(self, atoms, filename): self.ctrlname = filename species_path = self.parameters.get('species_dir') if species_path is None: species_path = os.environ.get('AIMS_SPECIES_DIR') if species_path is None: raise RuntimeError( 'Missing species directory! 
Use species_dir ' + 'parameter or set $AIMS_SPECIES_DIR environment variable.') control = open(filename, 'a') symbols = atoms.get_chemical_symbols() symbols2 = [] for n, symbol in enumerate(symbols): if symbol not in symbols2: symbols2.append(symbol) if self.tier is not None: if isinstance(self.tier, int): self.tierlist = np.ones(len(symbols2), 'int') * self.tier elif isinstance(self.tier, list): assert len(self.tier) == len(symbols2) self.tierlist = self.tier for i, symbol in enumerate(symbols2): fd = os.path.join(species_path, '%02i_%s_default' % (atomic_numbers[symbol], symbol)) reached_tiers = False for line in open(fd, 'r'): if self.tier is not None: if 'First tier' in line: reached_tiers = True self.targettier = self.tierlist[i] self.foundtarget = False self.do_uncomment = True if reached_tiers: line = self.format_tiers(line) control.write(line) if self.tier is not None and not self.foundtarget: raise RuntimeError( "Basis tier %i not found for element %s" % (self.targettier, symbol)) if self.parameters.get('plus_u') is not None: if symbol in self.parameters.plus_u.keys(): control.write('plus_u %s \n' % self.parameters.plus_u[symbol]) control.close() if self.radmul is not None: self.set_radial_multiplier() def format_tiers(self, line): if 'meV' in line: assert line[0] == '#' if 'tier' in line and 'Further' not in line: tier = line.split(" tier")[0] tier = tier.split('"')[-1] current_tier = self.translate_tier(tier) if current_tier == self.targettier: self.foundtarget = True elif current_tier > self.targettier: self.do_uncomment = False else: self.do_uncomment = False return line elif self.do_uncomment and line[0] == '#': return line[1:] elif not self.do_uncomment and line[0] != '#': return '#' + line else: return line def translate_tier(self, tier): if tier.lower() == 'first': return 1 elif tier.lower() == 'second': return 2 elif tier.lower() == 'third': return 3 elif tier.lower() == 'fourth': return 4 else: return -1 def set_radial_multiplier(self): assert isinstance(self.radmul, int) newctrl = self.ctrlname + '.new' fin = open(self.ctrlname, 'r') fout = open(newctrl, 'w') newline = " radial_multiplier %i\n" % self.radmul for line in fin: if ' radial_multiplier' in line: fout.write(newline) else: fout.write(line) fin.close() fout.close() os.rename(newctrl, self.ctrlname) def get_dipole_moment(self, atoms): if ('dipole' not in self.parameters.get('output', []) or atoms.pbc.any()): raise PropertyNotImplementedError return FileIOCalculator.get_dipole_moment(self, atoms) def get_stress(self, atoms): if ('compute_numerical_stress' not in self.parameters and 'compute_analytical_stress' not in self.parameters): raise PropertyNotImplementedError return FileIOCalculator.get_stress(self, atoms) def get_forces(self, atoms): if ('compute_forces' not in self.parameters and 'sc_accuracy_forces' not in self.parameters): raise PropertyNotImplementedError return FileIOCalculator.get_forces(self, atoms) def read_dipole(self): "Method that reads the electric dipole moment from the output file." 
for line in open(self.out, 'r'): if line.rfind('Total dipole moment [eAng]') > -1: dipolemoment = np.array([float(f) for f in line.split()[6:9]]) self.results['dipole'] = dipolemoment def read_energy(self): for line in open(self.out, 'r'): if line.rfind('Total energy corrected') > -1: E0 = float(line.split()[5]) elif line.rfind('Total energy uncorrected') > -1: F = float(line.split()[5]) self.results['free_energy'] = F self.results['energy'] = E0 def read_forces(self): """Method that reads forces from the output file. If 'all' is switched on, the forces for all ionic steps in the output file will be returned, in other case only the forces for the last ionic configuration are returned.""" lines = open(self.out, 'r').readlines() forces = np.zeros([len(self.atoms), 3]) for n, line in enumerate(lines): if line.rfind('Total atomic forces') > -1: for iatom in range(len(self.atoms)): data = lines[n + iatom + 1].split() for iforce in range(3): forces[iatom, iforce] = float(data[2 + iforce]) self.results['forces'] = forces def read_stress(self): lines = open(self.out, 'r').readlines() stress = None for n, line in enumerate(lines): if (line.rfind('| Analytical stress tensor') > -1 or line.rfind('Numerical stress tensor') > -1): stress = [] for i in [n + 5, n + 6, n + 7]: data = lines[i].split() stress += [float(data[2]), float(data[3]), float(data[4])] # rearrange in 6-component form and return self.results['stress'] = np.array([stress[0], stress[4], stress[8], stress[5], stress[2], stress[1]]) def read_stresses(self): """ Read stress per atom """ with open(self.out) as fd: next(l for l in fd if 'Per atom stress (eV) used for heat flux calculation' in l) # scroll to boundary next(l for l in fd if '-------------' in l) stresses = [] for l in [next(fd) for _ in range(len(self.atoms))]: # Read stresses and rearrange from # (xx, yy, zz, xy, xz, yz) to (xx, yy, zz, yz, xz, xy) xx, yy, zz, xy, xz, yz = [float(d) for d in l.split()[2:8]] stresses.append([xx, yy, zz, yz, xz, xy]) self.results['stresses'] = np.array(stresses) def get_stresses(self, voigt=False): """ Return stress per atom Returns an array of the six independent components of the symmetric stress tensor per atom, in the traditional Voigt order (xx, yy, zz, yz, xz, xy) or as a 3x3 matrix. Default is 3x3 matrix. 
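        Example (an illustrative sketch: it assumes a periodic atoms object,
        a working FHI-aims installation with species defaults available, and
        that per-atom stresses were requested via compute_heat_flux):

        >>> atoms.calc = Aims(xc='pbe', k_grid=(4, 4, 4),
        ...                   compute_heat_flux=True)
        >>> atoms.get_potential_energy()  # runs FHI-aims
        >>> stresses_3x3 = atoms.calc.get_stresses()              # (natoms, 3, 3)
        >>> stresses_voigt = atoms.calc.get_stresses(voigt=True)  # (natoms, 6)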
""" voigt_stresses = self.results['stresses'] if voigt: return voigt_stresses else: stresses = np.zeros((len(self.atoms), 3, 3)) for ii, stress in enumerate(voigt_stresses): xx, yy, zz, yz, xz, xy = stress stresses[ii] = np.array([(xx, xy, xz), (xy, yy, yz), (xz, yz, zz)]) return stresses def read_convergence(self): converged = False lines = open(self.out, 'r').readlines() for n, line in enumerate(lines): if line.rfind('Have a nice day') > -1: converged = True return converged def get_number_of_iterations(self): return self.read_number_of_iterations() def read_number_of_iterations(self): niter = None lines = open(self.out, 'r').readlines() for n, line in enumerate(lines): if line.rfind('| Number of self-consistency cycles') > -1: niter = int(line.split(':')[-1].strip()) return niter def get_electronic_temperature(self): return self.read_electronic_temperature() def read_electronic_temperature(self): width = None lines = open(self.out, 'r').readlines() for n, line in enumerate(lines): if line.rfind('Occupation type:') > -1: width = float(line.split('=')[-1].strip().split()[0]) return width def get_number_of_electrons(self): return self.read_number_of_electrons() def read_number_of_electrons(self): nelect = None lines = open(self.out, 'r').readlines() for n, line in enumerate(lines): if line.rfind('The structure contains') > -1: nelect = float(line.split()[-2].strip()) return nelect def get_number_of_bands(self): return self.read_number_of_bands() def read_number_of_bands(self): nband = None lines = open(self.out, 'r').readlines() for n, line in enumerate(lines): if line.rfind('Number of Kohn-Sham states') > -1: nband = int(line.split(':')[-1].strip()) return nband def get_k_point_weights(self): return self.read_kpts(mode='k_point_weights') def get_bz_k_points(self): raise NotImplementedError def get_ibz_k_points(self): return self.read_kpts(mode='ibz_k_points') def get_spin_polarized(self): return self.read_number_of_spins() def get_number_of_spins(self): return 1 + self.get_spin_polarized() def get_magnetic_moment(self, atoms=None): return self.read_magnetic_moment() def read_number_of_spins(self): spinpol = None lines = open(self.out, 'r').readlines() for n, line in enumerate(lines): if line.rfind('| Number of spin channels') > -1: spinpol = int(line.split(':')[-1].strip()) - 1 return spinpol def read_magnetic_moment(self): magmom = None if not self.get_spin_polarized(): magmom = 0.0 else: # only for spinpolarized system Magnetisation is printed for line in open(self.out, 'r').readlines(): if line.find('N_up - N_down') != -1: # last one magmom = float(line.split(':')[-1].strip()) return magmom def get_fermi_level(self): return self.read_fermi() def get_eigenvalues(self, kpt=0, spin=0): return self.read_eigenvalues(kpt, spin, 'eigenvalues') def get_occupations(self, kpt=0, spin=0): return self.read_eigenvalues(kpt, spin, 'occupations') def read_fermi(self): E_f = None lines = open(self.out, 'r').readlines() for n, line in enumerate(lines): if line.rfind('| Chemical potential (Fermi level) in eV') > -1: E_f = float(line.split(':')[-1].strip()) return E_f def read_kpts(self, mode='ibz_k_points'): """ Returns list of kpts weights or kpts coordinates. 
""" values = [] assert mode in ['ibz_k_points', 'k_point_weights'] lines = open(self.out, 'r').readlines() kpts = None kptsstart = None for n, line in enumerate(lines): if line.rfind('| Number of k-points') > -1: kpts = int(line.split(':')[-1].strip()) for n, line in enumerate(lines): if line.rfind('K-points in task') > -1: kptsstart = n # last occurrence of ( assert kpts is not None assert kptsstart is not None text = lines[kptsstart + 1:] values = [] for line in text[:kpts]: if mode == 'ibz_k_points': b = [float(c.strip()) for c in line.split()[4:7]] else: b = float(line.split()[-1]) values.append(b) if len(values) == 0: values = None return np.array(values) def read_eigenvalues(self, kpt=0, spin=0, mode='eigenvalues'): """ Returns list of last eigenvalues, occupations for given kpt and spin. """ values = [] assert mode in ['eigenvalues', 'occupations'] lines = open(self.out, 'r').readlines() # number of kpts kpts = None for n, line in enumerate(lines): if line.rfind('| Number of k-points') > -1: kpts = int(line.split(':')[-1].strip()) break assert kpts is not None assert kpt + 1 <= kpts # find last (eigenvalues) eigvalstart = None for n, line in enumerate(lines): # eigenvalues come after Preliminary charge convergence reached if line.rfind('Preliminary charge convergence reached') > -1: eigvalstart = n break assert eigvalstart is not None lines = lines[eigvalstart:] for n, line in enumerate(lines): if line.rfind('Writing Kohn-Sham eigenvalues') > -1: eigvalstart = n break assert eigvalstart is not None text = lines[eigvalstart + 1:] # remove first 1 line # find the requested k-point nbands = self.read_number_of_bands() sppol = self.get_spin_polarized() beg = ((nbands + 4 + int(sppol) * 1) * kpt * (sppol + 1) + 3 + sppol * 2 + kpt * sppol) if self.get_spin_polarized(): if spin == 0: beg = beg end = beg + nbands else: beg = beg + nbands + 5 end = beg + nbands else: end = beg + nbands values = [] for line in text[beg:end]: # aims prints stars for large values ... line = line.replace('**************', ' 10000') line = line.replace('***************', ' 10000') line = line.replace('****************', ' 10000') b = [float(c.strip()) for c in line.split()[1:]] values.append(b) if mode == 'eigenvalues': values = [Hartree * v[1] for v in values] else: values = [v[0] for v in values] if len(values) == 0: values = None return np.array(values) class AimsCube: "Object to ensure the output of cube files, can be attached to Aims object" def __init__(self, origin=(0, 0, 0), edges=[(0.1, 0.0, 0.0), (0.0, 0.1, 0.0), (0.0, 0.0, 0.1)], points=(50, 50, 50), plots=None): """parameters: origin, edges, points: Same as in the FHI-aims output plots: what to print, same names as in FHI-aims """ self.name = 'AimsCube' self.origin = origin self.edges = edges self.points = points self.plots = plots def ncubes(self): """returns the number of cube files to output """ if self.plots: number = len(self.plots) else: number = 0 return number def set(self, **kwargs): """ set any of the parameters ... """ # NOT IMPLEMENTED AT THE MOMENT! def move_to_base_name(self, basename): """ when output tracking is on or the base namem is not standard, this routine will rename add the base to the cube file output for easier tracking """ for plot in self.plots: found = False cube = plot.split() if (cube[0] == 'total_density' or cube[0] == 'spin_density' or cube[0] == 'delta_density'): found = True old_name = cube[0] + '.cube' new_name = basename + '.' 
+ old_name if cube[0] == 'eigenstate' or cube[0] == 'eigenstate_density': found = True state = int(cube[1]) s_state = cube[1] for i in [10, 100, 1000, 10000]: if state < i: s_state = '0' + s_state old_name = cube[0] + '_' + s_state + '_spin_1.cube' new_name = basename + '.' + old_name if found: os.system('mv ' + old_name + ' ' + new_name) def add_plot(self, name): """ in case you forgot one ... """ self.plots += [name] def write(self, file): """ write the necessary output to the already opened control.in """ file.write('output cube ' + self.plots[0] + '\n') file.write(' cube origin ') for ival in self.origin: file.write(str(ival) + ' ') file.write('\n') for i in range(3): file.write(' cube edge ' + str(self.points[i]) + ' ') for ival in self.edges[i]: file.write(str(ival) + ' ') file.write('\n') if self.ncubes() > 1: for i in range(self.ncubes() - 1): file.write('output cube ' + self.plots[i + 1] + '\n') ase-3.22.1/ase/calculators/amber.py000066400000000000000000000342751415166253600171170ustar00rootroot00000000000000"""This module defines an ASE interface to Amber16. Usage: (Tested only with Amber16, http://ambermd.org/) Before usage, input files (infile, topologyfile, incoordfile) """ import subprocess import numpy as np from ase.calculators.calculator import Calculator, FileIOCalculator import ase.units as units from scipy.io import netcdf class Amber(FileIOCalculator): """Class for doing Amber classical MM calculations. Example: mm.in:: Minimization with Cartesian restraints &cntrl imin=1, maxcyc=200, (invoke minimization) ntpr=5, (print frequency) &end """ implemented_properties = ['energy', 'forces'] discard_results_on_any_change = True def __init__(self, restart=None, ignore_bad_restart_file=FileIOCalculator._deprecated, label='amber', atoms=None, command=None, amber_exe='sander -O ', infile='mm.in', outfile='mm.out', topologyfile='mm.top', incoordfile='mm.crd', outcoordfile='mm_dummy.crd', mdcoordfile=None, **kwargs): """Construct Amber-calculator object. Parameters ========== label: str Name used for all files. May contain a directory. atoms: Atoms object Optional Atoms object to which the calculator will be attached. When restarting, atoms will get its positions and unit-cell updated from file. label: str Prefix to use for filenames (label.in, label.txt, ...). amber_exe: str Name of the amber executable, one can add options like -O and other parameters here infile: str Input filename for amber, contains instuctions about the run outfile: str Logfilename for amber topologyfile: str Name of the amber topology file incoordfile: str Name of the file containing the input coordinates of atoms outcoordfile: str Name of the file containing the output coordinates of atoms this file is not used in case minisation/dynamics is done by ase. It is only relevant if you run MD/optimisation many steps with amber. 
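        Example
        =======
        An illustrative sketch: the structure file name is hypothetical, and
        mm.in, mm.top and mm.crd are assumed to have been prepared with the
        usual Amber tools (e.g. tleap), with mm.in set up so that sander
        writes the mden/mdfrc files this calculator reads.

        >>> from ase.io import read
        >>> from ase.calculators.amber import Amber
        >>> atoms = read('system.pdb')  # hypothetical input structure
        >>> atoms.calc = Amber(amber_exe='sander -O ',
        ...                    infile='mm.in', outfile='mm.out',
        ...                    topologyfile='mm.top',
        ...                    incoordfile='mm.crd')
        >>> energy = atoms.get_potential_energy()
        >>> forces = atoms.get_forces()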
""" self.out = 'mm.log' self.positions = None self.atoms = None self.set(**kwargs) self.amber_exe = amber_exe self.infile = infile self.outfile = outfile self.topologyfile = topologyfile self.incoordfile = incoordfile self.outcoordfile = outcoordfile self.mdcoordfile = mdcoordfile if command is not None: self.command = command else: self.command = (self.amber_exe + ' -i ' + self.infile + ' -o ' + self.outfile + ' -p ' + self.topologyfile + ' -c ' + self.incoordfile + ' -r ' + self.outcoordfile) if self.mdcoordfile is not None: self.command = self.command + ' -x ' + self.mdcoordfile FileIOCalculator.__init__(self, restart, ignore_bad_restart_file, label, atoms, **kwargs) def write_input(self, atoms=None, properties=None, system_changes=None): """Write updated coordinates to a file.""" FileIOCalculator.write_input(self, atoms, properties, system_changes) self.write_coordinates(atoms) def read_results(self): """ read energy and forces """ self.read_energy() self.read_forces() def write_coordinates(self, atoms, filename=''): """ write amber coordinates in netCDF format, only rectangular unit cells are allowed""" if filename == '': filename = self.incoordfile fout = netcdf.netcdf_file(filename, 'w') # dimension fout.Conventions = 'AMBERRESTART' fout.ConventionVersion = "1.0" fout.title = 'Ase-generated-amber-restart-file' fout.application = "AMBER" fout.program = "ASE" fout.programVersion = "1.0" fout.createDimension('cell_spatial', 3) fout.createDimension('label', 5) fout.createDimension('cell_angular', 3) fout.createDimension('time', 1) time = fout.createVariable('time', 'd', ('time',)) time.units = 'picosecond' time[0] = 0 fout.createDimension('spatial', 3) spatial = fout.createVariable('spatial', 'c', ('spatial',)) spatial[:] = np.asarray(list('xyz')) # spatial = 'xyz' natom = len(atoms) fout.createDimension('atom', natom) coordinates = fout.createVariable('coordinates', 'd', ('atom', 'spatial')) coordinates.units = 'angstrom' coordinates[:] = atoms.get_positions()[:] if atoms.get_velocities() is not None: velocities = fout.createVariable('velocities', 'd', ('atom', 'spatial')) velocities.units = 'angstrom/picosecond' velocities[:] = atoms.get_velocities()[:] # title cell_angular = fout.createVariable('cell_angular', 'c', ('cell_angular', 'label')) cell_angular[0] = np.asarray(list('alpha')) cell_angular[1] = np.asarray(list('beta ')) cell_angular[2] = np.asarray(list('gamma')) # title cell_spatial = fout.createVariable('cell_spatial', 'c', ('cell_spatial',)) cell_spatial[0], cell_spatial[1], cell_spatial[2] = 'a', 'b', 'c' # data cell_lengths = fout.createVariable('cell_lengths', 'd', ('cell_spatial',)) cell_lengths.units = 'angstrom' cell_lengths[0] = atoms.get_cell()[0, 0] cell_lengths[1] = atoms.get_cell()[1, 1] cell_lengths[2] = atoms.get_cell()[2, 2] cell_angles = fout.createVariable('cell_angles', 'd', ('cell_angular',)) box_alpha, box_beta, box_gamma = 90.0, 90.0, 90.0 cell_angles[0] = box_alpha cell_angles[1] = box_beta cell_angles[2] = box_gamma cell_angles.units = 'degree' fout.close() def read_coordinates(self, atoms, filename=''): """Import AMBER16 netCDF restart files. 
Reads atom positions and velocities (if available), and unit cell (if available) This may be useful if you have run amber many steps and want to read new positions and velocities """ if filename == '': filename = self.outcoordfile from scipy.io import netcdf import numpy as np import ase.units as units fin = netcdf.netcdf_file(filename, 'r') all_coordinates = fin.variables['coordinates'][:] get_last_frame = False if hasattr(all_coordinates, 'ndim'): if all_coordinates.ndim == 3: get_last_frame = True elif hasattr(all_coordinates, 'shape'): if len(all_coordinates.shape) == 3: get_last_frame = True if get_last_frame: all_coordinates = all_coordinates[-1] atoms.set_positions(all_coordinates) if 'velocities' in fin.variables: all_velocities = fin.variables['velocities'][:] / (1000 * units.fs) if get_last_frame: all_velocities = all_velocities[-1] atoms.set_velocities(all_velocities) if 'cell_lengths' in fin.variables: all_abc = fin.variables['cell_lengths'] if get_last_frame: all_abc = all_abc[-1] a, b, c = all_abc all_angles = fin.variables['cell_angles'] if get_last_frame: all_angles = all_angles[-1] alpha, beta, gamma = all_angles if (all(angle > 89.99 for angle in [alpha, beta, gamma]) and all(angle < 90.01 for angle in [alpha, beta, gamma])): atoms.set_cell( np.array([[a, 0, 0], [0, b, 0], [0, 0, c]])) atoms.set_pbc(True) else: raise NotImplementedError('only rectangular cells are' ' implemented in ASE-AMBER') else: atoms.set_pbc(False) def read_energy(self, filename='mden'): """ read total energy from amber file """ with open(filename, 'r') as fd: lines = fd.readlines() self.results['energy'] = \ float(lines[16].split()[2]) * units.kcal / units.mol def read_forces(self, filename='mdfrc'): """ read forces from amber file """ fd = netcdf.netcdf_file(filename, 'r') try: forces = fd.variables['forces'] self.results['forces'] = forces[-1, :, :] \ / units.Ang * units.kcal / units.mol finally: fd.close() def set_charges(self, selection, charges, parmed_filename=None): """ Modify amber topology charges to contain the updated QM charges, needed in QM/MM. Using amber's parmed program to change charges. 
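        Example (illustrative only: the indices, charges and the parmed
        script name are made up, and the parmed program must be available):

        >>> calc.set_charges(selection=[0, 1, 2],
        ...                  charges=[-0.834, 0.417, 0.417],
        ...                  parmed_filename='qm_charges.parmed')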
""" qm_list = list(selection) with open(parmed_filename, 'w') as fout: fout.write('# update the following QM charges \n') for i, charge in zip(qm_list, charges): fout.write('change charge @' + str(i + 1) + ' ' + str(charge) + ' \n') fout.write('# Output the topology file \n') fout.write('outparm ' + self.topologyfile + ' \n') parmed_command = ('parmed -O -i ' + parmed_filename + ' -p ' + self.topologyfile + ' > ' + self.topologyfile + '.log 2>&1') subprocess.check_call(parmed_command, shell=True, cwd=self.directory) def get_virtual_charges(self, atoms): with open(self.topologyfile, 'r') as fd: topology = fd.readlines() for n, line in enumerate(topology): if '%FLAG CHARGE' in line: chargestart = n + 2 lines1 = topology[chargestart:(chargestart + (len(atoms) - 1) // 5 + 1)] mm_charges = [] for line in lines1: for el in line.split(): mm_charges.append(float(el) / 18.2223) charges = np.array(mm_charges) return charges def add_virtual_sites(self, positions): return positions # no virtual sites def redistribute_forces(self, forces): return forces def map(atoms, top): p = np.zeros((2, len(atoms)), dtype="int") elements = atoms.get_chemical_symbols() unique_elements = np.unique(atoms.get_chemical_symbols()) for i in range(len(unique_elements)): idx = 0 for j in range(len(atoms)): if elements[j] == unique_elements[i]: idx += 1 symbol = unique_elements[i] + np.str(idx) for k in range(len(atoms)): if top.atoms[k].name == symbol: p[0, k] = j p[1, j] = k break return p try: import sander have_sander = True except ImportError: have_sander = False class SANDER(Calculator): """ Interface to SANDER using Python interface Requires sander Python bindings from http://ambermd.org/ """ implemented_properties = ['energy', 'forces'] def __init__(self, atoms=None, label=None, top=None, crd=None, mm_options=None, qm_options=None, permutation=None, **kwargs): if not have_sander: raise RuntimeError("sander Python module could not be imported!") Calculator.__init__(self, label, atoms) self.permutation = permutation if qm_options is not None: sander.setup(top, crd.coordinates, crd.box, mm_options, qm_options) else: sander.setup(top, crd.coordinates, crd.box, mm_options) def calculate(self, atoms, properties, system_changes): Calculator.calculate(self, atoms, properties, system_changes) if system_changes: if 'energy' in self.results: del self.results['energy'] if 'forces' in self.results: del self.results['forces'] if 'energy' not in self.results: if self.permutation is None: crd = np.reshape(atoms.get_positions(), (1, len(atoms), 3)) else: crd = np.reshape(atoms.get_positions() [self.permutation[0, :]], (1, len(atoms), 3)) sander.set_positions(crd) e, f = sander.energy_forces() self.results['energy'] = e.tot * units.kcal / units.mol if self.permutation is None: self.results['forces'] = (np.reshape(np.array(f), (len(atoms), 3)) * units.kcal / units.mol) else: ff = np.reshape(np.array(f), (len(atoms), 3)) * \ units.kcal / units.mol self.results['forces'] = ff[self.permutation[1, :]] if 'forces' not in self.results: if self.permutation is None: crd = np.reshape(atoms.get_positions(), (1, len(atoms), 3)) else: crd = np.reshape(atoms.get_positions()[self.permutation[0, :]], (1, len(atoms), 3)) sander.set_positions(crd) e, f = sander.energy_forces() self.results['energy'] = e.tot * units.kcal / units.mol if self.permutation is None: self.results['forces'] = (np.reshape(np.array(f), (len(atoms), 3)) * units.kcal / units.mol) else: ff = np.reshape(np.array(f), (len(atoms), 3)) * \ units.kcal / units.mol self.results['forces'] 
= ff[self.permutation[1, :]] ase-3.22.1/ase/calculators/autodetect.py000066400000000000000000000062351415166253600201650ustar00rootroot00000000000000import os import shutil import importlib from ase.calculators.calculator import names builtins = {'eam', 'emt', 'ff', 'lj', 'morse', 'tip3p', 'tip4p'} required_envvars = {'abinit': ['ABINIT_PP_PATH'], 'elk': ['ELK_SPECIES_PATH'], 'openmx': ['OPENMX_DFT_DATA_PATH']} default_executables = {'abinit': ['abinit'], 'cp2k': ['cp2k_shell', 'cp2k_shell.psmp', 'cp2k_shell.popt', 'cp2k_shell.ssmp', 'cp2k_shell.sopt'], 'dftb': ['dftb+'], 'elk': ['elk', 'elk-lapw'], 'espresso': ['pw.x'], 'gamess_us': ['rungms'], 'gromacs': ['gmx', 'gmx_d', 'gmx_mpi', 'gmx_mpi_d'], 'lammpsrun': ['lammps', 'lmp', 'lmp_mpi', 'lmp_serial'], 'mopac': ['mopac', 'run_mopac7'], # run_mopac7: debian 'nwchem': ['nwchem'], 'octopus': ['octopus'], 'openmx': ['openmx'], 'psi4': ['psi4'], 'siesta': ['siesta'], } python_modules = {'gpaw': 'gpaw', 'asap': 'asap3', 'lammpslib': 'lammps'} def get_executable_env_var(name): return 'ASE_{}_COMMAND'.format(name.upper()) def detect(name): assert name in names d = {'name': name} if name in builtins: d['type'] = 'builtin' return d if name in python_modules: loader = importlib.find_loader(python_modules[name]) if loader is not None: d['type'] = 'python' d['module'] = python_modules[name] d['path'] = loader.get_filename() return d envvar = get_executable_env_var(name) if envvar in os.environ: d['command'] = os.environ[envvar] d['envvar'] = envvar d['type'] = 'environment' return d if name in default_executables: commands = default_executables[name] for command in commands: fullpath = shutil.which(command) if fullpath: d['command'] = command d['fullpath'] = fullpath d['type'] = 'which' return d def detect_calculators(): configs = {} for name in names: result = detect(name) if result: configs[name] = result return configs def format_configs(configs): messages = [] for name in names: config = configs.get(name) if config is None: state = 'no' else: type = config['type'] if type == 'builtin': state = 'yes, builtin: module ase.calculators.{name}' elif type == 'python': state = 'yes, python: {module} ▶ {path}' elif type == 'which': state = 'yes, shell command: {command} ▶ {fullpath}' else: state = 'yes, environment: ${envvar} ▶ {command}' state = state.format(**config) messages.append('{:<10s} {}'.format(name, state)) return messages ase-3.22.1/ase/calculators/bond_polarizability.py000066400000000000000000000110471415166253600220610ustar00rootroot00000000000000from typing import Tuple import numpy as np from ase.units import Bohr, Ha from ase.data import covalent_radii from ase.neighborlist import NeighborList class LippincottStuttman: # atomic polarizability values from: # Lippincott and Stutman J. Phys. Chem. 68 (1964) 2926-2940 # DOI: 10.1021/j100792a033 # see also: # Marinov and Zotov Phys. Rev. 
B 55 (1997) 2938-2944 # DOI: 10.1103/PhysRevB.55.2938 # unit: Angstrom^3 atomic_polarizability = { 'B': 1.358, 'C': 0.978, 'N': 0.743, 'O': 0.592, 'Al': 3.918, 'Si': 2.988, } # reduced electronegativity Table I reduced_eletronegativity = { 'B': 0.538, 'C': 0.846, 'N': 0.927, 'O': 1.0, 'Al': 0.533, 'Si': 0.583, } def __call__(self, el1: str, el2: str, length: float) -> Tuple[float, float]: """Bond polarizability Parameters ---------- el1: element string el2: element string length: float Returns ------- alphal: float Parallel component alphap: float Perpendicular component """ alpha1 = self.atomic_polarizability[el1] alpha2 = self.atomic_polarizability[el2] ren1 = self.reduced_eletronegativity[el1] ren2 = self.reduced_eletronegativity[el2] sigma = 1. if el1 != el2: sigma = np.exp(- (ren1 - ren2)**2 / 4) # parallel component alphal = sigma * length**4 / (4**4 * alpha1 * alpha2)**(1. / 6) # XXX consider fractional covalency ? # prependicular component alphap = ((ren1**2 * alpha1 + ren2**2 * alpha2) / (ren1**2 + ren2**2)) # XXX consider fractional covalency ? return alphal, alphap class Linearized: def __init__(self): self._data = { # L. Wirtz, M. Lazzeri, F. Mauri, A. Rubio, # Phys. Rev. B 2005, 71, 241402. # R0 al al' ap ap' 'CC': (1.53, 1.69, 7.43, 0.71, 0.37), 'BN': (1.56, 1.58, 4.22, 0.42, 0.90), } def __call__(self, el1: str, el2: str, length: float) -> Tuple[float, float]: """Bond polarizability Parameters ---------- el1: element string el2: element string length: float Returns ------- alphal: float Parallel component alphap: float Perpendicular component """ if el1 > el2: bond = el2 + el1 else: bond = el1 + el2 assert bond in self._data length0, al, ald, ap, apd = self._data[bond] return al + ald * (length - length0), ap + apd * (length - length0) class BondPolarizability: def __init__(self, model=LippincottStuttman()): self.model = model def __call__(self, *args, **kwargs): """Shorthand for calculate""" return self.calculate(*args, **kwargs) def calculate(self, atoms, radiicut=1.5): """Sum up the bond polarizability from all bonds Parameters ---------- atoms: Atoms object radiicut: float Bonds are counted up to radiicut * (sum of covalent radii of the pairs) Default: 1.5 Returns ------- polarizability tensor with unit (e^2 Angstrom^2 / eV). 
Multiply with Bohr * Ha to get (Angstrom^3) """ radii = np.array([covalent_radii[z] for z in atoms.numbers]) nl = NeighborList(radii * 1.5, skin=0, self_interaction=False) nl.update(atoms) pos_ac = atoms.get_positions() alpha = 0 for ia, atom in enumerate(atoms): indices, offsets = nl.get_neighbors(ia) pos_ac = atoms.get_positions() - atoms.get_positions()[ia] for ib, offset in zip(indices, offsets): weight = 1 if offset.any(): # this comes from a periodic image weight = 0.5 # count half the bond only dist_c = pos_ac[ib] + np.dot(offset, atoms.get_cell()) dist = np.linalg.norm(dist_c) al, ap = self.model(atom.symbol, atoms[ib].symbol, dist) eye3 = np.eye(3) / 3 alpha += weight * (al + 2 * ap) * eye3 alpha += weight * (al - ap) * ( np.outer(dist_c, dist_c) / dist**2 - eye3) return alpha / Bohr / Ha ase-3.22.1/ase/calculators/calculator.py000066400000000000000000001010301415166253600201420ustar00rootroot00000000000000import os import copy import subprocess from math import pi, sqrt import pathlib from typing import Union, Optional, List, Set, Dict, Any import warnings import numpy as np from ase.cell import Cell from ase.outputs import Properties, all_outputs from ase.utils import jsonable from ase.calculators.abc import GetPropertiesMixin class CalculatorError(RuntimeError): """Base class of error types related to ASE calculators.""" class CalculatorSetupError(CalculatorError): """Calculation cannot be performed with the given parameters. Reasons to raise this errors are: * The calculator is not properly configured (missing executable, environment variables, ...) * The given atoms object is not supported * Calculator parameters are unsupported Typically raised before a calculation.""" class EnvironmentError(CalculatorSetupError): """Raised if calculator is not properly set up with ASE. May be missing an executable or environment variables.""" class InputError(CalculatorSetupError): """Raised if inputs given to the calculator were incorrect. Bad input keywords or values, or missing pseudopotentials. This may be raised before or during calculation, depending on when the problem is detected.""" class CalculationFailed(CalculatorError): """Calculation failed unexpectedly. Reasons to raise this error are: * Calculation did not converge * Calculation ran out of memory * Segmentation fault or other abnormal termination * Arithmetic trouble (singular matrices, NaN, ...) Typically raised during calculation.""" class SCFError(CalculationFailed): """SCF loop did not converge.""" class ReadError(CalculatorError): """Unexpected irrecoverable error while reading calculation results.""" class PropertyNotImplementedError(NotImplementedError): """Raised if a calculator does not implement the requested property.""" class PropertyNotPresent(CalculatorError): """Requested property is missing. Maybe it was never calculated, or for some reason was not extracted with the rest of the results, without being a fatal ReadError.""" def compare_atoms(atoms1, atoms2, tol=1e-15, excluded_properties=None): """Check for system changes since last calculation. 
Properties in ``excluded_properties`` are not checked.""" if atoms1 is None: system_changes = all_changes[:] else: system_changes = [] properties_to_check = set(all_changes) if excluded_properties: properties_to_check -= set(excluded_properties) # Check properties that aren't in Atoms.arrays but are attributes of # Atoms objects for prop in ['cell', 'pbc']: if prop in properties_to_check: properties_to_check.remove(prop) if not equal(getattr(atoms1, prop), getattr(atoms2, prop), atol=tol): system_changes.append(prop) arrays1 = set(atoms1.arrays) arrays2 = set(atoms2.arrays) # Add any properties that are only in atoms1.arrays or only in # atoms2.arrays (and aren't excluded). Note that if, e.g. arrays1 has # `initial_charges` which is merely zeros and arrays2 does not have # this array, we'll still assume that the system has changed. However, # this should only occur rarely. system_changes += properties_to_check & (arrays1 ^ arrays2) # Finally, check all of the non-excluded properties shared by the atoms # arrays. for prop in properties_to_check & arrays1 & arrays2: if not equal(atoms1.arrays[prop], atoms2.arrays[prop], atol=tol): system_changes.append(prop) return system_changes all_properties = ['energy', 'forces', 'stress', 'stresses', 'dipole', 'charges', 'magmom', 'magmoms', 'free_energy', 'energies'] all_changes = ['positions', 'numbers', 'cell', 'pbc', 'initial_charges', 'initial_magmoms'] # Recognized names of calculators sorted alphabetically: names = ['abinit', 'ace', 'aims', 'amber', 'asap', 'castep', 'cp2k', 'crystal', 'demon', 'demonnano', 'dftb', 'dftd3', 'dmol', 'eam', 'elk', 'emt', 'espresso', 'exciting', 'ff', 'fleur', 'gamess_us', 'gaussian', 'gpaw', 'gromacs', 'gulp', 'hotbit', 'kim', 'lammpslib', 'lammpsrun', 'lj', 'mopac', 'morse', 'nwchem', 'octopus', 'onetep', 'openmx', 'orca', 'psi4', 'qchem', 'siesta', 'tip3p', 'tip4p', 'turbomole', 'vasp'] special = {'cp2k': 'CP2K', 'demonnano': 'DemonNano', 'dftd3': 'DFTD3', 'dmol': 'DMol3', 'eam': 'EAM', 'elk': 'ELK', 'emt': 'EMT', 'crystal': 'CRYSTAL', 'ff': 'ForceField', 'fleur': 'FLEUR', 'gamess_us': 'GAMESSUS', 'gulp': 'GULP', 'kim': 'KIM', 'lammpsrun': 'LAMMPS', 'lammpslib': 'LAMMPSlib', 'lj': 'LennardJones', 'mopac': 'MOPAC', 'morse': 'MorsePotential', 'nwchem': 'NWChem', 'openmx': 'OpenMX', 'orca': 'ORCA', 'qchem': 'QChem', 'tip3p': 'TIP3P', 'tip4p': 'TIP4P'} external_calculators = {} def register_calculator_class(name, cls): """ Add the class into the database. """ assert name not in external_calculators external_calculators[name] = cls names.append(name) names.sort() def get_calculator_class(name): """Return calculator class.""" if name == 'asap': from asap3 import EMT as Calculator elif name == 'gpaw': from gpaw import GPAW as Calculator elif name == 'hotbit': from hotbit import Calculator elif name == 'vasp2': from ase.calculators.vasp import Vasp2 as Calculator elif name == 'ace': from ase.calculators.acemolecule import ACE as Calculator elif name == 'Psi4': from ase.calculators.psi4 import Psi4 as Calculator elif name in external_calculators: Calculator = external_calculators[name] else: classname = special.get(name, name.title()) module = __import__('ase.calculators.' 
+ name, {}, None, [classname]) Calculator = getattr(module, classname) return Calculator def equal(a, b, tol=None, rtol=None, atol=None): """ndarray-enabled comparison function.""" # XXX Known bugs: # * Comparing cell objects (pbc not part of array representation) # * Infinite recursion for cyclic dicts # * Can of worms is open if tol is not None: msg = 'Use `equal(a, b, rtol=..., atol=...)` instead of `tol=...`' warnings.warn(msg, DeprecationWarning) assert rtol is None and atol is None, \ 'Do not use deprecated `tol` with `atol` and/or `rtol`' rtol = tol atol = tol a_is_dict = isinstance(a, dict) b_is_dict = isinstance(b, dict) if a_is_dict or b_is_dict: # Check that both a and b are dicts if not (a_is_dict and b_is_dict): return False if a.keys() != b.keys(): return False return all(equal(a[key], b[key], rtol=rtol, atol=atol) for key in a) if np.shape(a) != np.shape(b): return False if rtol is None and atol is None: return np.array_equal(a, b) if rtol is None: rtol = 0 if atol is None: atol = 0 return np.allclose(a, b, rtol=rtol, atol=atol) def kptdensity2monkhorstpack(atoms, kptdensity=3.5, even=True): """Convert k-point density to Monkhorst-Pack grid size. atoms: Atoms object Contains unit cell and information about boundary conditions. kptdensity: float Required k-point density. Default value is 3.5 point per Ang^-1. even: bool Round up to even numbers. """ recipcell = atoms.cell.reciprocal() kpts = [] for i in range(3): if atoms.pbc[i]: k = 2 * pi * sqrt((recipcell[i]**2).sum()) * kptdensity if even: kpts.append(2 * int(np.ceil(k / 2))) else: kpts.append(int(np.ceil(k))) else: kpts.append(1) return np.array(kpts) def kpts2mp(atoms, kpts, even=False): if kpts is None: return np.array([1, 1, 1]) if isinstance(kpts, (float, int)): return kptdensity2monkhorstpack(atoms, kpts, even) else: return kpts def kpts2sizeandoffsets(size=None, density=None, gamma=None, even=None, atoms=None): """Helper function for selecting k-points. Use either size or density. size: 3 ints Number of k-points. density: float K-point density in units of k-points per Ang^-1. gamma: None or bool Should the Gamma-point be included? Yes / no / don't care: True / False / None. even: None or bool Should the number of k-points be even? Yes / no / don't care: True / False / None. atoms: Atoms object Needed for calculating k-point density. 
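    Example (illustrative sketch, not a doctest; assumes ``atoms`` is an
    existing, fully periodic Atoms object):

    >>> size, offsets = kpts2sizeandoffsets(size=(4, 4, 4), gamma=True,
    ...                                     atoms=atoms)

    Here the 4x4x4 mesh is kept as given, and each periodic direction gets
    an offset of 0.5 / 4 so that the Monkhorst-Pack grid includes the
    Gamma point.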
""" if size is not None and density is not None: raise ValueError('Cannot specify k-point mesh size and ' 'density simultaneously') elif density is not None and atoms is None: raise ValueError('Cannot set k-points from "density" unless ' 'Atoms are provided (need BZ dimensions).') if size is None: if density is None: size = [1, 1, 1] else: size = kptdensity2monkhorstpack(atoms, density, None) # Not using the rounding from kptdensity2monkhorstpack as it doesn't do # rounding to odd numbers if even is not None: size = np.array(size) remainder = size % 2 if even: size += remainder else: # Round up to odd numbers size += (1 - remainder) offsets = [0, 0, 0] if atoms is None: pbc = [True, True, True] else: pbc = atoms.pbc if gamma is not None: for i, s in enumerate(size): if pbc[i] and s % 2 != bool(gamma): offsets[i] = 0.5 / s return size, offsets @jsonable('kpoints') class KPoints: def __init__(self, kpts=None): if kpts is None: kpts = np.zeros((1, 3)) self.kpts = kpts def todict(self): return vars(self) def kpts2kpts(kpts, atoms=None): from ase.dft.kpoints import monkhorst_pack if kpts is None: return KPoints() if hasattr(kpts, 'kpts'): return kpts if isinstance(kpts, dict): if 'kpts' in kpts: return KPoints(kpts['kpts']) if 'path' in kpts: cell = Cell.ascell(atoms.cell) return cell.bandpath(pbc=atoms.pbc, **kpts) size, offsets = kpts2sizeandoffsets(atoms=atoms, **kpts) return KPoints(monkhorst_pack(size) + offsets) if isinstance(kpts[0], int): return KPoints(monkhorst_pack(kpts)) return KPoints(np.array(kpts)) def kpts2ndarray(kpts, atoms=None): """Convert kpts keyword to 2-d ndarray of scaled k-points.""" return kpts2kpts(kpts, atoms=atoms).kpts class EigenvalOccupationMixin: """Define 'eigenvalues' and 'occupations' properties on class. eigenvalues and occupations will be arrays of shape (spin, kpts, nbands). Classes must implement the old-fashioned get_eigenvalues and get_occupations methods.""" @property def eigenvalues(self): return self.build_eig_occ_array(self.get_eigenvalues) @property def occupations(self): return self.build_eig_occ_array(self.get_occupation_numbers) def build_eig_occ_array(self, getter): nspins = self.get_number_of_spins() nkpts = len(self.get_ibz_k_points()) nbands = self.get_number_of_bands() arr = np.zeros((nspins, nkpts, nbands)) for s in range(nspins): for k in range(nkpts): arr[s, k, :] = getter(spin=s, kpt=k) return arr class Parameters(dict): """Dictionary for parameters. Special feature: If param is a Parameters instance, then param.xc is a shorthand for param['xc']. """ def __getattr__(self, key): if key not in self: return dict.__getattribute__(self, key) return self[key] def __setattr__(self, key, value): self[key] = value @classmethod def read(cls, filename): """Read parameters from file.""" # We use ast to evaluate literals, avoiding eval() # for security reasons. 
import ast with open(filename) as fd: txt = fd.read().strip() assert txt.startswith('dict(') assert txt.endswith(')') txt = txt[5:-1] # The tostring() representation "dict(...)" is not actually # a literal, so we manually parse that along with the other # formatting that we did manually: dct = {} for line in txt.splitlines(): key, val = line.split('=', 1) key = key.strip() val = val.strip() if val[-1] == ',': val = val[:-1] dct[key] = ast.literal_eval(val) parameters = cls(dct) return parameters def tostring(self): keys = sorted(self) return 'dict(' + ',\n '.join( '{}={!r}'.format(key, self[key]) for key in keys) + ')\n' def write(self, filename): pathlib.Path(filename).write_text(self.tostring()) class Calculator(GetPropertiesMixin): """Base-class for all ASE calculators. A calculator must raise PropertyNotImplementedError if asked for a property that it can't calculate. So, if calculation of the stress tensor has not been implemented, get_stress(atoms) should raise PropertyNotImplementedError. This can be achieved simply by not including the string 'stress' in the list implemented_properties which is a class member. These are the names of the standard properties: 'energy', 'forces', 'stress', 'dipole', 'charges', 'magmom' and 'magmoms'. """ implemented_properties: List[str] = [] 'Properties calculator can handle (energy, forces, ...)' default_parameters: Dict[str, Any] = {} 'Default parameters' ignored_changes: Set[str] = set() 'Properties of Atoms which we ignore for the purposes of cache ' 'invalidation with check_state().' discard_results_on_any_change = False 'Whether we purge the results following any change in the set() method. ' 'Most (file I/O) calculators will probably want this.' _deprecated = object() def __init__(self, restart=None, ignore_bad_restart_file=_deprecated, label=None, atoms=None, directory='.', **kwargs): """Basic calculator implementation. restart: str Prefix for restart file. May contain a directory. Default is None: don't restart. ignore_bad_restart_file: bool Deprecated, please do not use. Passing more than one positional argument to Calculator() is deprecated and will stop working in the future. Ignore broken or missing restart file. By default, it is an error if the restart file is missing or broken. directory: str or PurePath Working directory in which to read and write files and perform calculations. label: str Name used for all files. Not supported by all calculators. May contain a directory, but please use the directory parameter for that instead. atoms: Atoms object Optional Atoms object to which the calculator will be attached. When restarting, atoms will get its positions and unit-cell updated from file. """ self.atoms = None # copy of atoms object from last calculation self.results = {} # calculated properties (energy, forces, ...) self.parameters = None # calculational parameters self._directory = None # Initialize if ignore_bad_restart_file is self._deprecated: ignore_bad_restart_file = False else: warnings.warn(FutureWarning( 'The keyword "ignore_bad_restart_file" is deprecated and ' 'will be removed in a future version of ASE. Passing more ' 'than one positional argument to Calculator is also ' 'deprecated and will stop functioning in the future. ' 'Please pass arguments by keyword (key=value) except ' 'optionally the "restart" keyword.' 
)) if restart is not None: try: self.read(restart) # read parameters, atoms and results except ReadError: if ignore_bad_restart_file: self.reset() else: raise self.directory = directory self.prefix = None if label is not None: if self.directory == '.' and '/' in label: # We specified directory in label, and nothing in the diretory key self.label = label elif '/' not in label: # We specified our directory in the directory keyword # or not at all self.label = '/'.join((self.directory, label)) else: raise ValueError('Directory redundantly specified though ' 'directory="{}" and label="{}". ' 'Please omit "/" in label.' .format(self.directory, label)) if self.parameters is None: # Use default parameters if they were not read from file: self.parameters = self.get_default_parameters() if atoms is not None: atoms.calc = self if self.atoms is not None: # Atoms were read from file. Update atoms: if not (equal(atoms.numbers, self.atoms.numbers) and (atoms.pbc == self.atoms.pbc).all()): raise CalculatorError('Atoms not compatible with file') atoms.positions = self.atoms.positions atoms.cell = self.atoms.cell self.set(**kwargs) if not hasattr(self, 'name'): self.name = self.__class__.__name__.lower() if not hasattr(self, 'get_spin_polarized'): self.get_spin_polarized = self._deprecated_get_spin_polarized @property def directory(self) -> str: return self._directory @directory.setter def directory(self, directory: Union[str, pathlib.PurePath]): self._directory = str(pathlib.Path(directory)) # Normalize path. @property def label(self): if self.directory == '.': return self.prefix # Generally, label ~ directory/prefix # # We use '/' rather than os.pathsep because # 1) directory/prefix does not represent any actual path # 2) We want the same string to work the same on all platforms if self.prefix is None: return self.directory + '/' return '{}/{}'.format(self.directory, self.prefix) @label.setter def label(self, label): if label is None: self.directory = '.' self.prefix = None return tokens = label.rsplit('/', 1) if len(tokens) == 2: directory, prefix = tokens else: assert len(tokens) == 1 directory = '.' prefix = tokens[0] if prefix == '': prefix = None self.directory = directory self.prefix = prefix def set_label(self, label): """Set label and convert label to directory and prefix. Examples: * label='abc': (directory='.', prefix='abc') * label='dir1/abc': (directory='dir1', prefix='abc') * label=None: (directory='.', prefix=None) """ self.label = label def get_default_parameters(self): return Parameters(copy.deepcopy(self.default_parameters)) def todict(self, skip_default=True): defaults = self.get_default_parameters() dct = {} for key, value in self.parameters.items(): if hasattr(value, 'todict'): value = value.todict() if skip_default: default = defaults.get(key, '_no_default_') if default != '_no_default_' and equal(value, default): continue dct[key] = value return dct def reset(self): """Clear all information from old calculation.""" self.atoms = None self.results = {} def read(self, label): """Read atoms, parameters and calculated properties from output file. Read result from self.label file. Raise ReadError if the file is not there. If the file is corrupted or contains an error message from the calculation, a ReadError should also be raised. In case of succes, these attributes must set: atoms: Atoms object The state of the atoms from last calculation. parameters: Parameters object The parameter dictionary. results: dict Calculated properties like energy and forces. 
The FileIOCalculator.read() method will typically read atoms and parameters and get the results dict by calling the read_results() method.""" self.set_label(label) def get_atoms(self): if self.atoms is None: raise ValueError('Calculator has no atoms') atoms = self.atoms.copy() atoms.calc = self return atoms @classmethod def read_atoms(cls, restart, **kwargs): return cls(restart=restart, label=restart, **kwargs).get_atoms() def set(self, **kwargs): """Set parameters like set(key1=value1, key2=value2, ...). A dictionary containing the parameters that have been changed is returned. Subclasses must implement a set() method that will look at the chaneged parameters and decide if a call to reset() is needed. If the changed parameters are harmless, like a change in verbosity, then there is no need to call reset(). The special keyword 'parameters' can be used to read parameters from a file.""" if 'parameters' in kwargs: filename = kwargs.pop('parameters') parameters = Parameters.read(filename) parameters.update(kwargs) kwargs = parameters changed_parameters = {} for key, value in kwargs.items(): oldvalue = self.parameters.get(key) if key not in self.parameters or not equal(value, oldvalue): changed_parameters[key] = value self.parameters[key] = value if self.discard_results_on_any_change and changed_parameters: self.reset() return changed_parameters def check_state(self, atoms, tol=1e-15): """Check for any system changes since last calculation.""" return compare_atoms(self.atoms, atoms, tol=tol, excluded_properties=set(self.ignored_changes)) def get_potential_energy(self, atoms=None, force_consistent=False): energy = self.get_property('energy', atoms) if force_consistent: if 'free_energy' not in self.results: name = self.__class__.__name__ # XXX but we don't know why the energy is not there. # We should raise PropertyNotPresent. Discuss raise PropertyNotImplementedError( 'Force consistent/free energy ("free_energy") ' 'not provided by {0} calculator'.format(name)) return self.results['free_energy'] else: return energy def get_property(self, name, atoms=None, allow_calculation=True): if name not in self.implemented_properties: raise PropertyNotImplementedError('{} property not implemented' .format(name)) if atoms is None: atoms = self.atoms system_changes = [] else: system_changes = self.check_state(atoms) if system_changes: self.reset() if name not in self.results: if not allow_calculation: return None self.calculate(atoms, [name], system_changes) if name not in self.results: # For some reason the calculator was not able to do what we want, # and that is OK. raise PropertyNotImplementedError('{} not present in this ' 'calculation'.format(name)) result = self.results[name] if isinstance(result, np.ndarray): result = result.copy() return result def calculation_required(self, atoms, properties): assert not isinstance(properties, str) system_changes = self.check_state(atoms) if system_changes: return True for name in properties: if name not in self.results: return True return False def calculate(self, atoms=None, properties=['energy'], system_changes=all_changes): """Do the calculation. properties: list of str List of what needs to be calculated. Can be any combination of 'energy', 'forces', 'stress', 'dipole', 'charges', 'magmom' and 'magmoms'. system_changes: list of str List of what has changed since last calculation. Can be any combination of these six: 'positions', 'numbers', 'cell', 'pbc', 'initial_charges' and 'initial_magmoms'. 
Subclasses need to implement this, but can ignore properties and system_changes if they want. Calculated properties should be inserted into results dictionary like shown in this dummy example:: self.results = {'energy': 0.0, 'forces': np.zeros((len(atoms), 3)), 'stress': np.zeros(6), 'dipole': np.zeros(3), 'charges': np.zeros(len(atoms)), 'magmom': 0.0, 'magmoms': np.zeros(len(atoms))} The subclass implementation should first call this implementation to set the atoms attribute and create any missing directories. """ if atoms is not None: self.atoms = atoms.copy() if not os.path.isdir(self._directory): os.makedirs(self._directory) def calculate_numerical_forces(self, atoms, d=0.001): """Calculate numerical forces using finite difference. All atoms will be displaced by +d and -d in all directions.""" from ase.calculators.test import numeric_force return np.array([[numeric_force(atoms, a, i, d) for i in range(3)] for a in range(len(atoms))]) def calculate_numerical_stress(self, atoms, d=1e-6, voigt=True): """Calculate numerical stress using finite difference.""" stress = np.zeros((3, 3), dtype=float) cell = atoms.cell.copy() V = atoms.get_volume() for i in range(3): x = np.eye(3) x[i, i] += d atoms.set_cell(np.dot(cell, x), scale_atoms=True) eplus = atoms.get_potential_energy(force_consistent=True) x[i, i] -= 2 * d atoms.set_cell(np.dot(cell, x), scale_atoms=True) eminus = atoms.get_potential_energy(force_consistent=True) stress[i, i] = (eplus - eminus) / (2 * d * V) x[i, i] += d j = i - 2 x[i, j] = d x[j, i] = d atoms.set_cell(np.dot(cell, x), scale_atoms=True) eplus = atoms.get_potential_energy(force_consistent=True) x[i, j] = -d x[j, i] = -d atoms.set_cell(np.dot(cell, x), scale_atoms=True) eminus = atoms.get_potential_energy(force_consistent=True) stress[i, j] = (eplus - eminus) / (4 * d * V) stress[j, i] = stress[i, j] atoms.set_cell(cell, scale_atoms=True) if voigt: return stress.flat[[0, 4, 8, 5, 2, 1]] else: return stress def _deprecated_get_spin_polarized(self): msg = ('This calculator does not implement get_spin_polarized(). ' 'In the future, calc.get_spin_polarized() will work only on ' 'calculator classes that explicitly implement this method or ' 'inherit the method via specialized subclasses.') warnings.warn(msg, FutureWarning) return False def band_structure(self): """Create band-structure object for plotting.""" from ase.spectrum.band_structure import get_band_structure # XXX This calculator is supposed to just have done a band structure # calculation, but the calculator may not have the correct Fermi level # if it updated the Fermi level after changing k-points. # This will be a problem with some calculators (currently GPAW), and # the user would have to override this by providing the Fermi level # from the selfconsistent calculation. return get_band_structure(calc=self) def calculate_properties(self, atoms, properties): """This method is experimental; currently for internal use.""" for name in properties: if name not in all_outputs: raise ValueError(f'No such property: {name}') # We ignore system changes for now. 
self.calculate(atoms, properties, system_changes=all_changes) props = self.export_properties() for name in properties: if name not in props: raise PropertyNotPresent(name) return props def export_properties(self): return Properties(self.results) class FileIOCalculator(Calculator): """Base class for calculators that write/read input/output files.""" command: Optional[str] = None 'Command used to start calculation' def __init__(self, restart=None, ignore_bad_restart_file=Calculator._deprecated, label=None, atoms=None, command=None, **kwargs): """File-IO calculator. command: str Command used to start calculation. """ Calculator.__init__(self, restart, ignore_bad_restart_file, label, atoms, **kwargs) if command is not None: self.command = command else: name = 'ASE_' + self.name.upper() + '_COMMAND' self.command = os.environ.get(name, self.command) def calculate(self, atoms=None, properties=['energy'], system_changes=all_changes): Calculator.calculate(self, atoms, properties, system_changes) self.write_input(self.atoms, properties, system_changes) if self.command is None: raise CalculatorSetupError( 'Please set ${} environment variable ' .format('ASE_' + self.name.upper() + '_COMMAND') + 'or supply the command keyword') command = self.command if 'PREFIX' in command: command = command.replace('PREFIX', self.prefix) try: proc = subprocess.Popen(command, shell=True, cwd=self.directory) except OSError as err: # Actually this may never happen with shell=True, since # probably the shell launches successfully. But we soon want # to allow calling the subprocess directly, and then this # distinction (failed to launch vs failed to run) is useful. msg = 'Failed to execute "{}"'.format(command) raise EnvironmentError(msg) from err errorcode = proc.wait() if errorcode: path = os.path.abspath(self.directory) msg = ('Calculator "{}" failed with command "{}" failed in ' '{} with error code {}'.format(self.name, command, path, errorcode)) raise CalculationFailed(msg) self.read_results() def write_input(self, atoms, properties=None, system_changes=None): """Write input file(s). Call this method first in subclasses so that directories are created automatically.""" absdir = os.path.abspath(self.directory) if absdir != os.curdir and not os.path.isdir(self.directory): os.makedirs(self.directory) def read_results(self): """Read energy, forces, ... from output file(s).""" pass ase-3.22.1/ase/calculators/castep.py000066400000000000000000003741131415166253600173060ustar00rootroot00000000000000"""This module defines an interface to CASTEP for use by the ASE (Webpage: http://wiki.fysik.dtu.dk/ase) Authors: Max Hoffmann, max.hoffmann@ch.tum.de Joerg Meyer, joerg.meyer@ch.tum.de Simon P. Rittmeyer, simon.rittmeyer@tum.de Contributors: Juan M. Lorenzi, juan.lorenzi@tum.de Georg S. Michelitsch, georg.michelitsch@tch.tum.de Reinhard J. 
Maurer, reinhard.maurer@yale.edu Simone Sturniolo, simone.sturniolo@stfc.ac.uk """ import difflib import numpy as np import os import re import glob import shutil import sys import json import time import tempfile import warnings import subprocess from copy import deepcopy from collections import namedtuple from itertools import product from typing import List, Set import ase import ase.units as units from ase.calculators.general import Calculator from ase.calculators.calculator import compare_atoms from ase.calculators.calculator import PropertyNotImplementedError from ase.calculators.calculator import kpts2sizeandoffsets from ase.dft.kpoints import BandPath from ase.parallel import paropen from ase.io.castep import read_param from ase.io.castep import read_bands from ase.constraints import FixCartesian __all__ = [ 'Castep', 'CastepCell', 'CastepParam', 'create_castep_keywords'] contact_email = 'simon.rittmeyer@tum.de' # A convenient table to avoid the previously used "eval" _tf_table = { '': True, # Just the keyword is equivalent to True 'True': True, 'False': False} def _self_getter(getf): # A decorator that makes it so that if no 'atoms' argument is passed to a # getter function, self.atoms is used instead def decor_getf(self, atoms=None, *args, **kwargs): if atoms is None: atoms = self.atoms return getf(self, atoms, *args, **kwargs) return decor_getf def _parse_tss_block(value, scaled=False): # Parse the assigned value for a Transition State Search structure block is_atoms = isinstance(value, ase.atoms.Atoms) try: is_strlist = all(map(lambda x: isinstance(x, str), value)) except TypeError: is_strlist = False if not is_atoms: if not is_strlist: # Invalid! raise TypeError('castep.cell.positions_abs/frac_intermediate/' 'product expects Atoms object or list of strings') # First line must be Angstroms! if (not scaled) and value[0].strip() != 'ang': raise RuntimeError('Only ang units currently supported in castep.' 'cell.positions_abs_intermediate/product') return '\n'.join(map(str.strip, value)) else: text_block = '' if scaled else 'ang\n' positions = (value.get_scaled_positions() if scaled else value.get_positions()) symbols = value.get_chemical_symbols() for s, p in zip(symbols, positions): text_block += ' {0} {1:.3f} {2:.3f} {3:.3f}\n'.format(s, *p) return text_block class Castep(Calculator): r""" CASTEP Interface Documentation Introduction ============ CASTEP_ [1]_ W_ is a software package which uses density functional theory to provide a good atomic-level description of all manner of materials and molecules. CASTEP can give information about total energies, forces and stresses on an atomic system, as well as calculating optimum geometries, band structures, optical spectra, phonon spectra and much more. It can also perform molecular dynamics simulations. The CASTEP calculator interface class offers intuitive access to all CASTEP settings and most results. All CASTEP specific settings are accessible via attribute access (*i.e*. ``calc.param.keyword = ...`` or ``calc.cell.keyword = ...``) Getting Started: ================ Set the environment variables appropriately for your system. >>> export CASTEP_COMMAND=' ... ' >>> export CASTEP_PP_PATH=' ... ' Note: alternatively to CASTEP_PP_PATH one can set PSPOT_DIR as CASTEP consults this by default, i.e. >>> export PSPOT_DIR=' ... ' Running the Calculator ====================== The default initialization command for the CASTEP calculator is .. 
class:: Castep(directory='CASTEP', label='castep') To do a minimal run one only needs to set atoms, this will use all default settings of CASTEP, meaning LDA, singlepoint, etc.. With a generated *castep_keywords.json* in place all options are accessible by inspection, *i.e.* tab-completion. This works best when using ``ipython``. All options can be accessed via ``calc.param.`` or ``calc.cell.`` and documentation is printed with ``calc.param. ?`` or ``calc.cell. ?``. All options can also be set directly using ``calc.keyword = ...`` or ``calc.KEYWORD = ...`` or even ``ialc.KeYwOrD`` or directly as named arguments in the call to the constructor (*e.g.* ``Castep(task='GeometryOptimization')``). If using this calculator on a machine without CASTEP, one might choose to copy a *castep_keywords.json* file generated elsewhere in order to access this feature: the file will be used if located in the working directory, *$HOME/.ase/* or *ase/ase/calculators/* within the ASE library. The file should be generated the first time it is needed, but you can generate a new keywords file in the currect directory with ``python -m ase.calculators.castep``. All options that go into the ``.param`` file are held in an ``CastepParam`` instance, while all options that go into the ``.cell`` file and don't belong to the atoms object are held in an ``CastepCell`` instance. Each instance can be created individually and can be added to calculators by attribute assignment, *i.e.* ``calc.param = param`` or ``calc.cell = cell``. All internal variables of the calculator start with an underscore (_). All cell attributes that clearly belong into the atoms object are blocked. Setting ``calc.atoms_attribute`` (*e.g.* ``= positions``) is sent directly to the atoms object. Arguments: ========== ========================= ==================================================== Keyword Description ========================= ==================================================== ``directory`` The relative path where all input and output files will be placed. If this does not exist, it will be created. Existing directories will be moved to directory-TIMESTAMP unless self._rename_existing_dir is set to false. ``label`` The prefix of .param, .cell, .castep, etc. files. ``castep_command`` Command to run castep. Can also be set via the bash environment variable ``CASTEP_COMMAND``. If none is given or found, will default to ``castep`` ``check_castep_version`` Boolean whether to check if the installed castep version matches the version from which the available options were deduced. Defaults to ``False``. ``castep_pp_path`` The path where the pseudopotentials are stored. Can also be set via the bash environment variables ``PSPOT_DIR`` (preferred) and ``CASTEP_PP_PATH``. Will default to the current working directory if none is given or found. Note that pseudopotentials may be generated on-the-fly if they are not found. ``find_pspots`` Boolean whether to search for pseudopotentials in ```` or not. If activated, files in this directory will be checked for typical names. If files are not found, they will be generated on the fly, depending on the ``_build_missing_pspots`` value. A RuntimeError will be raised in case multiple files per element are found. Defaults to ``False``. ``keyword_tolerance`` Integer to indicate the level of tolerance to apply validation of any parameters set in the CastepCell or CastepParam objects against the ones found in castep_keywords. 
Levels are as following: 0 = no tolerance, keywords not found in castep_keywords will raise an exception 1 = keywords not found will be accepted but produce a warning (default) 2 = keywords not found will be accepted silently 3 = no attempt is made to look for castep_keywords.json at all ``castep_keywords`` Can be used to pass a CastepKeywords object that is then used with no attempt to actually load a castep_keywords.json file. Most useful for debugging and testing purposes. ========================= ==================================================== Additional Settings =================== ========================= ==================================================== Internal Setting Description ========================= ==================================================== ``_castep_command`` (``=castep``): the actual shell command used to call CASTEP. ``_check_checkfile`` (``=True``): this makes write_param() only write a continue or reuse statement if the addressed .check or .castep_bin file exists in the directory. ``_copy_pspots`` (``=False``): if set to True the calculator will actually copy the needed pseudo-potential (\*.usp) file, usually it will only create symlinks. ``_link_pspots`` (``=True``): if set to True the calculator will actually will create symlinks to the needed pseudo potentials. Set this option (and ``_copy_pspots``) to False if you rather want to access your pseudo potentials using the PSPOT_DIR environment variable that is read by CASTEP. *Note:* This option has no effect if ``copy_pspots`` is True.. ``_build_missing_pspots`` (``=True``): if set to True, castep will generate missing pseudopotentials on the fly. If not, a RuntimeError will be raised if not all files were found. ``_export_settings`` (``=True``): if this is set to True, all calculator internal settings shown here will be included in the .param in a comment line (#) and can be read again by merge_param. merge_param can be forced to ignore this directive using the optional argument ``ignore_internal_keys=True``. ``_force_write`` (``=True``): this controls wether the \*cell and \*param will be overwritten. ``_prepare_input_only`` (``=False``): If set to True, the calculator will create \*cell und \*param file but not start the calculation itself. If this is used to prepare jobs locally and run on a remote cluster it is recommended to set ``_copy_pspots = True``. ``_castep_pp_path`` (``='.'``) : the place where the calculator will look for pseudo-potential files. ``_find_pspots`` (``=False``): if set to True, the calculator will try to find the respective pseudopotentials from <_castep_pp_path>. As long as there are no multiple files per element in this directory, the auto-detect feature should be very robust. Raises a RuntimeError if required files are not unique (multiple files per element). Non existing pseudopotentials will be generated, though this could be dangerous. ``_rename_existing_dir`` (``=True``) : when using a new instance of the calculator, this will move directories out of the way that would be overwritten otherwise, appending a date string. ``_set_atoms`` (``=False``) : setting this to True will overwrite any atoms object previously attached to the calculator when reading a \.castep file. By de- fault, the read() function will only create a new atoms object if none has been attached and other- wise try to assign forces etc. based on the atom's positions. 
``_set_atoms=True`` could be necessary if one uses CASTEP's internal geometry optimization (``calc.param.task='GeometryOptimization'``) because then the positions get out of sync. *Warning*: this option is generally not recommended unless one knows one really needs it. There should never be any need, if CASTEP is used as a single-point calculator. ``_track_output`` (``=False``) : if set to true, the interface will append a number to the label on all input and output files, where n is the number of calls to this instance. *Warning*: this setting may con- sume a lot more disk space because of the additio- nal \*check files. ``_try_reuse`` (``=_track_output``) : when setting this, the in- terface will try to fetch the reuse file from the previous run even if _track_output is True. By de- fault it is equal to _track_output, but may be overridden. Since this behavior may not always be desirable for single-point calculations. Regular reuse for *e.g.* a geometry-optimization can be achieved by setting ``calc.param.reuse = True``. ``_pedantic`` (``=False``) if set to true, the calculator will inform about settings probably wasting a lot of CPU time or causing numerical inconsistencies. ========================= ==================================================== Special features: ================= ``.dryrun_ok()`` Runs ``castep_command seed -dryrun`` in a temporary directory return True if all variables initialized ok. This is a fast way to catch errors in the input. Afterwards _kpoints_used is set. ``.merge_param()`` Takes a filename or filehandler of a .param file or CastepParam instance and merges it into the current calculator instance, overwriting current settings ``.keyword.clear()`` Can be used on any option like ``calc.param.keyword.clear()`` or ``calc.cell.keyword.clear()`` to return to the CASTEP default. ``.initialize()`` Creates all needed input in the ``_directory``. This can then copied to and run in a place without ASE or even python. ``.set_pspot('')`` This automatically sets the pseudo-potential for all present species to ``_.usp``. Make sure that ``_castep_pp_path`` is set correctly. Note that there is no check, if the file actually exists. If it doesn't castep will crash! You may want to use ``find_pspots()`` instead. ``.find_pspots(pspot=, suffix=)`` This automatically searches for pseudopotentials of type ``_.`` or ``-.`` in ``castep_pp_path` (make sure this is set correctly). Note that ```` will be searched for case insensitive. Regular expressions are accepted, and arguments ``'*'`` will be regarded as bash-like wildcards. Defaults are any ```` and any ```` from ``['usp', 'UPF', 'recpot']``. If you have well-organized folders with pseudopotentials of one kind, this should work with the defaults. ``print(calc)`` Prints a short summary of the calculator settings and atoms. ``ase.io.castep.read_seed('path-to/seed')`` Given you have a combination of seed.{param,cell,castep} this will return an atoms object with the last ionic positions in the .castep file and all other settings parsed from the .cell and .param file. If no .castep file is found the positions are taken from the .cell file. The output directory will be set to the same directory, only the label is preceded by 'copy_of\_' to avoid overwriting. ``.set_kpts(kpoints)`` This is equivalent to initialising the calculator with ``calc = Castep(kpts=kpoints)``. ``kpoints`` can be specified in many convenient forms: simple Monkhorst-Pack grids can be specified e.g. 
``(2, 2, 3)`` or ``'2 2 3'``; lists of specific weighted k-points can be given in reciprocal lattice coordinates e.g. ``[[0, 0, 0, 0.25], [0.25, 0.25, 0.25, 0.75]]``; a dictionary syntax is available for more complex requirements e.g. ``{'size': (2, 2, 2), 'gamma': True}`` will give a Gamma-centered 2x2x2 M-P grid, ``{'density': 10, 'gamma': False, 'even': False}`` will give a mesh with density of at least 10 Ang (based on the unit cell of currently-attached atoms) with an odd number of points in each direction and avoiding the Gamma point. ``.set_bandpath(bandpath)`` This is equivalent to initialialising the calculator with ``calc=Castep(bandpath=bandpath)`` and may be set simultaneously with *kpts*. It allows an electronic band structure path to be set up using ASE BandPath objects. This enables a band structure calculation to be set up conveniently using e.g. calc.set_bandpath(atoms.cell.bandpath().interpolate(npoints=200)) ``.band_structure(bandfile=None)`` Read a band structure from _seedname.bands_ file. This returns an ase BandStructure object which may be plotted with e.g. ``calc.band_structure().plot()`` Notes/Issues: ============== * Currently *only* the FixAtoms *constraint* is fully supported for reading and writing. There is some experimental support for the FixCartesian constraint. * There is no support for the CASTEP *unit system*. Units of eV and Angstrom are used throughout. In particular when converting total energies from different calculators, one should check that the same CODATA_ version is used for constants and conversion factors, respectively. .. _CASTEP: http://www.castep.org/ .. _W: https://en.wikipedia.org/wiki/CASTEP .. _CODATA: https://physics.nist.gov/cuu/Constants/index.html .. [1] S. J. Clark, M. D. Segall, C. J. Pickard, P. J. Hasnip, M. J. Probert, K. Refson, M. C. Payne Zeitschrift für Kristallographie 220(5-6) pp.567- 570 (2005) PDF_. .. _PDF: http://www.tcm.phy.cam.ac.uk/castep/papers/ZKristallogr_2005.pdf End CASTEP Interface Documentation """ # Class attributes ! 
# keys set through atoms object atoms_keys = [ 'charges', 'ionic_constraints', 'lattice_abs', 'lattice_cart', 'positions_abs', 'positions_abs_final', 'positions_abs_intermediate', 'positions_frac', 'positions_frac_final', 'positions_frac_intermediate'] atoms_obj_keys = [ 'dipole', 'energy_free', 'energy_zero', 'fermi', 'forces', 'nbands', 'positions', 'stress', 'pressure'] internal_keys = [ '_castep_command', '_check_checkfile', '_copy_pspots', '_link_pspots', '_find_pspots', '_build_missing_pspots', '_directory', '_export_settings', '_force_write', '_label', '_prepare_input_only', '_castep_pp_path', '_rename_existing_dir', '_set_atoms', '_track_output', '_try_reuse', '_pedantic'] def __init__(self, directory='CASTEP', label='castep', castep_command=None, check_castep_version=False, castep_pp_path=None, find_pspots=False, keyword_tolerance=1, castep_keywords=None, **kwargs): self.__name__ = 'Castep' # initialize the ase.calculators.general calculator Calculator.__init__(self) from ase.io.castep import write_cell self._write_cell = write_cell if castep_keywords is None: castep_keywords = CastepKeywords(make_param_dict(), make_cell_dict(), [], [], 0) if keyword_tolerance < 3: try: castep_keywords = import_castep_keywords(castep_command) except CastepVersionError as e: if keyword_tolerance == 0: raise e else: warnings.warn(str(e)) self._kw_tol = keyword_tolerance keyword_tolerance = max(keyword_tolerance, 2) # 3 not accepted below self.param = CastepParam(castep_keywords, keyword_tolerance=keyword_tolerance) self.cell = CastepCell(castep_keywords, keyword_tolerance=keyword_tolerance) ################################### # Calculator state variables # ################################### self._calls = 0 self._castep_version = castep_keywords.castep_version # collects warning from .castep files self._warnings = [] # collects content from *.err file self._error = None # warnings raised by the ASE interface self._interface_warnings = [] # store to check if recalculation is necessary self._old_atoms = None self._old_cell = None self._old_param = None ################################### # Internal keys # # Allow to tweak the behavior # ################################### self._opt = {} self._castep_command = get_castep_command(castep_command) self._castep_pp_path = get_castep_pp_path(castep_pp_path) self._check_checkfile = True self._copy_pspots = False self._link_pspots = True self._find_pspots = find_pspots self._build_missing_pspots = True self._directory = os.path.abspath(directory) self._export_settings = True self._force_write = True self._label = label self._prepare_input_only = False self._rename_existing_dir = True self._set_atoms = False self._track_output = False self._try_reuse = False # turn off the pedantic user warnings self._pedantic = False # will be set on during runtime self._seed = None ################################### # (Physical) result variables # ################################### self.atoms = None # initialize result variables self._forces = None self._energy_total = None self._energy_free = None self._energy_0K = None self._energy_total_corr = None self._eigenvalues = None self._efermi = None self._ibz_kpts = None self._ibz_weights = None self._band_structure = None # dispersion corrections self._dispcorr_energy_total = None self._dispcorr_energy_free = None self._dispcorr_energy_0K = None # spins and hirshfeld volumes self._spins = None self._hirsh_volrat = None # Mulliken charges self._mulliken_charges = None # Hirshfeld charges self._hirshfeld_charges = None 
self._number_of_cell_constraints = None self._output_verbosity = None self._stress = None self._pressure = None self._unit_cell = None self._kpoints = None # pointers to other files used at runtime self._check_file = None self._castep_bin_file = None # plane wave cutoff energy (may be derived during PP generation) self._cut_off_energy = None # runtime information self._total_time = None self._peak_memory = None # check version of CASTEP options module against current one if check_castep_version: local_castep_version = get_castep_version(self._castep_command) if not hasattr(self, '_castep_version'): warnings.warn('No castep version found') return if not local_castep_version == self._castep_version: warnings.warn('The options module was generated from version %s ' 'while your are currently using CASTEP version %s' % (self._castep_version, get_castep_version(self._castep_command))) self._castep_version = local_castep_version # processes optional arguments in kw style for keyword, value in kwargs.items(): # first fetch special keywords issued by ASE CLI if keyword == 'kpts': self.set_kpts(value) elif keyword == 'bandpath': self.set_bandpath(value) elif keyword == 'xc': self.xc_functional = value elif keyword == 'ecut': self.cut_off_energy = value else: # the general case self.__setattr__(keyword, value) def band_structure(self, bandfile=None): from ase.spectrum.band_structure import BandStructure if bandfile is None: bandfile = os.path.join(self._directory, self._seed) + '.bands' if not os.path.exists(bandfile): raise ValueError('Cannot find band file "{}".'.format(bandfile)) kpts, weights, eigenvalues, efermi = read_bands(bandfile) # Get definitions of high-symmetry points special_points = self.atoms.cell.bandpath(npoints=0).special_points bandpath = BandPath(self.atoms.cell, kpts=kpts, special_points=special_points) return BandStructure(bandpath, eigenvalues, reference=efermi) def set_bandpath(self, bandpath): """Set a band structure path from ase.dft.kpoints.BandPath object This will set the bs_kpoint_list block with a set of specific points determined in ASE. bs_kpoint_spacing will not be used; to modify the number of points, consider using e.g. bandpath.resample(density=20) to obtain a new dense path. Args: bandpath (:obj:`ase.dft.kpoints.BandPath` or None): Set to None to remove list of band structure points. Otherwise, sampling will follow BandPath parameters. """ def clear_bs_keywords(): bs_keywords = product({'bs_kpoint', 'bs_kpoints'}, {'path', 'path_spacing', 'list', 'mp_grid', 'mp_spacing', 'mp_offset'}) for bs_tag in bs_keywords: setattr(self.cell, '_'.join(bs_tag), None) if bandpath is None: clear_bs_keywords() elif isinstance(bandpath, BandPath): clear_bs_keywords() self.cell.bs_kpoint_list = [' '.join(map(str, row)) for row in bandpath.kpts] else: raise TypeError('Band structure path must be an ' 'ase.dft.kpoint.BandPath object') def set_kpts(self, kpts): """Set k-point mesh/path using a str, tuple or ASE features Args: kpts (None, tuple, str, dict): This method will set the CASTEP parameters kpoints_mp_grid, kpoints_mp_offset and kpoints_mp_spacing as appropriate. Unused parameters will be set to None (i.e. not included in input files.) If kpts=None, all these parameters are set as unused. The simplest useful case is to give a 3-tuple of integers specifying a Monkhorst-Pack grid. This may also be formatted as a string separated by spaces; this is the format used internally before writing to the input files. 
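            For instance (illustrative only; ``calc`` is assumed to be an
            existing Castep instance), ``calc.set_kpts((4, 4, 2))`` and
            ``calc.set_kpts('4 4 2')`` are equivalent and both set the
            ``kpoint_mp_grid`` cell keyword to ``'4 4 2'``.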
A more powerful set of features is available when using a python dictionary with the following allowed keys: - 'size' (3-tuple of int) mesh of mesh dimensions - 'density' (float) for BZ sampling density in points per recip. Ang ( kpoint_mp_spacing = 1 / (2pi * density) ). An explicit MP mesh will be set to allow for rounding/centering. - 'spacing' (float) for BZ sampling density for maximum space between sample points in reciprocal space. This is numerically equivalent to the inbuilt ``calc.cell.kpoint_mp_spacing``, but will be converted to 'density' to allow for rounding/centering. - 'even' (bool) to round each direction up to the nearest even number; set False for odd numbers, leave as None for no odd/even rounding. - 'gamma' (bool) to offset the Monkhorst-Pack grid to include (0, 0, 0); set False to offset each direction avoiding 0. """ def clear_mp_keywords(): mp_keywords = product({'kpoint', 'kpoints'}, {'mp_grid', 'mp_offset', 'mp_spacing', 'list'}) for kp_tag in mp_keywords: setattr(self.cell, '_'.join(kp_tag), None) # Case 1: Clear parameters with set_kpts(None) if kpts is None: clear_mp_keywords() pass # Case 2: list of explicit k-points with weights # e.g. [[ 0, 0, 0, 0.125], # [ 0, -0.5, 0, 0.375], # [-0.5, 0, -0.5, 0.375], # [-0.5, -0.5, -0.5, 0.125]] elif (isinstance(kpts, (tuple, list)) and isinstance(kpts[0], (tuple, list))): if not all(map((lambda row: len(row) == 4), kpts)): raise ValueError( 'In explicit kpt list each row should have 4 elements') clear_mp_keywords() self.cell.kpoint_list = [' '.join(map(str, row)) for row in kpts] # Case 3: list of explicit kpts formatted as list of str # i.e. the internal format of calc.kpoint_list split on \n # e.g. ['0 0 0 0.125', '0 -0.5 0 0.375', '-0.5 0 -0.5 0.375'] elif isinstance(kpts, (tuple, list)) and isinstance(kpts[0], str): if not all(map((lambda row: len(row.split()) == 4), kpts)): raise ValueError( 'In explicit kpt list each row should have 4 elements') clear_mp_keywords() self.cell.kpoint_list = kpts # Case 4: list or tuple of MP samples e.g. [3, 3, 2] elif isinstance(kpts, (tuple, list)) and isinstance(kpts[0], int): if len(kpts) != 3: raise ValueError('Monkhorst-pack grid should have 3 values') clear_mp_keywords() self.cell.kpoint_mp_grid = '%d %d %d' % tuple(kpts) # Case 5: str representation of Case 3 e.g. '3 3 2' elif isinstance(kpts, str): self.set_kpts([int(x) for x in kpts.split()]) # Case 6: dict of options e.g. {'size': (3, 3, 2), 'gamma': True} # 'spacing' is allowed but transformed to 'density' to get mesh/offset elif isinstance(kpts, dict): kpts = kpts.copy() if (kpts.get('spacing') is not None and kpts.get('density') is not None): raise ValueError( 'Cannot set kpts spacing and density simultaneously.') else: if kpts.get('spacing') is not None: kpts = kpts.copy() spacing = kpts.pop('spacing') kpts['density'] = 1 / (np.pi * spacing) clear_mp_keywords() size, offsets = kpts2sizeandoffsets(atoms=self.atoms, **kpts) self.cell.kpoint_mp_grid = '%d %d %d' % tuple(size) self.cell.kpoint_mp_offset = '%f %f %f' % tuple(offsets) # Case 7: some other iterator. 
Try treating as a list: elif hasattr(kpts, '__iter__'): self.set_kpts(list(kpts)) # Otherwise, give up else: raise TypeError('Cannot interpret kpts of this type') def todict(self, skip_default=True): """Create dict with settings of .param and .cell""" dct = {} dct['param'] = self.param.get_attr_dict() dct['cell'] = self.cell.get_attr_dict() return dct def check_state(self, atoms, tol=1e-15): """Check for system changes since last calculation.""" return compare_atoms(self._old_atoms, atoms) def _castep_find_last_record(self, castep_file): """Checks whether a given castep file has a regular ending message following the last banner message. If this is the case, the line number of the last banner message is returned, otherwise False. returns (record_start, record_end, end_found, last_record_complete) """ if isinstance(castep_file, str): castep_file = paropen(castep_file, 'r') file_opened = True else: file_opened = False record_starts = [] while True: line = castep_file.readline() if 'Welcome' in line and 'CASTEP' in line: record_starts = [castep_file.tell()] + record_starts if not line: break if record_starts == []: warnings.warn('Could not find CASTEP label in result file: %s.' ' Are you sure this is a .castep file?' % castep_file) return # search for regular end of file end_found = False # start to search from record beginning from the back # and see if record_end = -1 for record_nr, record_start in enumerate(record_starts): castep_file.seek(record_start) while True: line = castep_file.readline() if not line: break if 'warn' in line.lower(): self._warnings.append(line) if 'Finalisation time =' in line: end_found = True record_end = castep_file.tell() break if end_found: break if file_opened: castep_file.close() if end_found: # record_nr == 0 corresponds to the last record here if record_nr == 0: return (record_start, record_end, True, True) else: return (record_start, record_end, True, False) else: return (0, record_end, False, False) def read(self, castep_file=None): """Read a castep file into the current instance.""" _close = True if castep_file is None: if self._castep_file: castep_file = self._castep_file out = paropen(castep_file, 'r') else: warnings.warn('No CASTEP file specified') return if not os.path.exists(castep_file): warnings.warn('No CASTEP file found') elif isinstance(castep_file, str): out = paropen(castep_file, 'r') else: # in this case we assume that we have a fileobj already, but check # for attributes in order to avoid extended EAFP blocks. out = castep_file # look before you leap... attributes = ['name', 'seek', 'close', 'readline', 'tell'] for attr in attributes: if not hasattr(out, attr): raise TypeError( '"castep_file" is neither str nor valid fileobj') castep_file = out.name _close = False if self._seed is None: self._seed = os.path.splitext(os.path.basename(castep_file))[0] err_file = '%s.0001.err' % self._seed if os.path.exists(err_file): err_file = paropen(err_file) self._error = err_file.read() err_file.close() # we return right away because it might # just be here from a previous run # look for last result, if several CASTEP # runs are appended record_start, record_end, end_found, _\ = self._castep_find_last_record(out) if not end_found: warnings.warn('No regular end found in %s file.
%s' % (castep_file, self._error)) if _close: out.close() return # we return here, because the file has no a regular end # now iterate over last CASTEP output in file to extract information # could be generalized as well to extract trajectory from file # holding several outputs n_cell_const = 0 forces = [] # HOTFIX: # we have to initialize the _stress variable as a zero array # otherwise the calculator crashes upon pickling trajectories # Alternative would be to raise a NotImplementedError() which # is also kind of not true, since we can extract stresses if # the user configures CASTEP to print them in the outfile # stress = [] stress = np.zeros([3, 3]) hirsh_volrat = [] # Two flags to check whether spin-polarized or not, and whether # Hirshfeld volumes are calculated spin_polarized = False calculate_hirshfeld = False mulliken_analysis = False hirshfeld_analysis = False kpoints = None positions_frac_list = [] out.seek(record_start) while True: # TODO: add a switch if we have a geometry optimization: record # atoms objects for intermediate steps. try: # in case we need to rewind back one line, we memorize the bit # position of this line in the file. # --> see symops problem below _line_start = out.tell() line = out.readline() if not line or out.tell() > record_end: break elif 'Hirshfeld Analysis' in line: hirshfeld_charges = [] hirshfeld_analysis = True # skip the separating line line = out.readline() # this is the headline line = out.readline() if 'Charge' in line: # skip the next separator line line = out.readline() while True: line = out.readline() fields = line.split() if len(fields) == 1: break else: hirshfeld_charges.append(float(fields[-1])) elif 'stress calculation' in line: if line.split()[-1].strip() == 'on': self.param.calculate_stress = True elif 'basis set accuracy' in line: self.param.basis_precision = line.split()[-1] elif 'plane wave basis set cut-off' in line: # NB this is set as a private "result" attribute to avoid # conflict with input option basis_precision cutoff = float(line.split()[-2]) self._cut_off_energy = cutoff if self.param.basis_precision.value is None: self.param.cut_off_energy = cutoff elif 'total energy / atom convergence tol.' in line: elec_energy_tol = float(line.split()[-2]) self.param.elec_energy_tol = elec_energy_tol elif 'convergence tolerance window' in line: elec_convergence_win = int(line.split()[-2]) self.param.elec_convergence_win = elec_convergence_win elif re.match(r'\sfinite basis set correction\s*:', line): finite_basis_corr = line.split()[-1] fbc_possibilities = {'none': 0, 'manual': 1, 'automatic': 2} fbc = fbc_possibilities[finite_basis_corr] self.param.finite_basis_corr = fbc elif 'Treating system as non-metallic' in line: self.param.fix_occupancy = True elif 'max. 
number of SCF cycles:' in line: max_no_scf = float(line.split()[-1]) self.param.max_scf_cycles = max_no_scf elif 'density-mixing scheme' in line: mixing_scheme = line.split()[-1] self.param.mixing_scheme = mixing_scheme elif 'dump wavefunctions every' in line: no_dump_cycles = float(line.split()[-3]) self.param.num_dump_cycles = no_dump_cycles elif 'optimization strategy' in line: lspl = line.split(":") if lspl[0].strip() != 'optimization strategy': # This can happen in iprint: 3 calculations continue if 'memory' in line: self.param.opt_strategy = 'Memory' if 'speed' in line: self.param.opt_strategy = 'Speed' elif 'calculation limited to maximum' in line: calc_limit = float(line.split()[-2]) self.param.run_time = calc_limit elif 'type of calculation' in line: lspl = line.split(":") if lspl[0].strip() != 'type of calculation': # This can happen in iprint: 3 calculations continue calc_type = lspl[-1] calc_type = re.sub(r'\s+', ' ', calc_type) calc_type = calc_type.strip() if calc_type != 'single point energy': calc_type_possibilities = { 'geometry optimization': 'GeometryOptimization', 'band structure': 'BandStructure', 'molecular dynamics': 'MolecularDynamics', 'optical properties': 'Optics', 'phonon calculation': 'Phonon', 'E-field calculation': 'Efield', 'Phonon followed by E-field': 'Phonon+Efield', 'transition state search': 'TransitionStateSearch', 'Magnetic Resonance': 'MagRes', 'Core level spectra': 'Elnes', 'Electronic Spectroscopy': 'ElectronicSpectroscopy' } ctype = calc_type_possibilities[calc_type] self.param.task = ctype elif 'using functional' in line: used_functional = line.split(":")[-1] used_functional = re.sub(r'\s+', ' ', used_functional) used_functional = used_functional.strip() if used_functional != 'Local Density Approximation': used_functional_possibilities = { 'Perdew Wang (1991)': 'PW91', 'Perdew Burke Ernzerhof': 'PBE', 'revised Perdew Burke Ernzerhof': 'RPBE', 'PBE with Wu-Cohen exchange': 'WC', 'PBE for solids (2008)': 'PBESOL', 'Hartree-Fock': 'HF', 'Hartree-Fock +': 'HF-LDA', 'Screened Hartree-Fock': 'sX', 'Screened Hartree-Fock + ': 'sX-LDA', 'hybrid PBE0': 'PBE0', 'hybrid B3LYP': 'B3LYP', 'hybrid HSE03': 'HSE03', 'hybrid HSE06': 'HSE06' } used_func = used_functional_possibilities[ used_functional] self.param.xc_functional = used_func elif 'output verbosity' in line: iprint = int(line.split()[-1][1]) if int(iprint) != 1: self.param.iprint = iprint elif 'treating system as spin-polarized' in line: spin_polarized = True self.param.spin_polarized = spin_polarized elif 'treating system as non-spin-polarized' in line: spin_polarized = False elif 'Number of kpoints used' in line: kpoints = int(line.split('=')[-1].strip()) elif 'Unit Cell' in line: lattice_real = [] lattice_reci = [] while True: line = out.readline() fields = line.split() if len(fields) == 6: break for i in range(3): lattice_real.append([float(f) for f in fields[0:3]]) lattice_reci.append([float(f) for f in fields[3:7]]) line = out.readline() fields = line.split() elif 'Cell Contents' in line: while True: line = out.readline() if 'Total number of ions in cell' in line: n_atoms = int(line.split()[7]) if 'Total number of species in cell' in line: int(line.split()[7]) fields = line.split() if len(fields) == 0: break elif 'Fractional coordinates of atoms' in line: species = [] custom_species = None # A CASTEP special thing positions_frac = [] # positions_cart = [] while True: line = out.readline() fields = line.split() if len(fields) == 7: break for n in range(n_atoms): spec_custom = fields[1].split(':', 
1) elem = spec_custom[0] if len(spec_custom) > 1 and custom_species is None: # Add it to the custom info! custom_species = list(species) species.append(elem) if custom_species is not None: custom_species.append(fields[1]) positions_frac.append([float(s) for s in fields[3:6]]) line = out.readline() fields = line.split() positions_frac_list.append(positions_frac) elif 'Files used for pseudopotentials' in line: while True: line = out.readline() if 'Pseudopotential generated on-the-fly' in line: continue fields = line.split() if (len(fields) >= 2): elem, pp_file = fields self.cell.species_pot = (elem, pp_file) else: break elif 'k-Points For BZ Sampling' in line: # TODO: generalize for non-Monkhorst Pack case # (i.e. kpoint lists) - # kpoints_offset cannot be read this way and # is hence always set to None while True: line = out.readline() if not line.strip(): break if 'MP grid size for SCF calculation' in line: # kpoints = ' '.join(line.split()[-3:]) # self.kpoints_mp_grid = kpoints # self.kpoints_mp_offset = '0. 0. 0.' # not set here anymore because otherwise # two calculator objects go out of sync # after each calculation triggering unnecessary # recalculation break elif 'Symmetry and Constraints' in line: # this is a bit of a hack, but otherwise the read_symops # would need to re-read the entire file. --> just rewind # back by one line, so the read_symops routine can find the # start of this block. out.seek(_line_start) self.read_symops(castep_castep=out) elif 'Number of cell constraints' in line: n_cell_const = int(line.split()[4]) elif 'Final energy' in line: self._energy_total = float(line.split()[-2]) elif 'Final free energy' in line: self._energy_free = float(line.split()[-2]) elif 'NB est. 0K energy' in line: self._energy_0K = float(line.split()[-2]) # check if we had a finite basis set correction elif 'Total energy corrected for finite basis set' in line: self._energy_total_corr = float(line.split()[-2]) # Add support for dispersion correction # filtering due to SEDC is done in get_potential_energy elif 'Dispersion corrected final energy' in line: self._dispcorr_energy_total = float(line.split()[-2]) elif 'Dispersion corrected final free energy' in line: self._dispcorr_energy_free = float(line.split()[-2]) elif 'dispersion corrected est. 
0K energy' in line: self._dispcorr_energy_0K = float(line.split()[-2]) # remember to remove constraint labels in force components # (lacking a space behind the actual floating point number in # the CASTEP output) elif '******************** Forces *********************'\ in line or\ '************** Symmetrised Forces ***************'\ in line or\ '************** Constrained Symmetrised Forces ****'\ '**********'\ in line or\ '******************** Constrained Forces **********'\ '**********'\ in line or\ '******************* Unconstrained Forces *********'\ '**********'\ in line: fix = [] fix_cart = [] forces = [] while True: line = out.readline() fields = line.split() if len(fields) == 7: break for n in range(n_atoms): consd = np.array([0, 0, 0]) fxyz = [0, 0, 0] for (i, force_component) in enumerate(fields[-4:-1]): if force_component.count("(cons'd)") > 0: consd[i] = 1 fxyz[i] = float(force_component.replace( "(cons'd)", '')) if consd.all(): fix.append(n) elif consd.any(): fix_cart.append(FixCartesian(n, consd)) forces.append(fxyz) line = out.readline() fields = line.split() # add support for Hirshfeld analysis elif 'Hirshfeld / free atomic volume :' in line: # if we are here, then params must be able to cope with # Hirshfeld flag (if castep_keywords.py matches employed # castep version) calculate_hirshfeld = True hirsh_volrat = [] while True: line = out.readline() fields = line.split() if len(fields) == 1: break for n in range(n_atoms): hirsh_atom = float(fields[0]) hirsh_volrat.append(hirsh_atom) while True: line = out.readline() if 'Hirshfeld / free atomic volume :' in line or\ 'Hirshfeld Analysis' in line: break line = out.readline() fields = line.split() elif '***************** Stress Tensor *****************'\ in line or\ '*********** Symmetrised Stress Tensor ***********'\ in line: stress = [] while True: line = out.readline() fields = line.split() if len(fields) == 6: break for n in range(3): stress.append([float(s) for s in fields[2:5]]) line = out.readline() fields = line.split() line = out.readline() if "Pressure:" in line: self._pressure = float(line.split()[-2]) * units.GPa elif ('BFGS: starting iteration' in line or 'BFGS: improving iteration' in line): if n_cell_const < 6: lattice_real = [] lattice_reci = [] # backup previous configuration first: # for highly symmetric systems (where essentially only the # stress is optimized, but the atomic positions) positions # are only printed once. if species: prev_species = deepcopy(species) if positions_frac: prev_positions_frac = deepcopy(positions_frac) species = [] positions_frac = [] forces = [] # HOTFIX: # Same reason for the stress initialization as before # stress = [] stress = np.zeros([3, 3]) # extract info from the Mulliken analysis elif 'Atomic Populations' in line: # sometimes this appears twice in a castep file mulliken_charges = [] spins = [] mulliken_analysis = True # skip the separating line line = out.readline() # this is the headline line = out.readline() if 'Charge' in line: # skip the next separator line line = out.readline() while True: line = out.readline() fields = line.split() if len(fields) == 1: break # the check for len==7 is due to CASTEP 18 # outformat changes if spin_polarized: if len(fields) != 7: spins.append(float(fields[-1])) mulliken_charges.append(float(fields[-2])) else: mulliken_charges.append(float(fields[-1])) # There is actually no good reason to get out of the loop # already at this point... or do I miss something? 
# elif 'BFGS: Final Configuration:' in line: # break elif 'warn' in line.lower(): self._warnings.append(line) # fetch some last info elif 'Total time' in line: pattern = r'.*=\s*([\d\.]+) s' self._total_time = float(re.search(pattern, line).group(1)) elif 'Peak Memory Use' in line: pattern = r'.*=\s*([\d]+) kB' self._peak_memory = int(re.search(pattern, line).group(1)) except Exception as exception: sys.stderr.write(line + '|-> line triggered exception: ' + str(exception)) raise if _close: out.close() # in highly summetric crystals, positions and symmetry are only printed # upon init, hence we here restore these original values if not positions_frac: positions_frac = prev_positions_frac if not species: species = prev_species if not spin_polarized: # set to zero spin if non-spin polarized calculation spins = np.zeros(len(positions_frac)) positions_frac_atoms = np.array(positions_frac) forces_atoms = np.array(forces) spins_atoms = np.array(spins) if mulliken_analysis: mulliken_charges_atoms = np.array(mulliken_charges) else: mulliken_charges_atoms = np.zeros(len(positions_frac)) if hirshfeld_analysis: hirshfeld_charges_atoms = np.array(hirshfeld_charges) else: hirshfeld_charges_atoms = None if calculate_hirshfeld: hirsh_atoms = np.array(hirsh_volrat) else: hirsh_atoms = np.zeros_like(spins) if self.atoms and not self._set_atoms: # compensate for internal reordering of atoms by CASTEP # using the fact that the order is kept within each species # positions_frac_ase = self.atoms.get_scaled_positions(wrap=False) atoms_assigned = [False] * len(self.atoms) # positions_frac_castep_init = np.array(positions_frac_list[0]) positions_frac_castep = np.array(positions_frac_list[-1]) # species_castep = list(species) forces_castep = np.array(forces) hirsh_castep = np.array(hirsh_volrat) spins_castep = np.array(spins) mulliken_charges_castep = np.array(mulliken_charges_atoms) # go through the atoms position list and replace # with the corresponding one from the # castep file corresponding atomic number for iase in range(n_atoms): for icastep in range(n_atoms): if (species[icastep] == self.atoms[iase].symbol and not atoms_assigned[icastep]): positions_frac_atoms[iase] = \ positions_frac_castep[icastep] forces_atoms[iase] = np.array(forces_castep[icastep]) if iprint > 1 and calculate_hirshfeld: hirsh_atoms[iase] = np.array(hirsh_castep[icastep]) if spin_polarized: # reordering not necessary in case all spins == 0 spins_atoms[iase] = np.array(spins_castep[icastep]) mulliken_charges_atoms[iase] = np.array( mulliken_charges_castep[icastep]) atoms_assigned[icastep] = True break if not all(atoms_assigned): not_assigned = [i for (i, assigned) in zip(range(len(atoms_assigned)), atoms_assigned) if not assigned] warnings.warn('%s atoms not assigned. ' ' DEBUGINFO: The following atoms where not assigned: %s' % (atoms_assigned.count(False), not_assigned)) else: self.atoms.set_scaled_positions(positions_frac_atoms) else: # If no atoms, object has been previously defined # we define it here and set the Castep() instance as calculator. # This covers the case that we simply want to open a .castep file. # The next time around we will have an atoms object, since # set_calculator also set atoms in the calculator. 
if self.atoms: constraints = self.atoms.constraints else: constraints = [] atoms = ase.atoms.Atoms(species, cell=lattice_real, constraint=constraints, pbc=True, scaled_positions=positions_frac, ) if custom_species is not None: atoms.new_array('castep_custom_species', np.array(custom_species)) if self.param.spin_polarized: # only set magnetic moments if this was a spin polarized # calculation # this one fails as is atoms.set_initial_magnetic_moments(magmoms=spins_atoms) if mulliken_analysis: atoms.set_initial_charges(charges=mulliken_charges_atoms) atoms.calc = self self._kpoints = kpoints self._forces = forces_atoms # stress in .castep file is given in GPa: self._stress = np.array(stress) * units.GPa self._hirsh_volrat = hirsh_atoms self._spins = spins_atoms self._mulliken_charges = mulliken_charges_atoms self._hirshfeld_charges = hirshfeld_charges_atoms if self._warnings: warnings.warn('WARNING: %s contains warnings' % castep_file) for warning in self._warnings: warnings.warn(warning) # reset self._warnings = [] # Read in eigenvalues from bands file bands_file = castep_file[:-7] + '.bands' if (self.param.task.value is not None and self.param.task.value.lower() == 'bandstructure'): self._band_structure = self.band_structure(bandfile=bands_file) else: try: (self._ibz_kpts, self._ibz_weights, self._eigenvalues, self._efermi) = read_bands(filename=bands_file) except FileNotFoundError: warnings.warn('Could not load .bands file, eigenvalues and ' 'Fermi energy are unknown') def read_symops(self, castep_castep=None): # TODO: check that this is really backwards compatible # with previous routine with this name... """Read all symmetry operations used from a .castep file.""" if castep_castep is None: castep_castep = self._seed + '.castep' if isinstance(castep_castep, str): if not os.path.isfile(castep_castep): warnings.warn('Warning: CASTEP file %s not found!' % castep_castep) f = paropen(castep_castep, 'r') _close = True else: # in this case we assume that we have a fileobj already, but check # for attributes in order to avoid extended EAFP blocks. f = castep_castep # look before you leap... attributes = ['name', 'readline', 'close'] for attr in attributes: if not hasattr(f, attr): raise TypeError('read_castep_castep_symops: castep_castep ' 'is not of type str nor valid fileobj!') castep_castep = f.name _close = False while True: line = f.readline() if not line: return if 'output verbosity' in line: iprint = line.split()[-1][1] # filter out the default if int(iprint) != 1: self.param.iprint = iprint if 'Symmetry and Constraints' in line: break if self.param.iprint.value is None or int(self.param.iprint.value) < 2: self._interface_warnings.append( 'Warning: No symmetry' 'operations could be read from %s (iprint < 2).' % f.name) return while True: line = f.readline() if not line: break if 'Number of symmetry operations' in line: nsym = int(line.split()[5]) # print "nsym = %d" % nsym # information about symmetry related atoms currently not read symmetry_operations = [] for _ in range(nsym): rotation = [] displacement = [] while True: if 'rotation' in f.readline(): break for _ in range(3): line = f.readline() rotation.append([float(r) for r in line.split()[1:4]]) while True: if 'displacement' in f.readline(): break line = f.readline() displacement = [float(d) for d in line.split()[1:4]] symop = {'rotation': rotation, 'displacement': displacement} self.symmetry_ops = symop self.symmetry = symmetry_operations warnings.warn('Symmetry operations successfully read from %s. 
%s' % (f.name, self.cell.symmetry_ops)) break # only close if we opened the file in this routine if _close: f.close() def get_hirsh_volrat(self): """ Return the Hirshfeld volumes. """ return self._hirsh_volrat def get_spins(self): """ Return the spins from a plane-wave Mulliken analysis. """ return self._spins def get_mulliken_charges(self): """ Return the charges from a plane-wave Mulliken analysis. """ return self._mulliken_charges def get_hirshfeld_charges(self): """ Return the charges from a Hirshfeld analysis. """ return self._hirshfeld_charges def get_total_time(self): """ Return the total runtime """ return self._total_time def get_peak_memory(self): """ Return the peak memory usage """ return self._peak_memory def set_label(self, label): """The label is part of each seed, which in turn is a prefix in each CASTEP related file. """ # we may think about changing this in future to set `self._directory` # and `self._label`, as one would expect self._label = label def set_pspot(self, pspot, elems=None, notelems=None, clear=True, suffix='usp'): """Quickly set all pseudo-potentials: Usually CASTEP psp are named like <element>_<pspot>.<suffix>, so this function only expects the <pspot> part. It then clears any previous pseudopotential settings and applies the one with <pspot> for each element in the atoms object. The optional elems and notelems arguments can be used to exclusively assign to some species, or to exclude species with notelems. Parameters :: - elems (None) : set only these elements - notelems (None): do not set the elements - clear (True): clear previous settings - suffix (usp): PP file suffix """ if self._find_pspots: if self._pedantic: warnings.warn('Warning: <_find_pspots> = True. ' 'Do you really want to use `set_pspots()`? ' 'This does not check whether the PP files exist. ' 'You may rather want to use `find_pspots()` with ' 'the same <pspot>.') if clear and not elems and not notelems: self.cell.species_pot.clear() for elem in set(self.atoms.get_chemical_symbols()): if elems is not None and elem not in elems: continue if notelems is not None and elem in notelems: continue self.cell.species_pot = (elem, '%s_%s.%s' % (elem, pspot, suffix)) def find_pspots(self, pspot='.+', elems=None, notelems=None, clear=True, suffix='(usp|UPF|recpot)'): r"""Quickly find and set all pseudo-potentials by searching in castep_pp_path: This one is more flexible than set_pspots, and also checks if the files are actually available from the castep_pp_path. Essentially, the function parses the filenames in <castep_pp_path> and does a regex matching. The respective pattern is: r"^(<elem>|<elem.upper()>|<elem.lower()>)(_|-)<pspot>\.<suffix>$" In most cases, it will be sufficient to not specify anything, if you use standard CASTEP USPPs with only one file per element in the <castep_pp_path>. The function raises a `RuntimeError` if there is some ambiguity (multiple files per element). Parameters :: - pspot ('.+') : as defined above, will be a wildcard if not specified.
- elems (None) : set only these elements - notelems (None): do not set the elements - clear (True): clear previous settings - suffix (usp|UPF|recpot): PP file suffix """ if clear and not elems and not notelems: self.cell.species_pot.clear() if not os.path.isdir(self._castep_pp_path): if self._pedantic: warnings.warn('Cannot search directory: {} Folder does not exist' .format(self._castep_pp_path)) return # translate the bash wildcard syntax to regex if pspot == '*': pspot = '.*' if suffix == '*': suffix = '.*' if pspot == '*': pspot = '.*' # GBRV USPPs have a strnage naming schme pattern = r'^({elem}|{elem_upper}|{elem_lower})(_|-){pspot}\.{suffix}$' for elem in set(self.atoms.get_chemical_symbols()): if elems is not None and elem not in elems: continue if notelems is not None and elem in notelems: continue p = pattern.format(elem=elem, elem_upper=elem.upper(), elem_lower=elem.lower(), pspot=pspot, suffix=suffix) pps = [] for f in os.listdir(self._castep_pp_path): if re.match(p, f): pps.append(f) if not pps: if self._pedantic: warnings.warn('Pseudopotential for species {} not found!' .format(elem)) elif not len(pps) == 1: raise RuntimeError( 'Pseudopotential for species ''{} not unique!\n' .format(elem) + 'Found the following files in {}\n' .format(self._castep_pp_path) + '\n'.join([' {}'.format(pp) for pp in pps]) + '\nConsider a stricter search pattern in `find_pspots()`.') else: self.cell.species_pot = (elem, pps[0]) @property def name(self): """Return the name of the calculator (string). """ return self.__name__ def get_property(self, name, atoms=None, allow_calculation=True): # High-level getter for compliance with the database module... # in principle this would not be necessary any longer if we properly # based this class on `Calculator` if name == 'forces': return self.get_forces(atoms) elif name == 'energy': return self.get_potential_energy(atoms) elif name == 'stress': return self.get_stress(atoms) elif name == 'charges': return self.get_charges(atoms) else: raise PropertyNotImplementedError @_self_getter def get_forces(self, atoms): """Run CASTEP calculation if needed and return forces.""" self.update(atoms) return np.array(self._forces) @_self_getter def get_total_energy(self, atoms): """Run CASTEP calculation if needed and return total energy.""" self.update(atoms) return self._energy_total @_self_getter def get_total_energy_corrected(self, atoms): """Run CASTEP calculation if needed and return total energy.""" self.update(atoms) return self._energy_total_corr @_self_getter def get_free_energy(self, atoms): """Run CASTEP calculation if needed and return free energy. Only defined with smearing.""" self.update(atoms) return self._energy_free @_self_getter def get_0K_energy(self, atoms): """Run CASTEP calculation if needed and return 0K energy. Only defined with smearing.""" self.update(atoms) return self._energy_0K @_self_getter def get_potential_energy(self, atoms, force_consistent=False): # here for compatibility with ase/calculators/general.py # but accessing only _name variables """Return the total potential energy.""" self.update(atoms) if force_consistent: # Assumption: If no dispersion correction is applied, then the # respective value will default to None as initialized. 
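# Descriptive summary of the branches below (added for clarity):
# - force_consistent: return the dispersion-corrected free energy if
#   available, otherwise the plain free energy.
# - otherwise: prefer the 0K-extrapolated energy (dispersion-corrected
#   if available), then fall back to the dispersion-corrected total
#   energy, the finite-basis-corrected total energy and finally the
#   raw total energy.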
if self._dispcorr_energy_free is not None: return self._dispcorr_energy_free else: return self._energy_free else: if self._energy_0K is not None: if self._dispcorr_energy_0K is not None: return self._dispcorr_energy_0K else: return self._energy_0K else: if self._dispcorr_energy_total is not None: return self._dispcorr_energy_total else: if self._energy_total_corr is not None: return self._energy_total_corr else: return self._energy_total @_self_getter def get_stress(self, atoms): """Return the stress.""" self.update(atoms) # modification: we return the Voigt form directly to get rid of the # annoying user warnings stress = np.array( [self._stress[0, 0], self._stress[1, 1], self._stress[2, 2], self._stress[1, 2], self._stress[0, 2], self._stress[0, 1]]) # return self._stress return stress @_self_getter def get_pressure(self, atoms): """Return the pressure.""" self.update(atoms) return self._pressure @_self_getter def get_unit_cell(self, atoms): """Return the unit cell.""" self.update(atoms) return self._unit_cell @_self_getter def get_kpoints(self, atoms): """Return the kpoints.""" self.update(atoms) return self._kpoints @_self_getter def get_number_cell_constraints(self, atoms): """Return the number of cell constraints.""" self.update(atoms) return self._number_of_cell_constraints @_self_getter def get_charges(self, atoms): """Run CASTEP calculation if needed and return Mulliken charges.""" self.update(atoms) return np.array(self._mulliken_charges) @_self_getter def get_magnetic_moments(self, atoms): """Run CASTEP calculation if needed and return Mulliken charges.""" self.update(atoms) return np.array(self._spins) def set_atoms(self, atoms): """Sets the atoms for the calculator and vice versa.""" atoms.pbc = [True, True, True] self.__dict__['atoms'] = atoms.copy() self.atoms._calc = self def update(self, atoms): """Checks if atoms object or calculator changed and runs calculation if so. """ if self.calculation_required(atoms): self.calculate(atoms) def calculation_required(self, atoms, _=None): """Checks wether anything changed in the atoms object or CASTEP settings since the last calculation using this instance. """ # SPR: what happens with the atoms parameter here? Why don't we use it? # from all that I can tell we need to compare against atoms instead of # self.atoms # if not self.atoms == self._old_atoms: if not atoms == self._old_atoms: return True if self._old_param is None or self._old_cell is None: return True if not self.param._options == self._old_param._options: return True if not self.cell._options == self._old_cell._options: return True return False def calculate(self, atoms): """Write all necessary input file and call CASTEP.""" self.prepare_input_files(atoms, force_write=self._force_write) if not self._prepare_input_only: self.run() self.read() # we need to push the old state here! # although run() pushes it, read() may change the atoms object # again. # yet, the old state is supposed to be the one AFTER read() self.push_oldstate() def push_oldstate(self): """This function pushes the current state of the (CASTEP) Atoms object onto the previous state. Or in other words after calling this function, calculation_required will return False and enquiry functions just report the current value, e.g. get_forces(), get_potential_energy(). 
""" # make a snapshot of all current input # to be able to test if recalculation # is necessary self._old_atoms = self.atoms.copy() self._old_param = deepcopy(self.param) self._old_cell = deepcopy(self.cell) def initialize(self, *args, **kwargs): """Just an alias for prepar_input_files to comply with standard function names in ASE. """ self.prepare_input_files(*args, **kwargs) def prepare_input_files(self, atoms=None, force_write=None): """Only writes the input .cell and .param files and return This can be useful if one quickly needs to prepare input files for a cluster where no python or ASE is available. One can than upload the file manually and read out the results using Castep().read(). """ if self.param.reuse.value is None: if self._pedantic: warnings.warn('You have not set e.g. calc.param.reuse = True. ' 'Reusing a previous calculation may save CPU time! ' 'The interface will make sure by default, a .check exists. ' 'file before adding this statement to the .param file.') if self.param.num_dump_cycles.value is None: if self._pedantic: warnings.warn('You have not set e.g. calc.param.num_dump_cycles = 0. ' 'This can save you a lot of disk space. One only needs ' '*wvfn* if electronic convergence is not achieved.') from ase.io.castep import write_param if atoms is None: atoms = self.atoms else: self.atoms = atoms if force_write is None: force_write = self._force_write # if we have new instance of the calculator, # move existing results out of the way, first if (os.path.isdir(self._directory) and self._calls == 0 and self._rename_existing_dir): if os.listdir(self._directory) == []: os.rmdir(self._directory) else: # rename appending creation date of the directory ctime = time.localtime(os.lstat(self._directory).st_ctime) os.rename(self._directory, '%s.bak-%s' % (self._directory, time.strftime('%Y%m%d-%H%M%S', ctime))) # create work directory if not os.path.isdir(self._directory): os.makedirs(self._directory, 0o775) # we do this every time, not only upon first call # if self._calls == 0: self._fetch_pspots() # if _try_reuse is requested and this # is not the first run, we try to find # the .check file from the previous run # this is only necessary if _track_output # is set to true if self._try_reuse and self._calls > 0: if os.path.exists(self._abs_path(self._check_file)): self.param.reuse = self._check_file elif os.path.exists(self._abs_path(self._castep_bin_file)): self.param.reuse = self._castep_bin_file self._seed = self._build_castep_seed() self._check_file = '%s.check' % self._seed self._castep_bin_file = '%s.castep_bin' % self._seed self._castep_file = self._abs_path('%s.castep' % self._seed) # write out the input file self._write_cell(self._abs_path('%s.cell' % self._seed), self.atoms, castep_cell=self.cell, force_write=force_write) if self._export_settings: interface_options = self._opt else: interface_options = None write_param(self._abs_path('%s.param' % self._seed), self.param, check_checkfile=self._check_checkfile, force_write=force_write, interface_options=interface_options,) def _build_castep_seed(self): """Abstracts to construction of the final castep with and without _tracking_output. """ if self._track_output: return '%s-%06d' % (self._label, self._calls) else: return '%s' % (self._label) def _abs_path(self, path): # Create an absolute path for a file to put in the working directory return os.path.join(self._directory, path) def run(self): """Simply call castep. If the first .err file contains text, this will be printed to the screen. 
""" # change to target directory self._calls += 1 # run castep itself stdout, stderr = shell_stdouterr('%s %s' % (self._castep_command, self._seed), cwd=self._directory) if stdout: print('castep call stdout:\n%s' % stdout) if stderr: print('castep call stderr:\n%s' % stderr) # shouldn't it be called after read()??? # self.push_oldstate() # check for non-empty error files err_file = self._abs_path('%s.0001.err' % self._seed) if os.path.exists(err_file): err_file = open(err_file) self._error = err_file.read() err_file.close() if self._error: raise RuntimeError(self._error) def __repr__(self): """Returns generic, fast to capture representation of CASTEP settings along with atoms object. """ expr = '' expr += '-----------------Atoms--------------------\n' if self.atoms is not None: expr += str('%20s\n' % self.atoms) else: expr += 'None\n' expr += '-----------------Param keywords-----------\n' expr += str(self.param) expr += '-----------------Cell keywords------------\n' expr += str(self.cell) expr += '-----------------Internal keys------------\n' for key in self.internal_keys: expr += '%20s : %s\n' % (key, self._opt[key]) return expr def __getattr__(self, attr): """___getattr___ gets overloaded to reroute the internal keys and to be able to easily store them in in the param so that they can be read in again in subsequent calls. """ if attr in self.internal_keys: return self._opt[attr] if attr in ['__repr__', '__str__']: raise AttributeError elif attr not in self.__dict__: raise AttributeError else: return self.__dict__[attr] def __setattr__(self, attr, value): """We overload the settattr method to make value assignment as pythonic as possible. Internal values all start with _. Value assigment is case insensitive! """ if attr.startswith('_'): # internal variables all start with _ # let's check first if they are close but not identical # to one of the switches, that the user accesses directly similars = difflib.get_close_matches(attr, self.internal_keys, cutoff=0.9) if attr not in self.internal_keys and similars: warnings.warn('Warning: You probably tried one of: %s but typed %s' % (similars, attr)) if attr in self.internal_keys: self._opt[attr] = value if attr == '_track_output': if value: self._try_reuse = True if self._pedantic: warnings.warn('You switched _track_output on. This will ' 'consume a lot of disk-space. The interface ' 'also switched _try_reuse on, which will ' 'try to find the last check file. Set ' '_try_reuse = False, if you need ' 'really separate calculations') elif '_try_reuse' in self._opt and self._try_reuse: self._try_reuse = False if self._pedantic: warnings.warn('_try_reuse is set to False, too') else: self.__dict__[attr] = value return elif attr in ['atoms', 'cell', 'param']: if value is not None: if attr == 'atoms' and not isinstance(value, ase.atoms.Atoms): raise TypeError( '%s is not an instance of ase.atoms.Atoms.' % value) elif attr == 'cell' and not isinstance(value, CastepCell): raise TypeError('%s is not an instance of CastepCell.' % value) elif attr == 'param' and not isinstance(value, CastepParam): raise TypeError('%s is not an instance of CastepParam.' 
% value) # These 3 are accepted right away, no matter what self.__dict__[attr] = value return elif attr in self.atoms_obj_keys: # keywords which clearly belong to the atoms object are # rerouted to go there self.atoms.__dict__[attr] = value return elif attr in self.atoms_keys: # CASTEP keywords that should go into the atoms object # itself are blocked warnings.warn('Ignoring settings of "%s", since this has to be set ' 'through the atoms object' % attr) return attr = attr.lower() if attr not in (list(self.cell._options.keys()) + list(self.param._options.keys())): # what is left now should be meant to be a castep keyword # so we first check if it is defined, and if not offer some error # correction if self._kw_tol == 0: similars = difflib.get_close_matches( attr, list(self.cell._options.keys()) + list(self.param._options.keys())) if similars: raise UserWarning('Option "%s" not known! You mean "%s"?' % (attr, similars[0])) else: raise UserWarning('Option "%s" is not known!' % attr) else: warnings.warn('Option "%s" is not known - please set any new' ' options directly in the .cell or .param ' 'objects' % attr) return # here we know it must go into one of the component param or cell # so we first determine which one if attr in self.param._options.keys(): comp = 'param' elif attr in self.cell._options.keys(): comp = 'cell' else: raise UserWarning('Programming error: could not attach ' 'the keyword to an input file') self.__dict__[comp].__setattr__(attr, value) def merge_param(self, param, overwrite=True, ignore_internal_keys=False): """Parse a param file and merge it into the current parameters.""" if isinstance(param, CastepParam): for key, option in param._options.items(): if option.value is not None: self.param.__setattr__(key, option.value) return elif isinstance(param, str): param_file = open(param, 'r') _close = True else: # in this case we assume that we have a fileobj already, but check # for attributes in order to avoid extended EAFP blocks. param_file = param # look before you leap... attributes = ['name', 'close', 'readlines'] for attr in attributes: if not hasattr(param_file, attr): raise TypeError('"param" is neither CastepParam nor str ' 'nor valid fileobj') param = param_file.name _close = False self, int_opts = read_param(fd=param_file, calc=self, get_interface_options=True) # Add the interface options for k, val in int_opts.items(): if (k in self.internal_keys and not ignore_internal_keys): if val in _tf_table: val = _tf_table[val] self._opt[k] = val if _close: param_file.close() def dryrun_ok(self, dryrun_flag='-dryrun'): """Starts a CASTEP run with the -dryrun flag [default] in a temporary directory and checks whether all variables are initialized correctly. This is recommended for every bigger simulation.
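A minimal, illustrative usage sketch (``calc`` and ``atoms`` are assumed to be an existing Castep instance and Atoms object)::

    if calc.dryrun_ok():
        calc.calculate(atoms)
    else:
        print(calc._error)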
""" from ase.io.castep import write_param temp_dir = tempfile.mkdtemp() self._fetch_pspots(temp_dir) seed = 'dryrun' self._write_cell(os.path.join(temp_dir, '%s.cell' % seed), self.atoms, castep_cell=self.cell) # This part needs to be modified now that we rely on the new formats.py # interface if not os.path.isfile(os.path.join(temp_dir, '%s.cell' % seed)): warnings.warn('%s.cell not written - aborting dryrun' % seed) return write_param(os.path.join(temp_dir, '%s.param' % seed), self.param, ) stdout, stderr = shell_stdouterr(('%s %s %s' % (self._castep_command, seed, dryrun_flag)), cwd=temp_dir) if stdout: print(stdout) if stderr: print(stderr) result_file = open(os.path.join(temp_dir, '%s.castep' % seed)) txt = result_file.read() ok_string = r'.*DRYRUN finished.*No problems found with input files.*' match = re.match(ok_string, txt, re.DOTALL) m = re.search(r'Number of kpoints used =\s*([0-9]+)', txt) if m: self._kpoints = int(m.group(1)) else: warnings.warn( 'Couldn\'t fetch number of kpoints from dryrun CASTEP file') err_file = os.path.join(temp_dir, '%s.0001.err' % seed) if match is None and os.path.exists(err_file): err_file = open(err_file) self._error = err_file.read() err_file.close() result_file.close() shutil.rmtree(temp_dir) # re.match return None is the string does not match return match is not None # this could go into the Atoms() class at some point... def _get_number_in_species(self, at, atoms=None): """Return the number of the atoms within the set of it own species. If you are an ASE commiter: why not move this into ase.atoms.Atoms ?""" if atoms is None: atoms = self.atoms numbers = atoms.get_atomic_numbers() n = numbers[at] nis = numbers.tolist()[:at + 1].count(n) return nis def _get_absolute_number(self, species, nic, atoms=None): """This is the inverse function to _get_number in species.""" if atoms is None: atoms = self.atoms ch = atoms.get_chemical_symbols() ch.reverse() total_nr = 0 assert nic > 0, 'Number in species needs to be 1 or larger' while True: if ch.pop() == species: if nic == 1: return total_nr nic -= 1 total_nr += 1 def _fetch_pspots(self, directory=None): """Put all specified pseudo-potentials into the working directory. """ # should be a '==' right? Otherwise setting _castep_pp_path is not # honored. if (not os.environ.get('PSPOT_DIR', None) and self._castep_pp_path == os.path.abspath('.')): # By default CASTEP consults the environment variable # PSPOT_DIR. If this contains a list of colon separated # directories it will check those directories for pseudo- # potential files if not in the current directory. # Thus if PSPOT_DIR is set there is nothing left to do. # If however PSPOT_DIR was been accidentally set # (e.g. with regards to a different program) # setting CASTEP_PP_PATH to an explicit value will # still be honored. return if directory is None: directory = self._directory if not os.path.isdir(self._castep_pp_path): warnings.warn('PSPs directory %s not found' % self._castep_pp_path) pspots = {} if self._find_pspots: self.find_pspots() if self.cell.species_pot.value is not None: for line in self.cell.species_pot.value.split('\n'): line = line.split() if line: pspots[line[0]] = line[1] for species in self.atoms.get_chemical_symbols(): if not pspots or species not in pspots.keys(): if self._build_missing_pspots: if self._pedantic: warnings.warn('Warning: you have no PP specified for %s. ' 'CASTEP will now generate an on-the-fly potentials. ' 'For sake of numerical consistency and efficiency ' 'this is discouraged.' 
% species) else: raise RuntimeError( 'Warning: you have no PP specified for %s.' % species) if self.cell.species_pot.value: for (species, pspot) in pspots.items(): orig_pspot_file = os.path.join(self._castep_pp_path, pspot) cp_pspot_file = os.path.join(directory, pspot) if (os.path.exists(orig_pspot_file) and not os.path.exists(cp_pspot_file)): if self._copy_pspots: shutil.copy(orig_pspot_file, directory) elif self._link_pspots: os.symlink(orig_pspot_file, cp_pspot_file) else: if self._pedantic: warnings.warn('Warning: PP files have neither been ' 'linked nor copied to the working directory. Make ' 'sure to set the evironment variable PSPOT_DIR ' 'accordingly!') def get_castep_version(castep_command): """This returns the version number as printed in the CASTEP banner. For newer CASTEP versions ( > 6.1) the --version command line option has been added; this will be attempted first. """ import tempfile with tempfile.TemporaryDirectory() as temp_dir: return _get_castep_version(castep_command, temp_dir) def _get_castep_version(castep_command, temp_dir): jname = 'dummy_jobname' stdout, stderr = '', '' fallback_version = 16. # CASTEP 16.0 and 16.1 report version wrongly try: stdout, stderr = subprocess.Popen( castep_command.split() + ['--version'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=temp_dir, universal_newlines=True).communicate() if 'CASTEP version' not in stdout: stdout, stderr = subprocess.Popen( castep_command.split() + [jname], stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=temp_dir, universal_newlines=True).communicate() except Exception: # XXX Which kind of exception? msg = '' msg += 'Could not determine the version of your CASTEP binary \n' msg += 'This usually means one of the following \n' msg += ' * you do not have CASTEP installed \n' msg += ' * you have not set the CASTEP_COMMAND to call it \n' msg += ' * you have provided a wrong CASTEP_COMMAND. \n' msg += ' Make sure it is in your PATH\n\n' msg += stdout msg += stderr raise CastepVersionError(msg) if 'CASTEP version' in stdout: output_txt = stdout.split('\n') version_re = re.compile(r'CASTEP version:\s*([0-9\.]*)') else: output = open(os.path.join(temp_dir, '%s.castep' % jname)) output_txt = output.readlines() output.close() version_re = re.compile(r'(?<=CASTEP version )[0-9.]*') # shutil.rmtree(temp_dir) for line in output_txt: if 'CASTEP version' in line: try: return float(version_re.findall(line)[0]) except ValueError: # Fallback for buggy --version on CASTEP 16.0, 16.1 return fallback_version def create_castep_keywords(castep_command, filename='castep_keywords.json', force_write=True, path='.', fetch_only=None): """This function allows to fetch all available keywords from stdout of an installed castep binary. It furthermore collects the documentation to harness the power of (ipython) inspection and type for some basic type checking of input. All information is stored in a JSON file that is not distributed by default to avoid breaking the license of CASTEP. """ # Takes a while ... # Fetch all allowed parameters # fetch_only : only fetch that many parameters (for testsuite only) suffixes = ['cell', 'param'] filepath = os.path.join(path, filename) if os.path.exists(filepath) and not force_write: warnings.warn('CASTEP Options Module file exists. 
' 'You can overwrite it by calling ' 'python castep.py -f [CASTEP_COMMAND].') return False # Not saving directly to file here to prevent half-generated files # which will cause problems on future runs castep_version = get_castep_version(castep_command) help_all, _ = shell_stdouterr('%s -help all' % castep_command) # Filter out proper keywords try: # The old pattern does not match properly, as in CASTEP as of v8.0 there # are some keywords for the semi-empirical dispersion correction (SEDC) # which also include numbers. if castep_version < 7.0: pattern = r'((?<=^ )[A-Z_]{2,}|(?<=^)[A-Z_]{2,})' else: pattern = r'((?<=^ )[A-Z_\d]{2,}|(?<=^)[A-Z_\d]{2,})' raw_options = re.findall(pattern, help_all, re.MULTILINE) except Exception: warnings.warn('Problem parsing: %s' % help_all) raise types = set() levels = set() processed_n = 0 to_process = len(raw_options[:fetch_only]) processed_options = {sf: {} for sf in suffixes} for o_i, option in enumerate(raw_options[:fetch_only]): doc, _ = shell_stdouterr('%s -help %s' % (castep_command, option)) # Stand Back! I know regular expressions (http://xkcd.com/208/) :-) match = re.match(r'(?P<before>.*)Type: (?P<type>.+?)\s+' + r'Level: (?P<level>[^ ]+)\n\s*\n' + r'(?P<doc>.*?)(\n\s*\n|$)', doc, re.DOTALL) processed_n += 1 if match is not None: match = match.groupdict() # JM: uncomment lines in following block to debug issues # with keyword assignment during extraction process from CASTEP suffix = None if re.findall(r'PARAMETERS keywords:\n\n\s?None found', doc): suffix = 'cell' if re.findall(r'CELL keywords:\n\n\s?None found', doc): suffix = 'param' if suffix is None: warnings.warn('%s -> not assigned to either' ' CELL or PARAMETERS keywords' % option) option = option.lower() mtyp = match.get('type', None) mlvl = match.get('level', None) mdoc = match.get('doc', None) if mtyp is None: warnings.warn('Found no type for %s' % option) continue if mlvl is None: warnings.warn('Found no level for %s' % option) continue if mdoc is None: warnings.warn('Found no doc string for %s' % option) continue types = types.union([mtyp]) levels = levels.union([mlvl]) processed_options[suffix][option] = { 'keyword': option, 'option_type': mtyp, 'level': mlvl, 'docstring': mdoc } processed_n += 1 frac = (o_i + 1.0) / to_process sys.stdout.write('\rProcessed: [{0}] {1:>3.0f}%'.format( '#' * int(frac * 20) + ' ' * (20 - int(frac * 20)), 100 * frac)) sys.stdout.flush() else: warnings.warn('create_castep_keywords: Could not process %s' % option) sys.stdout.write('\n') sys.stdout.flush() processed_options['types'] = list(types) processed_options['levels'] = list(levels) processed_options['castep_version'] = castep_version json.dump(processed_options, open(filepath, 'w'), indent=4) warnings.warn('CASTEP v%s, fetched %s keywords' % (castep_version, processed_n)) return True class CastepOption: """A CASTEP option.
It handles basic conversions from string to its value type.""" default_convert_types = { 'boolean (logical)': 'bool', 'defined': 'bool', 'string': 'str', 'integer': 'int', 'real': 'float', 'integer vector': 'int_vector', 'real vector': 'float_vector', 'physical': 'float_physical', 'block': 'block' } def __init__(self, keyword, level, option_type, value=None, docstring='No information available'): self.keyword = keyword self.level = level self.type = option_type self._value = value self.__doc__ = docstring @property def value(self): if self._value is not None: if self.type.lower() in ('integer vector', 'real vector', 'physical'): return ' '.join(map(str, self._value)) elif self.type.lower() in ('boolean (logical)', 'defined'): return str(self._value).upper() else: return str(self._value) @property def raw_value(self): # The value, not converted to a string return self._value @value.setter # type: ignore def value(self, val): if val is None: self.clear() return ctype = self.default_convert_types.get(self.type.lower(), 'str') typeparse = '_parse_%s' % ctype try: self._value = getattr(self, typeparse)(val) except ValueError: raise ConversionError(ctype, self.keyword, val) def clear(self): """Reset the value of the option to None again""" self._value = None @staticmethod def _parse_bool(value): try: value = _tf_table[str(value).strip().title()] except (KeyError, ValueError): raise ValueError() return value @staticmethod def _parse_str(value): value = str(value) return value @staticmethod def _parse_int(value): value = int(value) return value @staticmethod def _parse_float(value): value = float(value) return value @staticmethod def _parse_int_vector(value): # Accepts either a string or an actual list/numpy array of ints if isinstance(value, str): if ',' in value: value = value.replace(',', ' ') value = list(map(int, value.split())) value = np.array(value) if value.shape != (3,) or value.dtype != int: raise ValueError() return list(value) @staticmethod def _parse_float_vector(value): # Accepts either a string or an actual list/numpy array of floats if isinstance(value, str): if ',' in value: value = value.replace(',', ' ') value = list(map(float, value.split())) value = np.array(value) * 1.0 if value.shape != (3,) or value.dtype != float: raise ValueError() return list(value) @staticmethod def _parse_float_physical(value): # If this is a string containing units, saves them if isinstance(value, str): value = value.split() try: l = len(value) except TypeError: l = 1 value = [value] if l == 1: try: value = (float(value[0]), '') except (TypeError, ValueError): raise ValueError() elif l == 2: try: value = (float(value[0]), value[1]) except (TypeError, ValueError, IndexError): raise ValueError() else: raise ValueError() return value @staticmethod def _parse_block(value): if isinstance(value, str): return value elif hasattr(value, '__getitem__'): return '\n'.join(value) # Arrays of lines else: raise ValueError() def __repr__(self): if self._value: expr = ('Option: {keyword}({type}, {level}):\n{_value}\n' ).format(**self.__dict__) else: expr = ('Option: {keyword}[unset]({type}, {level})' ).format(**self.__dict__) return expr def __eq__(self, other): if not isinstance(other, CastepOption): return False else: return self.__dict__ == other.__dict__ class CastepOptionDict: """A dictionary-like object to hold a set of options for .cell or .param files loaded from a dictionary, for the sake of validation. Replaces the old CastepCellDict and CastepParamDict that were defined in the castep_keywords.py file. 
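A minimal, purely illustrative sketch; the option metadata below is made up rather than taken from a real castep_keywords.json::

    opts = CastepOptionDict({
        'cut_off_energy': {'keyword': 'cut_off_energy',
                           'level': 'Basic',
                           'option_type': 'physical'}})
    opts.cut_off_energy.value = '500 eV'  # parsed by CastepOption to (500.0, 'eV')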
""" def __init__(self, options=None): object.__init__(self) self._options = {} # ComparableDict is not needed any more as # CastepOptions can be compared directly now for kw in options: opt = CastepOption(**options[kw]) self._options[opt.keyword] = opt self.__dict__[opt.keyword] = opt class CastepInputFile: """Master class for CastepParam and CastepCell to inherit from""" _keyword_conflicts: List[Set[str]] = [] def __init__(self, options_dict=None, keyword_tolerance=1): object.__init__(self) if options_dict is None: options_dict = CastepOptionDict({}) self._options = options_dict._options self.__dict__.update(self._options) # keyword_tolerance means how strict the checks on new attributes are # 0 = no new attributes allowed # 1 = new attributes allowed, warning given # 2 = new attributes allowed, silent self._perm = np.clip(keyword_tolerance, 0, 2) # Compile a dictionary for quick check of conflict sets self._conflict_dict = {kw: set(cset).difference({kw}) for cset in self._keyword_conflicts for kw in cset} def __repr__(self): expr = '' is_default = True for key, option in sorted(self._options.items()): if option.value is not None: is_default = False expr += ('%20s : %s\n' % (key, option.value)) if is_default: expr = 'Default\n' expr += 'Keyword tolerance: {0}'.format(self._perm) return expr def __setattr__(self, attr, value): # Hidden attributes are treated normally if attr.startswith('_'): self.__dict__[attr] = value return if attr not in self._options.keys(): if self._perm > 0: # Do we consider it a string or a block? is_str = isinstance(value, str) is_block = False if ((hasattr(value, '__getitem__') and not is_str) or (is_str and len(value.split('\n')) > 1)): is_block = True if self._perm == 0: similars = difflib.get_close_matches(attr, self._options.keys()) if similars: raise UserWarning(('Option "%s" not known! You mean "%s"?') % (attr, similars[0])) else: raise UserWarning('Option "%s" is not known!' % attr) elif self._perm == 1: warnings.warn(('Option "%s" is not known and will ' 'be added as a %s') % (attr, ('block' if is_block else 'string'))) attr = attr.lower() opt = CastepOption(keyword=attr, level='Unknown', option_type='block' if is_block else 'string') self._options[attr] = opt self.__dict__[attr] = opt else: attr = attr.lower() opt = self._options[attr] if not opt.type.lower() == 'block' and isinstance(value, str): value = value.replace(':', ' ') # If it is, use the appropriate parser, unless a custom one is defined attrparse = '_parse_%s' % attr.lower() # Check for any conflicts if the value is not None if not (value is None): cset = self._conflict_dict.get(attr.lower(), {}) for c in cset: if (c in self._options and self._options[c].value): warnings.warn( 'option "{attr}" conflicts with "{conflict}" in ' 'calculator. 
Setting "{conflict}" to ' 'None.'.format(attr=attr, conflict=c)) self._options[c].value = None if hasattr(self, attrparse): self._options[attr].value = self.__getattribute__(attrparse)(value) else: self._options[attr].value = value def __getattr__(self, name): if name[0] == '_' or self._perm == 0: raise AttributeError() if self._perm == 1: warnings.warn('Option %s is not known, returning None' % (name)) return CastepOption(keyword='none', level='Unknown', option_type='string', value=None) def get_attr_dict(self, raw=False, types=False): """Settings that go into .param file in a traditional dict""" attrdict = {k: o.raw_value if raw else o.value for k, o in self._options.items() if o.value is not None} if types: for key, val in attrdict.items(): attrdict[key] = (val, self._options[key].type) return attrdict class CastepParam(CastepInputFile): """CastepParam abstracts the settings that go into the .param file""" _keyword_conflicts = [{'cut_off_energy', 'basis_precision'}, ] def __init__(self, castep_keywords, keyword_tolerance=1): self._castep_version = castep_keywords.castep_version CastepInputFile.__init__(self, castep_keywords.CastepParamDict(), keyword_tolerance) @property def castep_version(self): return self._castep_version # .param specific parsers def _parse_reuse(self, value): if value is None: return None # Reset the value try: if self._options['continuation'].value: warnings.warn('Cannot set reuse if continuation is set, and ' 'vice versa. Set the other to None, if you want ' 'this setting.') return None except KeyError: pass return 'default' if (value is True) else str(value) def _parse_continuation(self, value): if value is None: return None # Reset the value try: if self._options['reuse'].value: warnings.warn('Cannot set reuse if continuation is set, and ' 'vice versa. 
Set the other to None, if you want ' 'this setting.') return None except KeyError: pass return 'default' if (value is True) else str(value) class CastepCell(CastepInputFile): """CastepCell abstracts all setting that go into the .cell file""" _keyword_conflicts = [ {'kpoint_mp_grid', 'kpoint_mp_spacing', 'kpoint_list', 'kpoints_mp_grid', 'kpoints_mp_spacing', 'kpoints_list'}, {'bs_kpoint_mp_grid', 'bs_kpoint_mp_spacing', 'bs_kpoint_list', 'bs_kpoint_path', 'bs_kpoints_mp_grid', 'bs_kpoints_mp_spacing', 'bs_kpoints_list', 'bs_kpoints_path'}, {'spectral_kpoint_mp_grid', 'spectral_kpoint_mp_spacing', 'spectral_kpoint_list', 'spectral_kpoint_path', 'spectral_kpoints_mp_grid', 'spectral_kpoints_mp_spacing', 'spectral_kpoints_list', 'spectral_kpoints_path'}, {'phonon_kpoint_mp_grid', 'phonon_kpoint_mp_spacing', 'phonon_kpoint_list', 'phonon_kpoint_path', 'phonon_kpoints_mp_grid', 'phonon_kpoints_mp_spacing', 'phonon_kpoints_list', 'phonon_kpoints_path'}, {'fine_phonon_kpoint_mp_grid', 'fine_phonon_kpoint_mp_spacing', 'fine_phonon_kpoint_list', 'fine_phonon_kpoint_path'}, {'magres_kpoint_mp_grid', 'magres_kpoint_mp_spacing', 'magres_kpoint_list', 'magres_kpoint_path'}, {'elnes_kpoint_mp_grid', 'elnes_kpoint_mp_spacing', 'elnes_kpoint_list', 'elnes_kpoint_path'}, {'optics_kpoint_mp_grid', 'optics_kpoint_mp_spacing', 'optics_kpoint_list', 'optics_kpoint_path'}, {'supercell_kpoint_mp_grid', 'supercell_kpoint_mp_spacing', 'supercell_kpoint_list', 'supercell_kpoint_path'}, ] def __init__(self, castep_keywords, keyword_tolerance=1): self._castep_version = castep_keywords.castep_version CastepInputFile.__init__(self, castep_keywords.CastepCellDict(), keyword_tolerance) @property def castep_version(self): return self._castep_version # .cell specific parsers def _parse_species_pot(self, value): # Single tuple if isinstance(value, tuple) and len(value) == 2: value = [value] # List of tuples if hasattr(value, '__getitem__'): pspots = [tuple(map(str.strip, x)) for x in value] if not all(map(lambda x: len(x) == 2, value)): warnings.warn('Please specify pseudopotentials in python as ' 'a tuple or a list of tuples formatted like: ' '(species, file), e.g. ("O", "path-to/O_OTFG.usp") ' 'Anything else will be ignored') return None text_block = self._options['species_pot'].value text_block = text_block if text_block else '' # Remove any duplicates for pp in pspots: text_block = re.sub(r'\n?\s*%s\s+.*' % pp[0], '', text_block) if pp[1]: text_block += '\n%s %s' % pp return text_block def _parse_symmetry_ops(self, value): if not isinstance(value, tuple) \ or not len(value) == 2 \ or not value[0].shape[1:] == (3, 3) \ or not value[1].shape[1:] == (3,) \ or not value[0].shape[0] == value[1].shape[0]: warnings.warn('Invalid symmetry_ops block, skipping') return # Now on to print... 
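        # Each symmetry operation is written out as its 3x3 rotation matrix
        # (one row per line) followed by a line with the 3-component
        # translation vector; consecutive operations are separated by a
        # blank line.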
text_block = '' for op_i, (op_rot, op_tranls) in enumerate(zip(*value)): text_block += '\n'.join([' '.join([str(x) for x in row]) for row in op_rot]) text_block += '\n' text_block += ' '.join([str(x) for x in op_tranls]) text_block += '\n\n' return text_block def _parse_positions_abs_intermediate(self, value): return _parse_tss_block(value) def _parse_positions_abs_product(self, value): return _parse_tss_block(value) def _parse_positions_frac_intermediate(self, value): return _parse_tss_block(value, True) def _parse_positions_frac_product(self, value): return _parse_tss_block(value, True) CastepKeywords = namedtuple('CastepKeywords', ['CastepParamDict', 'CastepCellDict', 'types', 'levels', 'castep_version']) # We keep this just for naming consistency with older versions def make_cell_dict(data=None): data = data if data is not None else {} class CastepCellDict(CastepOptionDict): def __init__(self): CastepOptionDict.__init__(self, data) return CastepCellDict def make_param_dict(data=None): data = data if data is not None else {} class CastepParamDict(CastepOptionDict): def __init__(self): CastepOptionDict.__init__(self, data) return CastepParamDict class CastepVersionError(Exception): """No special behaviour, works to signal when Castep can not be found""" pass class ConversionError(Exception): """Print customized error for options that are not converted correctly and point out that they are maybe not implemented, yet""" def __init__(self, key_type, attr, value): Exception.__init__(self) self.key_type = key_type self.value = value self.attr = attr def __str__(self): return 'Could not convert %s = %s to %s\n' \ % (self.attr, self.value, self.key_type) \ + 'This means you either tried to set a value of the wrong\n'\ + 'type or this keyword needs some special care. Please feel\n'\ + 'to add it to the corresponding __setattr__ method and send\n'\ + 'the patch to %s, so we can all benefit.' % (contact_email) def get_castep_pp_path(castep_pp_path=''): """Abstract the quest for a CASTEP PSP directory.""" if castep_pp_path: return os.path.abspath(os.path.expanduser(castep_pp_path)) elif 'PSPOT_DIR' in os.environ: return os.environ['PSPOT_DIR'] elif 'CASTEP_PP_PATH' in os.environ: return os.environ['CASTEP_PP_PATH'] else: return os.path.abspath('.') def get_castep_command(castep_command=''): """Abstract the quest for a castep_command string.""" if castep_command: return castep_command elif 'CASTEP_COMMAND' in os.environ: return os.environ['CASTEP_COMMAND'] else: return 'castep' def shell_stdouterr(raw_command, cwd=None): """Abstracts the standard call of the commandline, when we are only interested in the stdout and stderr """ stdout, stderr = subprocess.Popen(raw_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True, cwd=cwd).communicate() return stdout.strip(), stderr.strip() def import_castep_keywords(castep_command='', filename='castep_keywords.json', path='.'): # Search for castep_keywords.json (or however it's called) in multiple # paths searchpaths = [path, os.path.expanduser('~/.ase'), os.path.join(ase.__path__[0], 'calculators')] try: kwfile = sum([glob.glob(os.path.join(sp, filename)) for sp in searchpaths], [])[0] except IndexError: warnings.warn("""Generating CASTEP keywords JSON file... hang on. The CASTEP keywords JSON file contains abstractions for CASTEP input parameters (for both .cell and .param input files), including some format checks and descriptions. 
The latter are extracted from the internal online help facility of a CASTEP binary, thus allowing to easily keep the calculator synchronized with (different versions of) the CASTEP code. Consequently, avoiding licensing issues (CASTEP is distributed commercially by accelrys), we consider it wise not to provide the file in the first place.""") create_castep_keywords(get_castep_command(castep_command), filename=filename, path=path) warnings.warn('Stored %s in %s. Copy it to your ASE installation under ' 'ase/calculators for system-wide installation. Using a *nix ' 'OS this can be a simple as mv %s %s' % (filename, os.path.abspath(path), os.path.join(os.path.abspath(path), filename), os.path.join(os.path.dirname(ase.__file__), 'calculators'))) kwfile = os.path.join(path, filename) # Now create the castep_keywords object proper kwdata = json.load(open(kwfile)) # This is a bit awkward, but it's necessary for backwards compatibility param_dict = make_param_dict(kwdata['param']) cell_dict = make_cell_dict(kwdata['cell']) castep_keywords = CastepKeywords(param_dict, cell_dict, kwdata['types'], kwdata['levels'], kwdata['castep_version']) return castep_keywords if __name__ == '__main__': warnings.warn('When called directly this calculator will fetch all available ' 'keywords from the binarys help function into a ' 'castep_keywords.json in the current directory %s ' 'For system wide usage, it can be copied into an ase installation ' 'at ASE/calculators. ' 'This castep_keywords.json usually only needs to be generated once ' 'for a CASTEP binary/CASTEP version.' % os.getcwd()) import optparse parser = optparse.OptionParser() parser.add_option( '-f', '--force-write', dest='force_write', help='Force overwriting existing castep_keywords.json', default=False, action='store_true') (options, args) = parser.parse_args() if args: opt_castep_command = ''.join(args) else: opt_castep_command = '' generated = create_castep_keywords(get_castep_command(opt_castep_command), force_write=options.force_write) if generated: try: with open('castep_keywords.json') as fd: json.load(fd) except Exception as e: warnings.warn( '%s Ooops, something went wrong with the CASTEP keywords' % e) else: warnings.warn('Import works. Looking good!') ase-3.22.1/ase/calculators/checkpoint.py000066400000000000000000000244261415166253600201550ustar00rootroot00000000000000"""Checkpointing and restart functionality for scripts using ASE Atoms objects. Initialize checkpoint object: CP = Checkpoint('checkpoints.db') Checkpointed code block in try ... except notation: try: a, C, C_err = CP.load() except NoCheckpoint: C, C_err = fit_elastic_constants(a) CP.save(a, C, C_err) Checkpoint code block, shorthand notation: C, C_err = CP(fit_elastic_constants)(a) Example for checkpointing within an iterative loop, e.g. for searching crack tip position: try: a, converged, tip_x, tip_y = CP.load() except NoCheckpoint: converged = False tip_x = tip_x0 tip_y = tip_y0 while not converged: ... do something to find better crack tip position ... converged = ... CP.flush(a, converged, tip_x, tip_y) The simplest way to use checkpointing is through the CheckpointCalculator. It wraps any calculator object and does a checkpoint whenever a calculation is performed: calc = ... 
cp_calc = CheckpointCalculator(calc) atoms.calc = cp_calc e = atoms.get_potential_energy() # 1st time, does calc, writes to checkfile # subsequent runs, reads from checkpoint """ from typing import Dict, Any import numpy as np import ase from ase.db import connect from ase.calculators.calculator import Calculator class NoCheckpoint(Exception): pass class DevNull: def write(str, *args): pass class Checkpoint: _value_prefix = '_values_' def __init__(self, db='checkpoints.db', logfile=None): self.db = db if logfile is None: logfile = DevNull() self.logfile = logfile self.checkpoint_id = [0] self.in_checkpointed_region = False def __call__(self, func, *args, **kwargs): checkpoint_func_name = str(func) def decorated_func(*args, **kwargs): # Get the first ase.Atoms object. atoms = None for a in args: if atoms is None and isinstance(a, ase.Atoms): atoms = a try: retvals = self.load(atoms=atoms) except NoCheckpoint: retvals = func(*args, **kwargs) if isinstance(retvals, tuple): self.save(*retvals, atoms=atoms, checkpoint_func_name=checkpoint_func_name) else: self.save(retvals, atoms=atoms, checkpoint_func_name=checkpoint_func_name) return retvals return decorated_func def _increase_checkpoint_id(self): if self.in_checkpointed_region: self.checkpoint_id += [1] else: self.checkpoint_id[-1] += 1 self.logfile.write('Entered checkpoint region ' '{0}.\n'.format(self.checkpoint_id)) self.in_checkpointed_region = True def _decrease_checkpoint_id(self): self.logfile.write('Leaving checkpoint region ' '{0}.\n'.format(self.checkpoint_id)) if not self.in_checkpointed_region: self.checkpoint_id = self.checkpoint_id[:-1] assert len(self.checkpoint_id) >= 1 self.in_checkpointed_region = False assert self.checkpoint_id[-1] >= 1 def _mangled_checkpoint_id(self): """ Returns a mangled checkpoint id string: check_c_1:c_2:c_3:... E.g. if checkpoint is nested and id is [3,2,6] it returns: 'check3:2:6' """ return 'check' + ':'.join(str(id) for id in self.checkpoint_id) def load(self, atoms=None): """ Retrieve checkpoint data from file. If atoms object is specified, then the calculator connected to that object is copied to all returning atoms object. Returns tuple of values as passed to flush or save during checkpoint write. 
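        If only a single value was stored, that value is returned directly
        rather than as a one-element tuple.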
""" self._increase_checkpoint_id() retvals = [] with connect(self.db) as db: try: dbentry = db.get(checkpoint_id=self._mangled_checkpoint_id()) except KeyError: raise NoCheckpoint data = dbentry.data atomsi = data['checkpoint_atoms_args_index'] i = 0 while (i == atomsi or '{0}{1}'.format(self._value_prefix, i) in data): if i == atomsi: newatoms = dbentry.toatoms() if atoms is not None: # Assign calculator newatoms.calc = atoms.calc retvals += [newatoms] else: retvals += [data['{0}{1}'.format(self._value_prefix, i)]] i += 1 self.logfile.write('Successfully restored checkpoint ' '{0}.\n'.format(self.checkpoint_id)) self._decrease_checkpoint_id() if len(retvals) == 1: return retvals[0] else: return tuple(retvals) def _flush(self, *args, **kwargs): data = dict(('{0}{1}'.format(self._value_prefix, i), v) for i, v in enumerate(args)) try: atomsi = [isinstance(v, ase.Atoms) for v in args].index(True) atoms = args[atomsi] del data['{0}{1}'.format(self._value_prefix, atomsi)] except ValueError: atomsi = -1 try: atoms = kwargs['atoms'] except KeyError: raise RuntimeError('No atoms object provided in arguments.') try: del kwargs['atoms'] except KeyError: pass data['checkpoint_atoms_args_index'] = atomsi data.update(kwargs) with connect(self.db) as db: try: dbentry = db.get(checkpoint_id=self._mangled_checkpoint_id()) del db[dbentry.id] except KeyError: pass db.write(atoms, checkpoint_id=self._mangled_checkpoint_id(), data=data) self.logfile.write('Successfully stored checkpoint ' '{0}.\n'.format(self.checkpoint_id)) def flush(self, *args, **kwargs): """ Store data to a checkpoint without increasing the checkpoint id. This is useful to continuously update the checkpoint state in an iterative loop. """ # If we are flushing from a successfully restored checkpoint, then # in_checkpointed_region will be set to False. We need to reset to True # because a call to flush indicates that this checkpoint is still # active. self.in_checkpointed_region = False self._flush(*args, **kwargs) def save(self, *args, **kwargs): """ Store data to a checkpoint and increase the checkpoint id. This closes the checkpoint. """ self._decrease_checkpoint_id() self._flush(*args, **kwargs) def atoms_almost_equal(a, b, tol=1e-9): return (np.abs(a.positions - b.positions).max() < tol and (a.numbers == b.numbers).all() and np.abs(a.cell - b.cell).max() < tol and (a.pbc == b.pbc).all()) class CheckpointCalculator(Calculator): """ This wraps any calculator object to checkpoint whenever a calculation is performed. This is particularly useful for expensive calculators, e.g. DFT and allows usage of complex workflows. Example usage: calc = ... 
cp_calc = CheckpointCalculator(calc) atoms.calc = cp_calc e = atoms.get_potential_energy() # 1st time, does calc, writes to checkfile # subsequent runs, reads from checkpoint file """ implemented_properties = ase.calculators.calculator.all_properties default_parameters: Dict[str, Any] = {} name = 'CheckpointCalculator' property_to_method_name = { 'energy': 'get_potential_energy', 'energies': 'get_potential_energies', 'forces': 'get_forces', 'stress': 'get_stress', 'stresses': 'get_stresses'} def __init__(self, calculator, db='checkpoints.db', logfile=None): Calculator.__init__(self) self.calculator = calculator if logfile is None: logfile = DevNull() self.checkpoint = Checkpoint(db, logfile) self.logfile = logfile def calculate(self, atoms, properties, system_changes): Calculator.calculate(self, atoms, properties, system_changes) try: results = self.checkpoint.load(atoms) prev_atoms, results = results[0], results[1:] try: assert atoms_almost_equal(atoms, prev_atoms) except AssertionError: raise AssertionError('mismatch between current atoms and ' 'those read from checkpoint file') self.logfile.write('retrieved results for {0} from checkpoint\n' .format(properties)) # save results in calculator for next time if isinstance(self.calculator, Calculator): if not hasattr(self.calculator, 'results'): self.calculator.results = {} self.calculator.results.update(dict(zip(properties, results))) except NoCheckpoint: if isinstance(self.calculator, Calculator): self.logfile.write('doing calculation of {0} with new-style ' 'calculator interface\n'.format(properties)) self.calculator.calculate(atoms, properties, system_changes) results = [self.calculator.results[prop] for prop in properties] else: self.logfile.write('doing calculation of {0} with old-style ' 'calculator interface\n'.format(properties)) results = [] for prop in properties: method_name = self.property_to_method_name[prop] method = getattr(self.calculator, method_name) results.append(method(atoms)) _calculator = atoms.calc try: atoms.calc = self.calculator self.checkpoint.save(atoms, *results) finally: atoms.calc = _calculator self.results = dict(zip(properties, results)) ase-3.22.1/ase/calculators/combine_mm.py000066400000000000000000000245201415166253600201260ustar00rootroot00000000000000import numpy as np from ase.calculators.calculator import Calculator from ase.calculators.qmmm import combine_lj_lorenz_berthelot from ase import units import copy k_c = units.Hartree * units.Bohr class CombineMM(Calculator): implemented_properties = ['energy', 'forces'] def __init__(self, idx, apm1, apm2, calc1, calc2, sig1, eps1, sig2, eps2, rc=7.0, width=1.0): """A calculator that combines two MM calculators (TIPnP, Counterions, ...) parameters: idx: List of indices of atoms belonging to calculator 1 apm1,2: atoms pr molecule of each subsystem (NB: apm for TIP4P is 3!) calc1,2: calculator objects for each subsystem sig1,2, eps1,2: LJ parameters for each subsystem. Should be a numpy array of length = apm rc = long range cutoff width = width of cutoff region. Currently the interactions are limited to being: - Nonbonded - Hardcoded to two terms: - Coulomb electrostatics - Lennard-Jones It could of course benefit from being more like the EIQMMM class where the interactions are switchable. But this is in princple just meant for adding counter ions to a qmmm simulation to neutralize the charge of the total systemn Maybe it can combine n MM calculators in the future? 
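        A rough usage sketch (``ion_calc``, ``atoms`` and all numerical
        values below are placeholders, not recommended settings)::

            >>> from ase.calculators.tip3p import TIP3P
            >>> calc = CombineMM(idx=[0], apm1=1, apm2=3,
            ...                  calc1=ion_calc,  # any MM ion calculator
            ...                  calc2=TIP3P(),
            ...                  sig1=np.array([3.3]), eps1=np.array([0.01]),
            ...                  sig2=np.array([3.15, 0.0, 0.0]),
            ...                  eps2=np.array([0.007, 0.0, 0.0]))
            >>> atoms.calc = calc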
""" self.idx = idx self.apm1 = apm1 # atoms per mol for LJ calculator self.apm2 = apm2 self.rc = rc self.width = width self.atoms1 = None self.atoms2 = None self.mask = None self.calc1 = calc1 self.calc2 = calc2 self.sig1 = sig1 self.eps1 = eps1 self.sig2 = sig2 self.eps2 = eps2 Calculator.__init__(self) def initialize(self, atoms): self.mask = np.zeros(len(atoms), bool) self.mask[self.idx] = True constraints = atoms.constraints atoms.constraints = [] self.atoms1 = atoms[self.mask] self.atoms2 = atoms[~self.mask] atoms.constraints = constraints self.atoms1.calc = self.calc1 self.atoms2.calc = self.calc2 self.cell = atoms.cell self.pbc = atoms.pbc self.sigma, self.epsilon =\ combine_lj_lorenz_berthelot(self.sig1, self.sig2, self.eps1, self.eps2) self.make_virtual_mask() def calculate(self, atoms, properties, system_changes): Calculator.calculate(self, atoms, properties, system_changes) if self.atoms1 is None: self.initialize(atoms) pos1 = atoms.positions[self.mask] pos2 = atoms.positions[~self.mask] self.atoms1.set_positions(pos1) self.atoms2.set_positions(pos2) # positions and charges for the coupling term, which should # include virtual charges and sites: spm1 = self.atoms1.calc.sites_per_mol spm2 = self.atoms2.calc.sites_per_mol xpos1 = self.atoms1.calc.add_virtual_sites(pos1) xpos2 = self.atoms2.calc.add_virtual_sites(pos2) xc1 = self.atoms1.calc.get_virtual_charges(self.atoms1) xc2 = self.atoms2.calc.get_virtual_charges(self.atoms2) xpos1 = xpos1.reshape((-1, spm1, 3)) xpos2 = xpos2.reshape((-1, spm2, 3)) e_c, f_c = self.coulomb(xpos1, xpos2, xc1, xc2, spm1, spm2) e_lj, f1, f2 = self.lennard_jones(self.atoms1, self.atoms2) f_lj = np.zeros((len(atoms), 3)) f_lj[self.mask] += f1 f_lj[~self.mask] += f2 # internal energy, forces of each subsystem: f12 = np.zeros((len(atoms), 3)) e1 = self.atoms1.get_potential_energy() fi1 = self.atoms1.get_forces() e2 = self.atoms2.get_potential_energy() fi2 = self.atoms2.get_forces() f12[self.mask] += fi1 f12[~self.mask] += fi2 self.results['energy'] = e_c + e_lj + e1 + e2 self.results['forces'] = f_c + f_lj + f12 def get_virtual_charges(self, atoms): if self.atoms1 is None: self.initialize(atoms) vc1 = self.atoms1.calc.get_virtual_charges(atoms[self.mask]) vc2 = self.atoms2.calc.get_virtual_charges(atoms[~self.mask]) # Need to expand mask with possible new virtual sites. # Virtual sites should ALWAYS be put AFTER actual atoms, like in # TIP4P: OHHX, OHHX, ... 
vc = np.zeros(len(vc1) + len(vc2)) vc[self.virtual_mask] = vc1 vc[~self.virtual_mask] = vc2 return vc def add_virtual_sites(self, positions): vs1 = self.atoms1.calc.add_virtual_sites(positions[self.mask]) vs2 = self.atoms2.calc.add_virtual_sites(positions[~self.mask]) vs = np.zeros((len(vs1) + len(vs2), 3)) vs[self.virtual_mask] = vs1 vs[~self.virtual_mask] = vs2 return vs def make_virtual_mask(self): virtual_mask = [] ct1 = 0 ct2 = 0 for i in range(len(self.mask)): virtual_mask.append(self.mask[i]) if self.mask[i]: ct1 += 1 if not self.mask[i]: ct2 += 1 if ((ct2 == self.apm2) & (self.apm2 != self.atoms2.calc.sites_per_mol)): virtual_mask.append(False) ct2 = 0 if ((ct1 == self.apm1) & (self.apm1 != self.atoms1.calc.sites_per_mol)): virtual_mask.append(True) ct1 = 0 self.virtual_mask = np.array(virtual_mask) def coulomb(self, xpos1, xpos2, xc1, xc2, spm1, spm2): energy = 0.0 forces = np.zeros((len(xc1) + len(xc2), 3)) self.xpos1 = xpos1 self.xpos2 = xpos2 R1 = xpos1 R2 = xpos2 F1 = np.zeros_like(R1) F2 = np.zeros_like(R2) C1 = xc1.reshape((-1, np.shape(xpos1)[1])) C2 = xc2.reshape((-1, np.shape(xpos2)[1])) # Vectorized evaluation is not as trivial when spm1 != spm2. # This is pretty inefficient, but for ~1-5 counter ions as region 1 # it should not matter much .. # There is definitely room for improvements here. cell = self.cell.diagonal() for m1, (r1, c1) in enumerate(zip(R1, C1)): for m2, (r2, c2) in enumerate(zip(R2, C2)): r00 = r2[0] - r1[0] shift = np.zeros(3) for i, periodic in enumerate(self.pbc): if periodic: L = cell[i] shift[i] = (r00[i] + L / 2.) % L - L / 2. - r00[i] r00 += shift d00 = (r00**2).sum()**0.5 t = 1 dtdd = 0 if d00 > self.rc: continue elif d00 > self.rc - self.width: y = (d00 - self.rc + self.width) / self.width t -= y**2 * (3.0 - 2.0 * y) dtdd = r00 * 6 * y * (1.0 - y) / (self.width * d00) for a1 in range(spm1): for a2 in range(spm2): r = r2[a2] - r1[a1] + shift d2 = (r**2).sum() d = d2**0.5 e = k_c * c1[a1] * c2[a2] / d energy += t * e F1[m1, a1] -= t * (e / d2) * r F2[m2, a2] += t * (e / d2) * r F1[m1, 0] -= dtdd * e F2[m2, 0] += dtdd * e F1 = F1.reshape((-1, 3)) F2 = F2.reshape((-1, 3)) # Redist forces but dont save forces in org calculators atoms1 = self.atoms1.copy() atoms1.calc = copy.copy(self.calc1) atoms1.calc.atoms = atoms1 F1 = atoms1.calc.redistribute_forces(F1) atoms2 = self.atoms2.copy() atoms2.calc = copy.copy(self.calc2) atoms2.calc.atoms = atoms2 F2 = atoms2.calc.redistribute_forces(F2) forces = np.zeros((len(self.atoms), 3)) forces[self.mask] = F1 forces[~self.mask] = F2 return energy, forces def lennard_jones(self, atoms1, atoms2): pos1 = atoms1.get_positions().reshape((-1, self.apm1, 3)) pos2 = atoms2.get_positions().reshape((-1, self.apm2, 3)) f1 = np.zeros_like(atoms1.positions) f2 = np.zeros_like(atoms2.positions) energy = 0.0 cell = self.cell.diagonal() for q, p1 in enumerate(pos1): # molwise loop eps = self.epsilon sig = self.sigma R00 = pos2[:, 0] - p1[0, :] # cutoff from first atom of each mol shift = np.zeros_like(R00) for i, periodic in enumerate(self.pbc): if periodic: L = cell[i] shift[:, i] = (R00[:, i] + L / 2) % L - L / 2 - R00[:, i] R00 += shift d002 = (R00**2).sum(1) d00 = d002**0.5 x1 = d00 > self.rc - self.width x2 = d00 < self.rc x12 = np.logical_and(x1, x2) y = (d00[x12] - self.rc + self.width) / self.width t = np.zeros(len(d00)) t[x2] = 1.0 t[x12] -= y**2 * (3.0 - 2.0 * y) dt = np.zeros(len(d00)) dt[x12] -= 6.0 / self.width * y * (1.0 - y) for qa in range(len(p1)): if ~np.any(eps[qa, :]): continue R = pos2 - p1[qa, :] + 
shift[:, None] d2 = (R**2).sum(2) c6 = (sig[qa, :]**2 / d2)**3 c12 = c6**2 e = 4 * eps[qa, :] * (c12 - c6) energy += np.dot(e.sum(1), t) f = t[:, None, None] * (24 * eps[qa, :] * (2 * c12 - c6) / d2)[:, :, None] * R f00 = - (e.sum(1) * dt / d00)[:, None] * R00 f2 += f.reshape((-1, 3)) f1[q * self.apm1 + qa, :] -= f.sum(0).sum(0) f1[q * self.apm1, :] -= f00.sum(0) f2[::self.apm2, :] += f00 return energy, f1, f2 def redistribute_forces(self, forces): f1 = self.calc1.redistribute_forces(forces[self.virtual_mask]) f2 = self.calc2.redistribute_forces(forces[~self.virtual_mask]) # and then they are back on the real atom centers so f = np.zeros((len(self.atoms), 3)) f[self.mask] = f1 f[~self.mask] = f2 return f ase-3.22.1/ase/calculators/counterions.py000066400000000000000000000053301415166253600203670ustar00rootroot00000000000000import numpy as np from ase.calculators.calculator import Calculator from ase import units k_c = units.Hartree * units.Bohr class AtomicCounterIon(Calculator): implemented_properties = ['energy', 'forces'] def __init__(self, charge, epsilon, sigma, sites_per_mol=1, rc=7.0, width=1.0): """ Counter Ion Calculator. A very simple, nonbonded (Coulumb and LJ) interaction calculator meant for single atom ions to charge neutralize systems (and nothing else)... """ self.rc = rc self.width = width self.sites_per_mol = sites_per_mol self.epsilon = epsilon self.sigma = sigma self.charge = charge Calculator.__init__(self) def add_virtual_sites(self, positions): return positions def get_virtual_charges(self, atoms): charges = np.tile(self.charge, len(atoms) // self.sites_per_mol) return charges def redistribute_forces(self, forces): return forces def calculate(self, atoms, properties, system_changes): Calculator.calculate(self, atoms, properties, system_changes) R = atoms.get_positions() charges = self.get_virtual_charges(atoms) pbc = atoms.pbc energy = 0.0 forces = np.zeros_like(atoms.get_positions()) for m in range(len(atoms)): D = R[m + 1:] - R[m] shift = np.zeros_like(D) for i, periodic in enumerate(pbc): if periodic: L = atoms.cell.diagonal()[i] shift[:, i] = (D[:, i] + L / 2) % L - L / 2 - D[:, i] D += shift d2 = (D**2).sum(1) d = d2**0.5 x1 = d > self.rc - self.width x2 = d < self.rc x12 = np.logical_and(x1, x2) y = (d[x12] - self.rc + self.width) / self.width t = np.zeros(len(d)) # cutoff function t[x2] = 1.0 t[x12] -= y**2 * (3.0 - 2.0 * y) dtdd = np.zeros(len(d)) dtdd[x12] -= 6.0 / self.width * y * (1.0 - y) c6 = (self.sigma**2 / d2)**3 c12 = c6**2 e_lj = 4 * self.epsilon * (c12 - c6) e_c = k_c * charges[m + 1:] * charges[m] / d energy += np.dot(t, e_lj) energy += np.dot(t, e_c) F = (24 * self.epsilon * (2 * c12 - c6) / d2 * t - e_lj * dtdd / d)[:, None] * D forces[m] -= F.sum(0) forces[m + 1:] += F F = (e_c / d2 * t)[:, None] * D \ - (e_c * dtdd / d)[:, None] * D forces[m] -= F.sum(0) forces[m + 1:] += F self.results['energy'] = energy self.results['forces'] = forces ase-3.22.1/ase/calculators/cp2k.py000066400000000000000000000574161415166253600166720ustar00rootroot00000000000000"""This module defines an ASE interface to CP2K. https://www.cp2k.org/ Author: Ole Schuett """ import os import os.path from warnings import warn from subprocess import Popen, PIPE import numpy as np import ase.io from ase.units import Rydberg from ase.calculators.calculator import (Calculator, all_changes, Parameters, CalculatorSetupError) class CP2K(Calculator): """ASE-Calculator for CP2K. 
CP2K is a program to perform atomistic and molecular simulations of solid state, liquid, molecular, and biological systems. It provides a general framework for different methods such as e.g., density functional theory (DFT) using a mixed Gaussian and plane waves approach (GPW) and classical pair and many-body potentials. CP2K is freely available under the GPL license. It is written in Fortran 2003 and can be run efficiently in parallel. Check https://www.cp2k.org about how to obtain and install CP2K. Make sure that you also have the CP2K-shell available, since it is required by the CP2K-calulator. The CP2K-calculator relies on the CP2K-shell. The CP2K-shell was originally designed for interactive sessions. When a calculator object is instantiated, it launches a CP2K-shell as a subprocess in the background and communications with it through stdin/stdout pipes. This has the advantage that the CP2K process is kept alive for the whole lifetime of the calculator object, i.e. there is no startup overhead for a sequence of energy evaluations. Furthermore, the usage of pipes avoids slow file- system I/O. This mechanism even works for MPI-parallelized runs, because stdin/stdout of the first rank are forwarded by the MPI-environment to the mpiexec-process. The command used by the calculator to launch the CP2K-shell is ``cp2k_shell``. To run a parallelized simulation use something like this: >>> CP2K.command="env OMP_NUM_THREADS=2 mpiexec -np 4 cp2k_shell.psmp" Arguments: auto_write: bool Flag to enable the auto-write mode. If enabled the ``write()`` routine is called after every calculation, which mimics the behavior of the ``FileIOCalculator``. Default is ``False``. basis_set: str Name of the basis set to be use. The default is ``DZVP-MOLOPT-SR-GTH``. basis_set_file: str Filename of the basis set file. Default is ``BASIS_MOLOPT``. Set the environment variable $CP2K_DATA_DIR to enabled automatic file discovered. charge: float The total charge of the system. Default is ``0``. command: str The command used to launch the CP2K-shell. If ``command`` is not passed as an argument to the constructor, the class-variable ``CP2K.command``, and then the environment variable ``$ASE_CP2K_COMMAND`` are checked. Eventually, ``cp2k_shell`` is used as default. cutoff: float The cutoff of the finest grid level. Default is ``400 * Rydberg``. debug: bool Flag to enable debug mode. This will print all communication between the CP2K-shell and the CP2K-calculator. Default is ``False``. force_eval_method: str The method CP2K uses to evaluate energies and forces. The default is ``Quickstep``, which is CP2K's module for electronic structure methods like DFT. inp: str CP2K input template. If present, the calculator will augment the template, e.g. with coordinates, and use it to launch CP2K. Hence, this generic mechanism gives access to all features of CP2K. Note, that most keywords accept ``None`` to disable the generation of the corresponding input section. This input template is important for advanced CP2K inputs, but is also needed for e.g. controlling the Brillouin zone integration. The example below illustrates some common options:: >>> inp = '''&FORCE_EVAL >>> &DFT >>> &KPOINTS >>> SCHEME MONKHORST-PACK 12 12 8 >>> &END KPOINTS >>> &SCF >>> ADDED_MOS 10 >>> &SMEAR >>> METHOD FERMI_DIRAC >>> ELECTRONIC_TEMPERATURE [K] 500.0 >>> &END SMEAR >>> &END SCF >>> &END DFT >>> &END FORCE_EVAL >>> ''' max_scf: int Maximum number of SCF iteration to be performed for one optimization. Default is ``50``. 
poisson_solver: str The poisson solver to be used. Currently, the only supported values are ``auto`` and ``None``. Default is ``auto``. potential_file: str Filename of the pseudo-potential file. Default is ``POTENTIAL``. Set the environment variable $CP2K_DATA_DIR to enabled automatic file discovered. pseudo_potential: str Name of the pseudo-potential to be use. Default is ``auto``. This tries to infer the potential from the employed XC-functional, otherwise it falls back to ``GTH-PBE``. stress_tensor: bool Indicates whether the analytic stress-tensor should be calculated. Default is ``True``. uks: bool Requests an unrestricted Kohn-Sham calculations. This is need for spin-polarized systems, ie. with an odd number of electrons. Default is ``False``. xc: str Name of exchange and correlation functional. Accepts all functions supported by CP2K itself or libxc. Default is ``LDA``. print_level: str PRINT_LEVEL of global output. Possible options are: DEBUG Everything is written out, useful for debugging purposes only HIGH Lots of output LOW Little output MEDIUM Quite some output SILENT Almost no output Default is 'LOW' """ implemented_properties = ['energy', 'free_energy', 'forces', 'stress'] command = None default_parameters = dict( auto_write=False, basis_set='DZVP-MOLOPT-SR-GTH', basis_set_file='BASIS_MOLOPT', charge=0, cutoff=400 * Rydberg, force_eval_method="Quickstep", inp='', max_scf=50, potential_file='POTENTIAL', pseudo_potential='auto', stress_tensor=True, uks=False, poisson_solver='auto', xc='LDA', print_level='LOW') def __init__(self, restart=None, ignore_bad_restart_file=Calculator._deprecated, label='cp2k', atoms=None, command=None, debug=False, **kwargs): """Construct CP2K-calculator object.""" self._debug = debug self._force_env_id = None self._shell = None self.label = None self.parameters = None self.results = None self.atoms = None # Several places are check to determine self.command if command is not None: self.command = command elif CP2K.command is not None: self.command = CP2K.command elif 'ASE_CP2K_COMMAND' in os.environ: self.command = os.environ['ASE_CP2K_COMMAND'] else: self.command = 'cp2k_shell' # default Calculator.__init__(self, restart=restart, ignore_bad_restart_file=ignore_bad_restart_file, label=label, atoms=atoms, **kwargs) self._shell = Cp2kShell(self.command, self._debug) if restart is not None: self.read(restart) def __del__(self): """Release force_env and terminate cp2k_shell child process""" if self._shell: self._release_force_env() del(self._shell) def set(self, **kwargs): """Set parameters like set(key1=value1, key2=value2, ...).""" msg = '"%s" is not a known keyword for the CP2K calculator. ' \ 'To access all features of CP2K by means of an input ' \ 'template, consider using the "inp" keyword instead.' for key in kwargs: if key not in self.default_parameters: raise CalculatorSetupError(msg % key) changed_parameters = Calculator.set(self, **kwargs) if changed_parameters: self.reset() def write(self, label): 'Write atoms, parameters and calculated results into restart files.' if self._debug: print("Writing restart to: ", label) self.atoms.write(label + '_restart.traj') self.parameters.write(label + '_params.ase') from ase.io.jsonio import write_json with open(label + '_results.json', 'w') as fd: write_json(fd, self.results) def read(self, label): 'Read atoms, parameters and calculated results from restart files.' 
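        # The restart bundle produced by write() consists of three files:
        # <label>_restart.traj (atoms), <label>_params.ase (parameters)
        # and <label>_results.json (results); read them back in that order.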
self.atoms = ase.io.read(label + '_restart.traj') self.parameters = Parameters.read(label + '_params.ase') from ase.io.jsonio import read_json with open(label + '_results.json') as fd: self.results = read_json(fd) def calculate(self, atoms=None, properties=None, system_changes=all_changes): """Do the calculation.""" if not properties: properties = ['energy'] Calculator.calculate(self, atoms, properties, system_changes) if self._debug: print("system_changes:", system_changes) if 'numbers' in system_changes: self._release_force_env() if self._force_env_id is None: self._create_force_env() # enable eV and Angstrom as units self._shell.send('UNITS_EV_A') self._shell.expect('* READY') n_atoms = len(self.atoms) if 'cell' in system_changes: cell = self.atoms.get_cell() self._shell.send('SET_CELL %d' % self._force_env_id) for i in range(3): self._shell.send('%.18e %.18e %.18e' % tuple(cell[i, :])) self._shell.expect('* READY') if 'positions' in system_changes: self._shell.send('SET_POS %d' % self._force_env_id) self._shell.send('%d' % (3 * n_atoms)) for pos in self.atoms.get_positions(): self._shell.send('%.18e %.18e %.18e' % tuple(pos)) self._shell.send('*END') max_change = float(self._shell.recv()) assert max_change >= 0 # sanity check self._shell.expect('* READY') self._shell.send('EVAL_EF %d' % self._force_env_id) self._shell.expect('* READY') self._shell.send('GET_E %d' % self._force_env_id) self.results['energy'] = float(self._shell.recv()) self.results['free_energy'] = self.results['energy'] self._shell.expect('* READY') forces = np.zeros(shape=(n_atoms, 3)) self._shell.send('GET_F %d' % self._force_env_id) nvals = int(self._shell.recv()) assert nvals == 3 * n_atoms # sanity check for i in range(n_atoms): line = self._shell.recv() forces[i, :] = [float(x) for x in line.split()] self._shell.expect('* END') self._shell.expect('* READY') self.results['forces'] = forces self._shell.send('GET_STRESS %d' % self._force_env_id) line = self._shell.recv() self._shell.expect('* READY') stress = np.array([float(x) for x in line.split()]).reshape(3, 3) assert np.all(stress == np.transpose(stress)) # should be symmetric # Convert 3x3 stress tensor to Voigt form as required by ASE stress = np.array([stress[0, 0], stress[1, 1], stress[2, 2], stress[1, 2], stress[0, 2], stress[0, 1]]) self.results['stress'] = -1.0 * stress # cp2k uses the opposite sign if self.parameters.auto_write: self.write(self.label) def _create_force_env(self): """Instantiates a new force-environment""" assert self._force_env_id is None label_dir = os.path.dirname(self.label) if len(label_dir) > 0 and not os.path.exists(label_dir): print('Creating directory: ' + label_dir) os.makedirs(label_dir) # cp2k expects dirs to exist inp = self._generate_input() inp_fn = self.label + '.inp' out_fn = self.label + '.out' self._write_file(inp_fn, inp) self._shell.send('LOAD %s %s' % (inp_fn, out_fn)) self._force_env_id = int(self._shell.recv()) assert self._force_env_id > 0 self._shell.expect('* READY') def _write_file(self, fn, content): """Write content to a file""" if self._debug: print('Writting to file: ' + fn) print(content) if self._shell.version < 2.0: with open(fn, 'w') as fd: fd.write(content) else: lines = content.split('\n') if self._shell.version < 2.1: lines = [l.strip() for l in lines] # save chars self._shell.send('WRITE_FILE') self._shell.send(fn) self._shell.send('%d' % len(lines)) for line in lines: self._shell.send(line) self._shell.send('*END') self._shell.expect('* READY') def _release_force_env(self): """Destroys the current 
force-environment""" if self._force_env_id: if self._shell.isready: self._shell.send('DESTROY %d' % self._force_env_id) self._shell.expect('* READY') else: msg = "CP2K-shell not ready, could not release force_env." warn(msg, RuntimeWarning) self._force_env_id = None def _generate_input(self): """Generates a CP2K input file""" p = self.parameters root = parse_input(p.inp) root.add_keyword('GLOBAL', 'PROJECT ' + self.label) if p.print_level: root.add_keyword('GLOBAL', 'PRINT_LEVEL ' + p.print_level) if p.force_eval_method: root.add_keyword('FORCE_EVAL', 'METHOD ' + p.force_eval_method) if p.stress_tensor: root.add_keyword('FORCE_EVAL', 'STRESS_TENSOR ANALYTICAL') root.add_keyword('FORCE_EVAL/PRINT/STRESS_TENSOR', '_SECTION_PARAMETERS_ ON') if p.basis_set_file: root.add_keyword('FORCE_EVAL/DFT', 'BASIS_SET_FILE_NAME ' + p.basis_set_file) if p.potential_file: root.add_keyword('FORCE_EVAL/DFT', 'POTENTIAL_FILE_NAME ' + p.potential_file) if p.cutoff: root.add_keyword('FORCE_EVAL/DFT/MGRID', 'CUTOFF [eV] %.18e' % p.cutoff) if p.max_scf: root.add_keyword('FORCE_EVAL/DFT/SCF', 'MAX_SCF %d' % p.max_scf) root.add_keyword('FORCE_EVAL/DFT/LS_SCF', 'MAX_SCF %d' % p.max_scf) if p.xc: legacy_libxc = "" for functional in p.xc.split(): functional = functional.replace("LDA", "PADE") # resolve alias xc_sec = root.get_subsection('FORCE_EVAL/DFT/XC/XC_FUNCTIONAL') # libxc input section changed over time if functional.startswith("XC_") and self._shell.version < 3.0: legacy_libxc += " " + functional # handled later elif functional.startswith("XC_"): s = InputSection(name='LIBXC') s.keywords.append('FUNCTIONAL ' + functional) xc_sec.subsections.append(s) else: s = InputSection(name=functional.upper()) xc_sec.subsections.append(s) if legacy_libxc: root.add_keyword('FORCE_EVAL/DFT/XC/XC_FUNCTIONAL/LIBXC', 'FUNCTIONAL ' + legacy_libxc) if p.uks: root.add_keyword('FORCE_EVAL/DFT', 'UNRESTRICTED_KOHN_SHAM ON') if p.charge and p.charge != 0: root.add_keyword('FORCE_EVAL/DFT', 'CHARGE %d' % p.charge) # add Poisson solver if needed if p.poisson_solver == 'auto' and not any(self.atoms.get_pbc()): root.add_keyword('FORCE_EVAL/DFT/POISSON', 'PERIODIC NONE') root.add_keyword('FORCE_EVAL/DFT/POISSON', 'PSOLVER MT') # write coords syms = self.atoms.get_chemical_symbols() atoms = self.atoms.get_positions() for elm, pos in zip(syms, atoms): line = '%s %.18e %.18e %.18e' % (elm, pos[0], pos[1], pos[2]) root.add_keyword('FORCE_EVAL/SUBSYS/COORD', line, unique=False) # write cell pbc = ''.join([a for a, b in zip('XYZ', self.atoms.get_pbc()) if b]) if len(pbc) == 0: pbc = 'NONE' root.add_keyword('FORCE_EVAL/SUBSYS/CELL', 'PERIODIC ' + pbc) c = self.atoms.get_cell() for i, a in enumerate('ABC'): line = '%s %.18e %.18e %.18e' % (a, c[i, 0], c[i, 1], c[i, 2]) root.add_keyword('FORCE_EVAL/SUBSYS/CELL', line) # determine pseudo-potential potential = p.pseudo_potential if p.pseudo_potential == 'auto': if p.xc and p.xc.upper() in ('LDA', 'PADE', 'BP', 'BLYP', 'PBE',): potential = 'GTH-' + p.xc.upper() else: msg = 'No matching pseudo potential found, using GTH-PBE' warn(msg, RuntimeWarning) potential = 'GTH-PBE' # fall back # write atomic kinds subsys = root.get_subsection('FORCE_EVAL/SUBSYS').subsections kinds = dict([(s.params, s) for s in subsys if s.name == "KIND"]) for elem in set(self.atoms.get_chemical_symbols()): if elem not in kinds.keys(): s = InputSection(name='KIND', params=elem) subsys.append(s) kinds[elem] = s if p.basis_set: kinds[elem].keywords.append('BASIS_SET ' + p.basis_set) if potential: 
kinds[elem].keywords.append('POTENTIAL ' + potential) output_lines = ['!!! Generated by ASE !!!'] + root.write() return '\n'.join(output_lines) class Cp2kShell: """Wrapper for CP2K-shell child-process""" def __init__(self, command, debug): """Construct CP2K-shell object""" self.isready = False self.version = 1.0 # assume oldest possible version until verified self._child = None self._debug = debug # launch cp2k_shell child process assert 'cp2k_shell' in command if self._debug: print(command) self._child = Popen(command, shell=True, universal_newlines=True, stdin=PIPE, stdout=PIPE, bufsize=1) self.expect('* READY') # check version of shell self.send('VERSION') line = self.recv() if not line.startswith('CP2K Shell Version:'): raise RuntimeError('Cannot determine version of CP2K shell. ' 'Probably the shell version is too old. ' 'Please update to CP2K 3.0 or newer.') shell_version = line.rsplit(":", 1)[1] self.version = float(shell_version) assert self.version >= 1.0 self.expect('* READY') # enable harsh mode, stops on any error self.send('HARSH') self.expect('* READY') def __del__(self): """Terminate cp2k_shell child process""" if self.isready: self.send('EXIT') rtncode = self._child.wait() assert rtncode == 0 # child process exited properly? else: warn("CP2K-shell not ready, sending SIGTERM.", RuntimeWarning) self._child.terminate() self._child = None self.version = None self.isready = False def send(self, line): """Send a line to the cp2k_shell""" assert self._child.poll() is None # child process still alive? if self._debug: print('Sending: ' + line) if self.version < 2.1 and len(line) >= 80: raise Exception('Buffer overflow, upgrade CP2K to r16779 or later') assert(len(line) < 800) # new input buffer size self.isready = False self._child.stdin.write(line + '\n') def recv(self): """Receive a line from the cp2k_shell""" assert self._child.poll() is None # child process still alive? 
line = self._child.stdout.readline().strip() if self._debug: print('Received: ' + line) self.isready = line == '* READY' return line def expect(self, line): """Receive a line and asserts that it matches the expected one""" received = self.recv() assert received == line class InputSection: """Represents a section of a CP2K input file""" def __init__(self, name, params=None): self.name = name.upper() self.params = params self.keywords = [] self.subsections = [] def write(self): """Outputs input section as string""" output = [] for k in self.keywords: output.append(k) for s in self.subsections: if s.params: output.append('&%s %s' % (s.name, s.params)) else: output.append('&%s' % s.name) for l in s.write(): output.append(' %s' % l) output.append('&END %s' % s.name) return output def add_keyword(self, path, line, unique=True): """Adds a keyword to section.""" parts = path.upper().split('/', 1) candidates = [s for s in self.subsections if s.name == parts[0]] if len(candidates) == 0: s = InputSection(name=parts[0]) self.subsections.append(s) candidates = [s] elif len(candidates) != 1: raise Exception('Multiple %s sections found ' % parts[0]) key = line.split()[0].upper() if len(parts) > 1: candidates[0].add_keyword(parts[1], line, unique) elif key == '_SECTION_PARAMETERS_': if candidates[0].params is not None: msg = 'Section parameter of section %s already set' % parts[0] raise Exception(msg) candidates[0].params = line.split(' ', 1)[1].strip() else: old_keys = [k.split()[0].upper() for k in candidates[0].keywords] if unique and key in old_keys: msg = 'Keyword %s already present in section %s' raise Exception(msg % (key, parts[0])) candidates[0].keywords.append(line) def get_subsection(self, path): """Finds a subsection""" parts = path.upper().split('/', 1) candidates = [s for s in self.subsections if s.name == parts[0]] if len(candidates) > 1: raise Exception('Multiple %s sections found ' % parts[0]) if len(candidates) == 0: s = InputSection(name=parts[0]) self.subsections.append(s) candidates = [s] if len(parts) == 1: return candidates[0] return candidates[0].get_subsection(parts[1]) def parse_input(inp): """Parses the given CP2K input string""" root_section = InputSection('CP2K_INPUT') section_stack = [root_section] for line in inp.split('\n'): line = line.split('!', 1)[0].strip() if len(line) == 0: continue if line.upper().startswith('&END'): s = section_stack.pop() elif line[0] == '&': parts = line.split(' ', 1) name = parts[0][1:] if len(parts) > 1: s = InputSection(name=name, params=parts[1].strip()) else: s = InputSection(name=name) section_stack[-1].subsections.append(s) section_stack.append(s) else: section_stack[-1].keywords.append(line) return root_section ase-3.22.1/ase/calculators/crystal.py000066400000000000000000000411441415166253600175030ustar00rootroot00000000000000"""This module defines an ASE interface to CRYSTAL14/CRYSTAL17 http://www.crystal.unito.it/ Written by: Daniele Selli, daniele.selli@unimib.it Gianluca Fazio, g.fazio3@campus.unimib.it The file 'fort.34' contains the input and output geometry and it will be updated during the crystal calculations. The wavefunction is stored in 'fort.20' as binary file. The keywords are given, for instance, as follows: guess = True, xc = 'PBE', kpts = (2,2,2), otherkeys = [ 'scfdir', 'anderson', ['maxcycles','500'], ['fmixing','90']], ... When used for QM/MM, Crystal calculates coulomb terms within all point charges. This is wrong and should be corrected by either: 1. Re-calculating the terms and subtracting them 2. 
Reading in the values from FORCES_CHG.DAT and subtracting BOTH Options should be available, with 1 as standard, since 2 is only available in a development version of CRYSTAL """ from ase.units import Hartree, Bohr from ase.io import write import numpy as np import os from ase.calculators.calculator import FileIOCalculator class CRYSTAL(FileIOCalculator): """ A crystal calculator with ase-FileIOCalculator nomenclature """ implemented_properties = ['energy', 'forces', 'stress', 'charges', 'dipole'] def __init__(self, restart=None, ignore_bad_restart_file=FileIOCalculator._deprecated, label='cry', atoms=None, crys_pcc=False, **kwargs): """Construct a crystal calculator. """ # default parameters self.default_parameters = dict( xc='HF', spinpol=False, oldgrid=False, neigh=False, coarsegrid=False, guess=True, kpts=None, isp=1, basis='custom', smearing=None, otherkeys=[]) self.pcpot = None self.lines = None self.atoms = None self.crys_pcc = crys_pcc # True: Reads Coulomb Correction from file. self.atoms_input = None self.outfilename = 'cry.out' FileIOCalculator.__init__(self, restart, ignore_bad_restart_file, label, atoms, **kwargs) def write_crystal_in(self, filename): """ Write the input file for the crystal calculation. Geometry is taken always from the file 'fort.34' """ # write BLOCK 1 (only SP with gradients) with open(filename, 'wt', encoding='latin-1') as outfile: self._write_crystal_in(outfile) def _write_crystal_in(self, outfile): outfile.write('Single point + Gradient crystal calculation \n') outfile.write('EXTERNAL \n') outfile.write('NEIGHPRT \n') outfile.write('0 \n') if self.pcpot: outfile.write('POINTCHG \n') self.pcpot.write_mmcharges('POINTCHG.INP') # write BLOCK 2 from file (basis sets) p = self.parameters if p.basis == 'custom': outfile.write('END \n') with open(os.path.join(self.directory, 'basis')) as basisfile: basis_ = basisfile.readlines() for line in basis_: outfile.write(line) outfile.write('99 0 \n') outfile.write('END \n') else: outfile.write('BASISSET \n') outfile.write(p.basis.upper() + '\n') # write BLOCK 3 according to parameters set as input # ----- write hamiltonian if self.atoms.get_initial_magnetic_moments().any(): p.spinpol = True if p.xc == 'HF': if p.spinpol: outfile.write('UHF \n') else: outfile.write('RHF \n') elif p.xc == 'MP2': outfile.write('MP2 \n') outfile.write('ENDMP2 \n') else: outfile.write('DFT \n') # Standalone keywords and LDA are given by a single string. if isinstance(p.xc, str): xc = {'LDA': 'EXCHANGE\nLDA\nCORRELAT\nVWN', 'PBE': 'PBEXC'}.get(p.xc, p.xc) outfile.write(xc.upper()+'\n') # Custom xc functional are given by a tuple of string else: x, c = p.xc outfile.write('EXCHANGE \n') outfile.write(x + ' \n') outfile.write('CORRELAT \n') outfile.write(c + ' \n') if p.spinpol: outfile.write('SPIN \n') if p.oldgrid: outfile.write('OLDGRID \n') if p.coarsegrid: outfile.write('RADIAL\n') outfile.write('1\n') outfile.write('4.0\n') outfile.write('20\n') outfile.write('ANGULAR\n') outfile.write('5\n') outfile.write('0.1667 0.5 0.9 3.05 9999.0\n') outfile.write('2 6 8 13 8\n') outfile.write('END \n') # When guess=True, wf is read. if p.guess: # wf will be always there after 2nd step. 
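            # Restart from a previous wavefunction when available: fort.20
            # is read directly via GUESSP; otherwise a fort.9 left behind by
            # an earlier CRYSTAL run is copied to fort.20 first.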
if os.path.isfile('fort.20'): outfile.write('GUESSP \n') elif os.path.isfile('fort.9'): outfile.write('GUESSP \n') os.system('cp fort.9 fort.20') # smearing if p.smearing is not None: if p.smearing[0] != 'Fermi-Dirac': raise ValueError('Only Fermi-Dirac smearing is allowed.') else: outfile.write('SMEAR \n') outfile.write(str(p.smearing[1] / Hartree) + ' \n') # ----- write other CRYSTAL keywords # ----- in the list otherkey = ['ANDERSON', ...] . for keyword in p.otherkeys: if isinstance(keyword, str): outfile.write(keyword.upper() + '\n') else: for key in keyword: outfile.write(key.upper() + '\n') ispbc = self.atoms.get_pbc() self.kpts = p.kpts # if it is periodic, gamma is the default. if any(ispbc): if self.kpts is None: self.kpts = (1, 1, 1) else: self.kpts = None # explicit lists of K-points, shifted Monkhorst- # Pack net and k-point density definition are # not allowed. if self.kpts is not None: if isinstance(self.kpts, float): raise ValueError('K-point density definition not allowed.') if isinstance(self.kpts, list): raise ValueError('Explicit K-points definition not allowed.') if isinstance(self.kpts[-1], str): raise ValueError('Shifted Monkhorst-Pack not allowed.') outfile.write('SHRINK \n') # isp is by default 1, 2 is suggested for metals. outfile.write('0 ' + str(p.isp*max(self.kpts)) + ' \n') if ispbc[2]: outfile.write(str(self.kpts[0]) + ' ' + str(self.kpts[1]) + ' ' + str(self.kpts[2]) + ' \n') elif ispbc[1]: outfile.write(str(self.kpts[0]) + ' ' + str(self.kpts[1]) + ' 1 \n') elif ispbc[0]: outfile.write(str(self.kpts[0]) + ' 1 1 \n') # GRADCAL command performs a single # point and prints out the forces # also on the charges outfile.write('GRADCAL \n') outfile.write('END \n') def write_input(self, atoms, properties=None, system_changes=None): FileIOCalculator.write_input( self, atoms, properties, system_changes) self.write_crystal_in(os.path.join(self.directory, 'INPUT')) write(os.path.join(self.directory, 'fort.34'), atoms) # self.atoms is none until results are read out, # then it is set to the ones at writing input self.atoms_input = atoms self.atoms = None def read_results(self): """ all results are read from OUTPUT file It will be destroyed after it is read to avoid reading it once again after some runtime error """ with open(os.path.join(self.directory, 'OUTPUT'), 'rt', encoding='latin-1') as myfile: self.lines = myfile.readlines() self.atoms = self.atoms_input # Energy line index estring1 = 'SCF ENDED' estring2 = 'TOTAL ENERGY + DISP' for iline, line in enumerate(self.lines): if line.find(estring1) >= 0: index_energy = iline pos_en = 8 break else: raise RuntimeError('Problem in reading energy') # Check if there is dispersion corrected # energy value. for iline, line in enumerate(self.lines): if line.find(estring2) >= 0: index_energy = iline pos_en = 5 # If there's a point charge potential (QM/MM), read corrections e_coul = 0 if self.pcpot: if self.crys_pcc: self.pcpot.read_pc_corrections() # also pass on to pcpot that it should read in from file self.pcpot.crys_pcc = True else: self.pcpot.manual_pc_correct() e_coul, f_coul = self.pcpot.coulomb_corrections energy = float(self.lines[index_energy].split()[pos_en]) * Hartree energy -= e_coul # e_coul already in eV. 
self.results['energy'] = energy # Force line indexes fstring = 'CARTESIAN FORCES' gradients = [] for iline, line in enumerate(self.lines): if line.find(fstring) >= 0: index_force_begin = iline + 2 break else: raise RuntimeError('Problem in reading forces') for j in range(index_force_begin, index_force_begin+len(self.atoms)): word = self.lines[j].split() # If GHOST atoms give problems, have a close look at this if len(word) == 5: gradients.append([float(word[k+2]) for k in range(0, 3)]) elif len(word) == 4: gradients.append([float(word[k+1]) for k in range(0, 3)]) else: raise RuntimeError('Problem in reading forces') forces = np.array(gradients) * Hartree / Bohr self.results['forces'] = forces # stress stuff begins sstring = 'STRESS TENSOR, IN' have_stress = False stress = [] for iline, line in enumerate(self.lines): if sstring in line: have_stress = True start = iline + 4 end = start + 3 for i in range(start, end): cell = [float(x) for x in self.lines[i].split()] stress.append(cell) if have_stress: stress = -np.array(stress) * Hartree / Bohr**3 self.results['stress'] = stress # stress stuff ends # Get partial charges on atoms. # In case we cannot find charges # they are set to None qm_charges = [] # ----- this for cycle finds the last entry of the # ----- string search, which corresponds # ----- to the charges at the end of the SCF. for n, line in enumerate(self.lines): if 'TOTAL ATOMIC CHARGE' in line: chargestart = n + 1 lines1 = self.lines[chargestart:(chargestart + (len(self.atoms) - 1) // 6 + 1)] atomnum = self.atoms.get_atomic_numbers() words = [] for line in lines1: for el in line.split(): words.append(float(el)) i = 0 for atn in atomnum: qm_charges.append(-words[i] + atn) i = i + 1 charges = np.array(qm_charges) self.results['charges'] = charges # Read dipole moment. dipole = np.zeros([1, 3]) for n, line in enumerate(self.lines): if 'DIPOLE MOMENT ALONG' in line: dipolestart = n + 2 dipole = np.array([float(f) for f in self.lines[dipolestart].split()[2:5]]) break # debye to e*Ang self.results['dipole'] = dipole * 0.2081943482534 def embed(self, mmcharges=None, directory='./'): """Embed atoms in point-charges (mmcharges) """ self.pcpot = PointChargePotential(mmcharges, self.directory) return self.pcpot class PointChargePotential: def __init__(self, mmcharges, directory='./'): """Point-charge potential for CRYSTAL. """ self.mmcharges = mmcharges self.directory = directory self.mmpositions = None self.mmforces = None self.coulomb_corrections = None self.crys_pcc = False def set_positions(self, mmpositions): self.mmpositions = mmpositions def set_charges(self, mmcharges): self.mmcharges = mmcharges def write_mmcharges(self, filename): """ mok all write external charges as monopoles for CRYSTAL. 
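        The file layout, as written below, is a single header line holding
        the number of point charges, followed by one ``x y z q`` line per
        charge.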
""" if self.mmcharges is None: print("CRYSTAL: Warning: not writing external charges ") return with open(os.path.join(self.directory, filename), 'w') as charge_file: charge_file.write(str(len(self.mmcharges))+' \n') for [pos, charge] in zip(self.mmpositions, self.mmcharges): [x, y, z] = pos charge_file.write('%12.6f %12.6f %12.6f %12.6f \n' % (x, y, z, charge)) def get_forces(self, calc, get_forces=True): """ returns forces on point charges if the flag get_forces=True """ if get_forces: return self.read_forces_on_pointcharges() else: return np.zeros_like(self.mmpositions) def read_forces_on_pointcharges(self): """Read Forces from CRYSTAL output file (OUTPUT).""" with open(os.path.join(self.directory, 'OUTPUT'), 'r') as infile: lines = infile.readlines() print('PCPOT crys_pcc: '+str(self.crys_pcc)) # read in force and energy Coulomb corrections if self.crys_pcc: self.read_pc_corrections() else: self.manual_pc_correct() e_coul, f_coul = self.coulomb_corrections external_forces = [] for n, line in enumerate(lines): if ('RESULTANT FORCE' in line): chargeend = n - 1 break else: raise RuntimeError( 'Problem in reading forces on MM external-charges') lines1 = lines[(chargeend - len(self.mmcharges)):chargeend] for line in lines1: external_forces.append( [float(i) for i in line.split()[2:]]) f = np.array(external_forces) - f_coul f *= (Hartree / Bohr) return f def read_pc_corrections(self): ''' Crystal calculates Coulomb forces and energies between all point charges, and adds that to the QM subsystem. That needs to be subtracted again. This will be standard in future CRYSTAL versions .''' with open(os.path.join(self.directory, 'FORCES_CHG.DAT'), 'r') as infile: lines = infile.readlines() e = [float(x.split()[-1]) for x in lines if 'SELF-INTERACTION ENERGY(AU)' in x][0] e *= Hartree f_lines = [s for s in lines if '199' in s] assert(len(f_lines) == len(self.mmcharges)), \ 'Mismatch in number of point charges from FORCES_CHG.dat' pc_forces = np.zeros((len(self.mmcharges), 3)) for i, l in enumerate(f_lines): first = l.split(str(i + 1) + ' 199 ') assert(len(first) == 2), 'Problem reading FORCES_CHG.dat' f = first[-1].split() pc_forces[i] = [float(x) for x in f] self.coulomb_corrections = (e, pc_forces) def manual_pc_correct(self): ''' For current versions of CRYSTAL14/17, manual Coulomb correction ''' R = self.mmpositions / Bohr charges = self.mmcharges forces = np.zeros_like(R) energy = 0.0 for m in range(len(charges)): D = R[m + 1:] - R[m] d2 = (D**2).sum(1) d = d2**0.5 e_c = charges[m + 1:] * charges[m] / d energy += np.sum(e_c) F = (e_c / d2)[:, None] * D forces[m] -= F.sum(0) forces[m + 1:] += F energy *= Hartree self.coulomb_corrections = (energy, forces) ase-3.22.1/ase/calculators/demon/000077500000000000000000000000001415166253600165465ustar00rootroot00000000000000ase-3.22.1/ase/calculators/demon/__init__.py000066400000000000000000000001611415166253600206550ustar00rootroot00000000000000from ase.calculators.demon.demon import Demon #from ase.calculators.demon.demon_io import * __all__ = ['Demon'] ase-3.22.1/ase/calculators/demon/demon.py000066400000000000000000000604071415166253600202310ustar00rootroot00000000000000"""This module defines an ASE interface to deMon. 
http://www.demon-software.com """ import os import os.path as op import subprocess import shutil import numpy as np from ase.units import Bohr, Hartree import ase.data from ase.calculators.calculator import FileIOCalculator, ReadError from ase.calculators.calculator import Parameters, all_changes from ase.calculators.calculator import equal import ase.io from .demon_io import parse_xray m_e_to_amu = 1822.88839 class Parameters_deMon(Parameters): """Parameters class for the calculator. Documented in Base_deMon.__init__ The options here are the most important ones that the user needs to be aware of. Further options accepted by deMon can be set in the dictionary input_arguments. """ def __init__( self, label='rundir', atoms=None, command=None, restart=None, basis_path=None, ignore_bad_restart_file=FileIOCalculator._deprecated, deMon_restart_path='.', title='deMon input file', scftype='RKS', forces=False, dipole=False, xc='VWN', guess='TB', print_out='MOE', basis={}, ecps={}, mcps={}, auxis={}, augment={}, input_arguments=None): kwargs = locals() kwargs.pop('self') Parameters.__init__(self, **kwargs) class Demon(FileIOCalculator): """Calculator interface to the deMon code. """ implemented_properties = [ 'energy', 'forces', 'dipole', 'eigenvalues'] def __init__(self, **kwargs): """ASE interface to the deMon code. The deMon2k code can be obtained from http://www.demon-software.com The DEMON_COMMAND environment variable must be set to run the executable, in bash it would be set along the lines of export DEMON_COMMAND="deMon.4.3.6.std > deMon_ase.out 2>&1" Parameters: label : str relative path to the run directory atoms : Atoms object the atoms object command : str Command to run deMon. If not present the environment varable DEMON_COMMAND will be used restart : str Relative path to ASE restart directory for parameters and atoms object and results basis_path : str Relative path to the directory containing BASIS, AUXIS, ECPS, MCPS and AUGMENT ignore_bad_restart_file : bool Ignore broken or missing ASE restart files By default, it is an error if the restart file is missing or broken. deMon_restart_path : str Relative path to the deMon restart dir title : str Title in the deMon input file. scftype : str Type of scf forces : bool If True a force calculation will be enforced. dipole : bool If True a dipole calculation will be enforced xc : str xc-functional guess : str guess for initial density and wave functions print_out : str | list Options for the printing in deMon basis : dict Definition of basis sets. ecps : dict Definition of ECPs mcps : dict Definition of MCPs auxis : dict Definition of AUXIS augment : dict Definition of AUGMENT input_arguments : dict Explicitly given input arguments. The key is the input keyword and the value is either a str, a list of str (will be written on the same line as the keyword), or a list of lists of str (first list is written on the first line, the others on following lines.) For example usage, see the tests h2o.py and h2o_xas_xes.py in the directory ase/test/demon """ parameters = Parameters_deMon(**kwargs) # Setup the run command command = parameters['command'] if command is None: command = os.environ.get('DEMON_COMMAND') if command is None: mess = 'The "DEMON_COMMAND" environment is not defined.' raise ValueError(mess) else: parameters['command'] = command # Call the base class. 
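        # Hedged example (comment only): both external requirements can also
        # be passed explicitly instead of relying on environment variables,
        # e.g.
        #
        #     Demon(label='rundir',
        #           command='deMon.4.3.6.std > deMon_ase.out 2>&1',
        #           basis_path='/path/to/demon/basis')   # placeholder path
        #
        # 'command' falls back to $DEMON_COMMAND (resolved just above) and
        # 'basis_path' falls back to $DEMON_BASIS_PATH (checked later in
        # calculate()).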
FileIOCalculator.__init__( self, **parameters) def __getitem__(self, key): """Convenience method to retrieve a parameter as calculator[key] rather than calculator.parameters[key] Parameters: key : str, the name of the parameters to get. """ return self.parameters[key] def set(self, **kwargs): """Set all parameters. Parameters: kwargs : Dictionary containing the keywords for deMon """ # Put in the default arguments. kwargs = self.default_parameters.__class__(**kwargs) if 'parameters' in kwargs: filename = kwargs.pop('parameters') parameters = Parameters.read(filename) parameters.update(kwargs) kwargs = parameters changed_parameters = {} for key, value in kwargs.items(): oldvalue = self.parameters.get(key) if key not in self.parameters or not equal(value, oldvalue): changed_parameters[key] = value self.parameters[key] = value return changed_parameters def link_file(self, fromdir, todir, filename): if op.exists(todir + '/' + filename): os.remove(todir + '/' + filename) if op.exists(fromdir + '/' + filename): os.symlink(fromdir + '/' + filename, todir + '/' + filename) else: raise RuntimeError( "{0} doesn't exist".format(fromdir + '/' + filename)) def calculate(self, atoms=None, properties=['energy'], system_changes=all_changes): """Capture the RuntimeError from FileIOCalculator.calculate and add a little debug information from the deMon output. See base FileIocalculator for documentation. """ if atoms is not None: self.atoms = atoms.copy() self.write_input(self.atoms, properties, system_changes) if self.command is None: raise RuntimeError('Please set $%s environment variable ' % ('DEMON_COMMAND') + 'or supply the command keyword') command = self.command # .replace('PREFIX', self.prefix) # basis path basis_path = self.parameters['basis_path'] if basis_path is None: basis_path = os.environ.get('DEMON_BASIS_PATH') if basis_path is None: raise RuntimeError('Please set basis_path keyword,' + ' or the DEMON_BASIS_PATH' + ' environment variable') # link restart file value = self.parameters['guess'] if value.upper() == 'RESTART': value2 = self.parameters['deMon_restart_path'] if op.exists(self.directory + '/deMon.rst')\ or op.islink(self.directory + '/deMon.rst'): os.remove(self.directory + '/deMon.rst') abspath = op.abspath(value2) if op.exists(abspath + '/deMon.mem') \ or op.islink(abspath + '/deMon.mem'): shutil.copy(abspath + '/deMon.mem', self.directory + '/deMon.rst') else: raise RuntimeError( "{0} doesn't exist".format(abspath + '/deMon.rst')) abspath = op.abspath(basis_path) for name in ['BASIS', 'AUXIS', 'ECPS', 'MCPS', 'FFDS']: self.link_file(abspath, self.directory, name) subprocess.check_call(command, shell=True, cwd=self.directory) try: self.read_results() except Exception: # XXX Which kind of exception? with open(self.directory + '/deMon.out', 'r') as fd: lines = fd.readlines() debug_lines = 10 print('##### %d last lines of the deMon.out' % debug_lines) for line in lines[-20:]: print(line.strip()) print('##### end of deMon.out') raise RuntimeError def set_label(self, label): """Set label directory """ self.label = label # in our case self.directory = self.label self.directory = self.label if self.directory == '': self.directory = os.curdir def write_input(self, atoms, properties=None, system_changes=None): """Write input (in)-file. See calculator.py for further details. Parameters: atoms : The Atoms object to write. properties : The properties which should be calculated. system_changes : List of properties changed since last run. """ # Call base calculator. 
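        # Orientation note (hedged, inferred from this method and from
        # calculate()): after writing input, the run directory self.label
        # typically contains
        #
        #     deMon.inp          # keyword block + GEOMETRY, written below
        #     deMon_atoms.xyz    # convenience copy of the structure
        #
        # while calculate() additionally symlinks BASIS, AUXIS, ECPS, MCPS
        # and FFDS from basis_path before running deMon.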
FileIOCalculator.write_input( self, atoms=atoms, properties=properties, system_changes=system_changes) if system_changes is None and properties is None: return filename = self.label + '/deMon.inp' add_print = '' # Start writing the file. with open(filename, 'w') as fd: # write keyword argument keywords value = self.parameters['title'] self._write_argument('TITLE', value, fd) fd.write('#\n') value = self.parameters['scftype'] self._write_argument('SCFTYPE', value, fd) value = self.parameters['xc'] self._write_argument('VXCTYPE', value, fd) value = self.parameters['guess'] self._write_argument('GUESS', value, fd) # obtain forces through a single BOMD step # only if forces is in properties, or if keyword forces is True value = self.parameters['forces'] if 'forces' in properties or value: self._write_argument('DYNAMICS', ['INT=1', 'MAX=0', 'STEP=0'], fd) self._write_argument('TRAJECTORY', 'FORCES', fd) self._write_argument('VELOCITIES', 'ZERO', fd) add_print = add_print + ' ' + 'MD OPT' # if dipole is True, enforce dipole calculation. # Otherwise only if asked for value = self.parameters['dipole'] if 'dipole' in properties or value: self._write_argument('DIPOLE', '', fd) # print argument, here other options could change this value = self.parameters['print_out'] assert(type(value) is str) value = value + add_print if not len(value) == 0: self._write_argument('PRINT', value, fd) fd.write('#\n') # write general input arguments self._write_input_arguments(fd) fd.write('#\n') # write basis set, ecps, mcps, auxis, augment basis = self.parameters['basis'] if 'all' not in basis: basis['all'] = 'DZVP' self._write_basis(fd, atoms, basis, string='BASIS') ecps = self.parameters['ecps'] if not len(ecps) == 0: self._write_basis(fd, atoms, ecps, string='ECPS') mcps = self.parameters['mcps'] if not len(mcps) == 0: self._write_basis(fd, atoms, mcps, string='MCPS') auxis = self.parameters['auxis'] if not len(auxis) == 0: self._write_basis(fd, atoms, auxis, string='AUXIS') augment = self.parameters['augment'] if not len(augment) == 0: self._write_basis(fd, atoms, augment, string='AUGMENT') # write geometry self._write_atomic_coordinates(fd, atoms) # write xyz file for good measure. ase.io.write(self.label + '/deMon_atoms.xyz', self.atoms) def read(self, restart_path): """Read parameters from directory restart_path.""" self.set_label(restart_path) if not op.exists(restart_path + '/deMon.inp'): raise ReadError('The restart_path file {0} does not exist' .format(restart_path)) self.atoms = self.deMon_inp_to_atoms(restart_path + '/deMon.inp') self.read_results() def _write_input_arguments(self, fd): """Write directly given input-arguments.""" input_arguments = self.parameters['input_arguments'] # Early return if input_arguments is None: return for key, value in input_arguments.items(): self._write_argument(key, value, fd) def _write_argument(self, key, value, fd): """Write an argument to file. 
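
        A hedged illustration of how the accepted value types end up in
        deMon.inp (the keywords below are only placeholders)::

            ('VXCTYPE', 'BLYP')           ->  VXCTYPE BLYP
            ('PRINT', ['MOE', 'OPT'])     ->  PRINT MOE OPT
            ('KEY', [['A', '1'], ['B']])  ->  KEY A 1
                                              B
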
        Parameters:

        key : a string corresponding to the input keyword
        value : the arguments, can be a string, a number or a list
        fd : an open file
        """

        # for only one argument, write on same line
        if not isinstance(value, (tuple, list)):
            line = key.upper()
            line += ' ' + str(value).upper()
            fd.write(line)
            fd.write('\n')

        # for a list, write first argument on the first line,
        # then the rest on new lines
        else:
            line = key
            if not isinstance(value[0], (tuple, list)):
                for i in range(len(value)):
                    line += ' ' + str(value[i].upper())
                fd.write(line)
                fd.write('\n')
            else:
                for i in range(len(value)):
                    for j in range(len(value[i])):
                        line += ' ' + str(value[i][j]).upper()
                    fd.write(line)
                    fd.write('\n')
                    line = ''

    def _write_atomic_coordinates(self, fd, atoms):
        """Write atomic coordinates.

        Parameters:
        - fd:    An open file object.
        - atoms: An atoms object.
        """

        fd.write('#\n')
        fd.write('# Atomic coordinates\n')
        fd.write('#\n')
        fd.write('GEOMETRY CARTESIAN ANGSTROM\n')

        for i in range(len(atoms)):
            xyz = atoms.get_positions()[i]
            chem_symbol = atoms.get_chemical_symbols()[i]
            chem_symbol += str(i + 1)

            # if tag is set to 1 then we have a ghost atom,
            # set nuclear charge to 0
            if atoms.get_tags()[i] == 1:
                nuc_charge = str(0)
            else:
                nuc_charge = str(atoms.get_atomic_numbers()[i])

            mass = atoms.get_masses()[i]

            line = '{0:6s}'.format(chem_symbol).rjust(10) + ' '
            line += '{0:.5f}'.format(xyz[0]).rjust(10) + ' '
            line += '{0:.5f}'.format(xyz[1]).rjust(10) + ' '
            line += '{0:.5f}'.format(xyz[2]).rjust(10) + ' '
            line += '{0:5s}'.format(nuc_charge).rjust(10) + ' '
            line += '{0:.5f}'.format(mass).rjust(10) + ' '

            fd.write(line)
            fd.write('\n')

    # routine to write basis set information, including ECPs and AUXIS
    def _write_basis(self, fd, atoms, basis={}, string='BASIS'):
        """Write basis set, ECPs, AUXIS, or AUGMENT basis

        Parameters:
        - fd:    An open file object.
        - atoms: An atoms object.
- basis: A dictionary specifying the basis set - string: 'BASIS', 'ECP','AUXIS' or 'AUGMENT' """ # basis for all atoms line = '{0}'.format(string).ljust(10) if 'all' in basis: default_basis = basis['all'] line += '({0})'.format(default_basis).rjust(16) fd.write(line) fd.write('\n') # basis for all atomic species chemical_symbols = atoms.get_chemical_symbols() chemical_symbols_set = set(chemical_symbols) for i in range(chemical_symbols_set.__len__()): symbol = chemical_symbols_set.pop() if symbol in basis: line = '{0}'.format(symbol).ljust(10) line += '({0})'.format(basis[symbol]).rjust(16) fd.write(line) fd.write('\n') # basis for individual atoms for i in range(len(atoms)): if i in basis: symbol = str(chemical_symbols[i]) symbol += str(i + 1) line = '{0}'.format(symbol).ljust(10) line += '({0})'.format(basis[i]).rjust(16) fd.write(line) fd.write('\n') # Analysis routines def read_results(self): """Read the results from output files.""" self.read_energy() self.read_forces(self.atoms) self.read_eigenvalues() self.read_dipole() self.read_xray() def read_energy(self): """Read energy from deMon's text-output file.""" with open(self.label + '/deMon.out', 'r') as fd: text = fd.read().upper() lines = iter(text.split('\n')) for line in lines: if line.startswith(' TOTAL ENERGY ='): self.results['energy'] = float(line.split()[-1]) * Hartree break else: raise RuntimeError def read_forces(self, atoms): """Read the forces from the deMon.out file.""" natoms = len(atoms) filename = self.label + '/deMon.out' if op.isfile(filename): with open(filename, 'r') as fd: lines = fd.readlines() # find line where the orbitals start flag_found = False for i in range(len(lines)): if lines[i].rfind('GRADIENTS OF TIME STEP 0 IN A.U.') > -1: start = i + 4 flag_found = True break if flag_found: self.results['forces'] = np.zeros((natoms, 3), float) for i in range(natoms): line = [s for s in lines[i + start].strip().split(' ') if len(s) > 0] f = -np.array([float(x) for x in line[2:5]]) self.results['forces'][i, :] = f * (Hartree / Bohr) def read_eigenvalues(self): """Read eigenvalues from the 'deMon.out' file.""" assert os.access(self.label + '/deMon.out', os.F_OK) # Read eigenvalues with open(self.label + '/deMon.out', 'r') as fd: lines = fd.readlines() # try PRINT MOE eig_alpha, occ_alpha = self.read_eigenvalues_one_spin( lines, 'ALPHA MO ENERGIES', 6) eig_beta, occ_beta = self.read_eigenvalues_one_spin( lines, 'BETA MO ENERGIES', 6) # otherwise try PRINT MOS if len(eig_alpha) == 0 and len(eig_beta) == 0: eig_alpha, occ_alpha = self.read_eigenvalues_one_spin( lines, 'ALPHA MO COEFFICIENTS', 5) eig_beta, occ_beta = self.read_eigenvalues_one_spin( lines, 'BETA MO COEFFICIENTS', 5) self.results['eigenvalues'] = np.array([eig_alpha, eig_beta]) * Hartree self.results['occupations'] = np.array([occ_alpha, occ_beta]) def read_eigenvalues_one_spin(self, lines, string, neigs_per_line): """Utility method for retreiving eigenvalues after the string "string" with neigs_per_line eigenvlaues written per line """ eig = [] occ = [] skip_line = False more_eigs = False # find line where the orbitals start for i in range(len(lines)): if lines[i].rfind(string) > -1: ii = i more_eigs = True break while more_eigs: # search for two empty lines in a row preceding a line with # numbers for i in range(ii + 1, len(lines)): if len(lines[i].split()) == 0 and \ len(lines[i + 1].split()) == 0 and \ len(lines[i + 2].split()) > 0: ii = i + 2 break # read eigenvalues, occupations line = lines[ii].split() if len(line) < neigs_per_line: # last row 
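            # Hedged sketch of the block layout this parser assumes (inferred
            # from the indexing below, not a verbatim deMon output):
            #
            #       1        2        3        4        5        6
            #   -18.613   -0.903   -0.471   -0.338   -0.263    0.042
            #
            #     2.000    2.000    2.000    2.000    2.000    0.000
            #
            # lines[ii] holds the MO indices, lines[ii + 1] the eigenvalues
            # (in Hartree) and lines[ii + 3] the occupations.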
                more_eigs = False
            if line[0] != str(len(eig) + 1):
                more_eigs = False
                skip_line = True

            if not skip_line:
                line = lines[ii + 1].split()
                for l in line:
                    eig.append(float(l))
                line = lines[ii + 3].split()
                for l in line:
                    occ.append(float(l))
                ii = ii + 3

        return eig, occ

    def read_dipole(self):
        """Read dipole moment."""
        dipole = np.zeros(3)
        with open(self.label + '/deMon.out', 'r') as fd:
            lines = fd.readlines()

            for i in range(len(lines)):
                if lines[i].rfind('DIPOLE') > -1 and lines[i].rfind('XAS') == -1:
                    dipole[0] = float(lines[i + 1].split()[3])
                    dipole[1] = float(lines[i + 2].split()[3])
                    dipole[2] = float(lines[i + 3].split()[3])

                    # debye to e*Ang
                    self.results['dipole'] = dipole * 0.2081943482534
                    break

    def read_xray(self):
        """Read deMon.xry if present."""

        # try to read the core IP from the .out file
        filename = self.label + '/deMon.out'
        core_IP = None
        if op.isfile(filename):
            with open(filename, 'r') as fd:
                lines = fd.readlines()

            for i in range(len(lines)):
                if lines[i].rfind('IONIZATION POTENTIAL') > -1:
                    core_IP = float(lines[i].split()[3])

        try:
            mode, ntrans, E_trans, osc_strength, trans_dip = parse_xray(self.label + '/deMon.xry')
        except ReadError:
            pass
        else:
            xray_results = {'xray_mode': mode,
                            'ntrans': ntrans,
                            'E_trans': E_trans,
                            'osc_strength': osc_strength,  # units?
                            'trans_dip': trans_dip,  # units?
                            'core_IP': core_IP}

            self.results['xray'] = xray_results

    def deMon_inp_to_atoms(self, filename):
        """Routine to read deMon.inp and convert it to an atoms object."""

        with open(filename, 'r') as fd:
            lines = fd.readlines()

        # Find the line where the geometry starts.  Note that str.rfind()
        # returns -1 (which is truthy) when the substring is absent, so its
        # result must be compared explicitly rather than used as a boolean.
        coord_units = 'Ang'  # default, overridden below if BOHR is found
        for i in range(len(lines)):
            if lines[i].rfind('GEOMETRY') > -1:
                if lines[i].rfind('ANGSTROM') > -1:
                    coord_units = 'Ang'
                elif lines[i].upper().rfind('BOHR') > -1:
                    coord_units = 'Bohr'
                ii = i
                break

        chemical_symbols = []
        xyz = []
        atomic_numbers = []
        masses = []

        for i in range(ii + 1, len(lines)):
            try:
                line = lines[i].split()

                if len(line) > 0:
                    for symbol in ase.data.chemical_symbols:
                        found = None
                        if line[0].upper().rfind(symbol.upper()) > -1:
                            found = symbol
                            break

                    if found is not None:
                        chemical_symbols.append(found)
                    else:
                        break

                    xyz.append(
                        [float(line[1]), float(line[2]), float(line[3])])

                if len(line) > 4:
                    atomic_numbers.append(int(line[4]))

                if len(line) > 5:
                    masses.append(float(line[5]))

            except Exception:  # XXX Which kind of exception?
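                # (Hedged, illustrative sketch of the GEOMETRY block the loop
                # above parses; it mirrors what _write_atomic_coordinates()
                # produces:
                #
                #   GEOMETRY CARTESIAN ANGSTROM
                #   O1      0.00000   0.00000   0.11926    8   15.99900
                #   H2      0.00000   0.76324  -0.47704    1    1.00800
                #   H3      0.00000  -0.76324  -0.47704    1    1.00800
                #
                # i.e. symbol+index, x, y, z, then optional nuclear charge
                # and mass columns.  Anything that does not fit ends up here.)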
raise RuntimeError if coord_units == 'Bohr': xyz = xyz * Bohr natoms = len(chemical_symbols) # set atoms object atoms = ase.Atoms(symbols=chemical_symbols, positions=xyz) # if atomic numbers were read in, set them if(len(atomic_numbers) == natoms): atoms.set_atomic_numbers(atomic_numbers) # if masses were read in, set them if(len(masses) == natoms): atoms.set_masses(masses) return atoms ase-3.22.1/ase/calculators/demon/demon_io.py000066400000000000000000000021301415166253600207050ustar00rootroot00000000000000from ase.calculators.calculator import ReadError import os.path as op import numpy as np from ase.units import Hartree def parse_xray(filename): #filename = self.label + '/deMon.xry' if op.isfile(filename): with open(filename, 'r') as fd: lines = fd.readlines() mode = lines[0].split()[0] ntrans = int(lines[0].split()[1]) E_trans = [] osc_strength = [] trans_dip = [] for i in range(1, ntrans + 1): tokens = lines[i].split() E_trans.append(float(tokens[0])) osc_strength.append( float(tokens[1].replace('D', 'e'))) dip1 = float(tokens[3].replace('D', 'e')) dip2 = float(tokens[4].replace('D', 'e')) dip3 = float(tokens[5].replace('D', 'e')) trans_dip.append([dip1, dip2, dip3]) return mode, ntrans, np.array(E_trans) * Hartree, np.array(osc_strength), np.array(trans_dip) else: raise ReadError('The file {0} does not exist' .format(filename)) ase-3.22.1/ase/calculators/demonnano.py000066400000000000000000000267401415166253600200050ustar00rootroot00000000000000# flake8: noqa """This module defines an ASE interface to deMon-nano. Link to the open-source DFTB code deMon-nano: http://demon-nano.ups-tlse.fr/ export ASE_DEMONNANO_COMMAND="/path/to/bin/deMon.username.x" export DEMONNANO_BASIS_PATH="/path/to/basis/" The file 'deMon.inp' contains the input geometry and parameters The file 'deMon.out' contains the results """ import os import os.path as op #import subprocess import pathlib as pl import numpy as np from ase.units import Bohr, Hartree import ase.data from ase.calculators.calculator import FileIOCalculator, ReadError from ase.calculators.calculator import Parameters import ase.io class DemonNanoParameters(Parameters): """Parameters class for the calculator. The options here are the most important ones that the user needs to be aware of. Further options accepted by deMon can be set in the dictionary input_arguments. """ def __init__( self, label='.', atoms=None, command=None, basis_path=None, restart_path='.', print_out='ASE', title='deMonNano input file', forces=False, input_arguments=None): kwargs = locals() kwargs.pop('self') Parameters.__init__(self, **kwargs) class DemonNano(FileIOCalculator): """Calculator interface to the deMon-nano code. """ implemented_properties = ['energy', 'forces'] def __init__(self, **kwargs): """ASE interface to the deMon-nano code. The deMon-nano code can be obtained from http://demon-nano.ups-tlse.fr/ The ASE_DEMONNANO_COMMAND environment variable must be set to run the executable, in bash it would be set along the lines of export ASE_DEMONNANO_COMMAND="pathway-to-deMon-binary/deMon.username.x" Parameters: label : str relative path to the run directory atoms : Atoms object the atoms object command : str Command to run deMon. If not present, the environment variable ASE_DEMONNANO_COMMAND is used basis_path : str Relative path to the directory containing DFTB-SCC or DFTB-0 parameters If not present, the environment variable DEMONNANO_BASIS_PATH is used restart_path : str Relative path to the deMon restart dir title : str Title in the deMon input file. 
forces : bool If True a force calculation is enforced print_out : str | list Options for the printing in deMon input_arguments : dict Explicitly given input arguments. The key is the input keyword and the value is either a str, a list of str (will be written on the same line as the keyword), or a list of lists of str (first list is written on the first line, the others on following lines.) """ parameters = DemonNanoParameters(**kwargs) # basis path basis_path = parameters['basis_path'] if basis_path is None: basis_path = os.environ.get('DEMONNANO_BASIS_PATH') if basis_path is None: mess = 'The "DEMONNANO_BASIS_PATH" environment is not defined.' raise ValueError(mess) else: parameters['basis_path'] = basis_path # Call the base class. FileIOCalculator.__init__( self, **parameters) def __getitem__(self, key): """Convenience method to retrieve a parameter as calculator[key] rather than calculator.parameters[key] Parameters: key : str, the name of the parameters to get. """ return self.parameters[key] def write_input(self, atoms, properties=None, system_changes=None): """Write input (in)-file. See calculator.py for further details. Parameters: atoms : The Atoms object to write. properties : The properties which should be calculated. system_changes : List of properties changed since last run. """ # Call base calculator. FileIOCalculator.write_input( self, atoms=atoms, properties=properties, system_changes=system_changes) if system_changes is None and properties is None: return filename = self.label + '/deMon.inp' # Start writing the file. with open(filename, 'w') as fd: # write keyword argument keywords value = self.parameters['title'] self._write_argument('TITLE', value, fd) fd.write('\n') # obtain forces through a single BOMD step # only if forces is in properties, or if keyword forces is True value = self.parameters['forces'] if 'forces' in properties or value: self._write_argument('MDYNAMICS', 'ZERO', fd) self._write_argument('MDSTEP', 'MAX=1', fd) #default timestep is 0.25 fs if not enough - uncomment the line below #self._write_argument('TIMESTEP', '0.1', fd) # print argument, here other options could change this value = self.parameters['print_out'] assert(isinstance(value, str)) if not len(value) == 0: self._write_argument('PRINT', value, fd) fd.write('\n') # write general input arguments self._write_input_arguments(fd) if 'BASISPATH' not in self.parameters['input_arguments']: value = self.parameters['basis_path'] fd.write(value) fd.write('\n') # write geometry self._write_atomic_coordinates(fd, atoms) # write xyz file for good measure. ase.io.write(self.label + '/deMon_atoms.xyz', self.atoms) def read(self, restart_path): """Read parameters from directory restart_path.""" self.set_label(restart_path) rpath = pl.Path(restart_path) if not (rpath / 'deMon.inp').exists(): raise ReadError('The restart_path file {0} does not exist' .format(rpath)) self.atoms = self.deMon_inp_to_atoms(rpath / 'deMon.inp') self.read_results() def _write_input_arguments(self, fd): """Write directly given input-arguments.""" input_arguments = self.parameters['input_arguments'] # Early return if input_arguments is None: return for key, value in input_arguments.items(): self._write_argument(key, value, fd) def _write_argument(self, key, value, fd): """Write an argument to file. key : a string coresponding to the input keyword value : the arguments, can be a string, a number or a list fd : and open file """ if key == 'BASISPATH': # Write a basis path to file. 
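            # Hedged example: input_arguments={'BASISPATH': '/SOME/SLAKO/DIR'}
            # (a placeholder path) ends up in deMon.inp as the lower-cased
            # line '/some/slako/dir', written on its own line without the
            # keyword itself.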
# Has to be in lowercase for deMon-nano to work line = value.lower() fd.write(line) fd.write('\n') elif not isinstance(value, (tuple, list)): # for only one argument, write on same line line = key.upper() line += ' ' + str(value).upper() fd.write(line) fd.write('\n') # for a list, write first argument on the first line, # then the rest on new lines else: line = key if not isinstance(value[0], (tuple, list)): for i in range(len(value)): line += ' ' + str(value[i].upper()) fd.write(line) fd.write('\n') else: for i in range(len(value)): for j in range(len(value[i])): line += ' ' + str(value[i][j]).upper() fd.write(line) fd.write('\n') line = '' def _write_atomic_coordinates(self, fd, atoms): """Write atomic coordinates. Parameters: - fd: An open file object. - atoms: An atoms object. """ #fd.write('#\n') #fd.write('# Atomic coordinates\n') #fd.write('#\n') fd.write('GEOMETRY CARTESIAN ANGSTROM\n') for sym, pos in zip(atoms.symbols, atoms.positions): fd.write('{:9s} {:10.5f} {:10.5f} {:10.5f}\n'.format(sym, *pos)) fd.write('\n') # Analysis routines def read_results(self): """Read the results from output files.""" self.read_energy() self.read_forces(self.atoms) #self.read_eigenvalues() def read_energy(self): """Read energy from deMon.ase output file.""" epath = pl.Path(self.label) if not (epath / 'deMon.ase').exists(): raise ReadError('The deMonNano output file for ASE {0} does not exist' .format(epath)) filename = self.label + '/deMon.ase' if op.isfile(filename): with open(filename, 'r') as fd: lines = fd.readlines() for i in range(len(lines)): if lines[i].startswith(' DFTB total energy [Hartree]'): self.results['energy'] = float(lines[i+1])*Hartree break def read_forces(self, atoms): """Read forces from the deMon.ase file.""" natoms = len(atoms) epath = pl.Path(self.label) if not (epath / 'deMon.ase').exists(): raise ReadError('The deMonNano output file for ASE {0} does not exist' .format(epath)) filename = self.label + '/deMon.ase' with open(filename, 'r') as fd: lines = fd.readlines() # find line where the forces start flag_found = False for i in range(len(lines)): if 'DFTB gradients at 0 time step in a.u.' in lines[i]: start = i + 1 flag_found = True break if flag_found: self.results['forces'] = np.zeros((natoms, 3), float) for i in range(natoms): line = [s for s in lines[i + start].strip().split(' ') if len(s) > 0] f = -np.array([float(x) for x in line[1:4]]) # output forces in a.u. 
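                # Hedged sketch of the block parsed here (inferred from the
                # indexing above, not a verbatim deMon-nano output):
                #
                #   DFTB gradients at 0 time step in a.u.
                #     1     0.000000     0.000000     0.012345
                #     2     0.000000     0.006173    -0.006173
                #
                # column 0 is the atom index, columns 1-3 the gradient
                # components in Hartree/Bohr; the sign flip above and the
                # (Hartree / Bohr) factor below convert them to forces in
                # eV/Angstrom.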
#self.results['forces'][i, :] = f # output forces with real dimension self.results['forces'][i, :] = f * (Hartree / Bohr) def deMon_inp_to_atoms(self, filename): """Routine to read deMon.inp and convert it to an atoms object.""" read_flag=False chem_symbols = [] xyz = [] with open(filename, 'r') as fd: for line in fd: if 'GEOMETRY' in line: read_flag = True if 'ANGSTROM' in line: coord_units = 'Ang' elif 'BOHR' in line: coord_units = 'Bohr' if read_flag: tokens = line.split() symbol = tokens[0] xyz_loc = np.array(tokens[1:4]).astype(float) if read_flag and tokens : chem_symbols.append(symbol) xyz.append(xyz_loc) if coord_units == 'Bohr': xyz = xyz * Bohr # set atoms object atoms = ase.Atoms(symbols=chem_symbols, positions=xyz) return atoms ase-3.22.1/ase/calculators/dftb.py000066400000000000000000000530231415166253600167400ustar00rootroot00000000000000""" This module defines a FileIOCalculator for DFTB+ http://www.dftbplus.org/ http://www.dftb.org/ Initial development: markus.kaukonen@iki.fi """ import os import numpy as np from ase.calculators.calculator import (FileIOCalculator, kpts2ndarray, kpts2sizeandoffsets) from ase.units import Hartree, Bohr class Dftb(FileIOCalculator): if 'DFTB_COMMAND' in os.environ: command = os.environ['DFTB_COMMAND'] + ' > PREFIX.out' else: command = 'dftb+ > PREFIX.out' implemented_properties = ['energy', 'forces', 'charges', 'stress', 'dipole'] discard_results_on_any_change = True def __init__(self, restart=None, ignore_bad_restart_file=FileIOCalculator._deprecated, label='dftb', atoms=None, kpts=None, slako_dir=None, **kwargs): """ All keywords for the dftb_in.hsd input file (see the DFTB+ manual) can be set by ASE. Consider the following input file block: >>> Hamiltonian = DFTB { >>> SCC = Yes >>> SCCTolerance = 1e-8 >>> MaxAngularMomentum = { >>> H = s >>> O = p >>> } >>> } This can be generated by the DFTB+ calculator by using the following settings: >>> calc = Dftb(Hamiltonian_='DFTB', # line is included by default >>> Hamiltonian_SCC='Yes', >>> Hamiltonian_SCCTolerance=1e-8, >>> Hamiltonian_MaxAngularMomentum_='', >>> Hamiltonian_MaxAngularMomentum_H='s', >>> Hamiltonian_MaxAngularMomentum_O='p') In addition to keywords specific to DFTB+, also the following keywords arguments can be used: restart: str Prefix for restart file. May contain a directory. Default is None: don't restart. ignore_bad_restart_file: bool Ignore broken or missing restart file. By default, it is an error if the restart file is missing or broken. label: str (default 'dftb') Prefix used for the main output file (